// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ /* * NPORT * * Port object for physical port and NPIV ports. */ /* * NPORT REFERENCE COUNTING * * A nport reference should be taken when: * - an nport is allocated * - a vport populates associated nport * - a remote node is allocated * - a unsolicited frame is processed * The reference should be dropped when: * - the unsolicited frame processesing is done * - the remote node is removed * - the vport is removed * - the nport is removed */ #include "efc.h" void efc_nport_cb(void *arg, int event, void *data) { struct efc *efc = arg; struct efc_nport *nport = data; unsigned long flags = 0; efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event)); spin_lock_irqsave(&efc->lock, flags); efc_sm_post_event(&nport->sm, event, NULL); spin_unlock_irqrestore(&efc->lock, flags); } static struct efc_nport * efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn) { struct efc_nport *nport = NULL; /* Find a nport, given the WWNN and WWPN */ list_for_each_entry(nport, &domain->nport_list, list_entry) { if (nport->wwnn == wwnn && nport->wwpn == wwpn) return nport; } return NULL; } static void _efc_nport_free(struct kref *arg) { struct efc_nport *nport = container_of(arg, struct efc_nport, ref); kfree(nport); } struct efc_nport * efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn, u32 fc_id, bool enable_ini, bool enable_tgt) { struct efc_nport *nport; if (domain->efc->enable_ini) enable_ini = 0; /* Return a failure if this nport has already been allocated */ if ((wwpn != 0) || (wwnn != 0)) { nport = efc_nport_find_wwn(domain, wwnn, wwpn); if (nport) { efc_log_err(domain->efc, "NPORT %016llX %016llX already allocated\n", wwnn, wwpn); return NULL; } } nport = kzalloc(sizeof(*nport), GFP_ATOMIC); if (!nport) return nport; /* initialize refcount */ kref_init(&nport->ref); nport->release = _efc_nport_free; nport->efc = domain->efc; snprintf(nport->display_name, sizeof(nport->display_name), "------"); nport->domain = domain; xa_init(&nport->lookup); nport->instance_index = domain->nport_count++; nport->sm.app = nport; nport->enable_ini = enable_ini; nport->enable_tgt = enable_tgt; nport->enable_rscn = (nport->enable_ini || (nport->enable_tgt && enable_target_rscn(nport->efc))); /* Copy service parameters from domain */ memcpy(nport->service_params, domain->service_params, sizeof(struct fc_els_flogi)); /* Update requested fc_id */ nport->fc_id = fc_id; /* Update the nport's service parameters for the new wwn's */ nport->wwpn = wwpn; nport->wwnn = wwnn; snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX", (unsigned long long)wwnn); /* * if this is the "first" nport of the domain, * then make it the "phys" nport */ if (list_empty(&domain->nport_list)) domain->nport = nport; INIT_LIST_HEAD(&nport->list_entry); list_add_tail(&nport->list_entry, &domain->nport_list); kref_get(&domain->ref); efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name); return nport; } void efc_nport_free(struct efc_nport *nport) { struct efc_domain *domain; if (!nport) return; domain = nport->domain; efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name); list_del(&nport->list_entry); /* * if this is the physical nport, * then clear it out of the domain */ if (nport == domain->nport) domain->nport = NULL; xa_destroy(&nport->lookup); xa_erase(&domain->lookup, nport->fc_id); if 
(list_empty(&domain->nport_list)) efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE, NULL); kref_put(&domain->ref, domain->release); kref_put(&nport->ref, nport->release); } struct efc_nport * efc_nport_find(struct efc_domain *domain, u32 d_id) { struct efc_nport *nport; /* Find a nport object, given an FC_ID */ nport = xa_load(&domain->lookup, d_id); if (!nport || !kref_get_unless_zero(&nport->ref)) return NULL; return nport; } int efc_nport_attach(struct efc_nport *nport, u32 fc_id) { int rc; struct efc_node *node; struct efc *efc = nport->efc; unsigned long index; /* Set our lookup */ rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC)); if (rc) { efc_log_err(efc, "Sport lookup store failed: %d\n", rc); return rc; } /* Update our display_name */ efc_node_fcid_display(fc_id, nport->display_name, sizeof(nport->display_name)); xa_for_each(&nport->lookup, index, node) { efc_node_update_display_name(node); } efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n", nport->display_name, fc_id); /* Register a nport, given an FC_ID */ rc = efc_cmd_nport_attach(efc, nport, fc_id); if (rc < 0) { efc_log_err(nport->efc, "efc_hw_port_attach failed: %d\n", rc); return -EIO; } return 0; } static void efc_nport_shutdown(struct efc_nport *nport) { struct efc *efc = nport->efc; struct efc_node *node; unsigned long index; xa_for_each(&nport->lookup, index, node) { if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) { efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); continue; } /* * If this is a vport, logout of the fabric * controller so that it deletes the vport * on the switch. */ /* if link is down, don't send logo */ if (efc->link_status == EFC_LINK_STATUS_DOWN) { efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); continue; } efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n", node->display_name); if (!efc_send_logo(node)) { /* sent LOGO, wait for response */ efc_node_transition(node, __efc_d_wait_logo_rsp, NULL); continue; } /* * failed to send LOGO, * go ahead and cleanup node anyways */ node_printf(node, "Failed to send LOGO\n"); efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); } } static void efc_vport_link_down(struct efc_nport *nport) { struct efc *efc = nport->efc; struct efc_vport *vport; /* Clear the nport reference in the vport specification */ list_for_each_entry(vport, &efc->vport_list, list_entry) { if (vport->nport == nport) { kref_put(&nport->ref, nport->release); vport->nport = NULL; break; } } } static void __efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc_domain *domain = nport->domain; struct efc *efc = nport->efc; switch (evt) { case EFC_EVT_ENTER: case EFC_EVT_REENTER: case EFC_EVT_EXIT: case EFC_EVT_ALL_CHILD_NODES_FREE: break; case EFC_EVT_NPORT_ATTACH_OK: efc_sm_transition(ctx, __efc_nport_attached, NULL); break; case EFC_EVT_SHUTDOWN: /* Flag this nport as shutting down */ nport->shutting_down = true; if (nport->is_vport) efc_vport_link_down(nport); if (xa_empty(&nport->lookup)) { /* Remove the nport from the domain's lookup table */ xa_erase(&domain->lookup, nport->fc_id); efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL); if (efc_cmd_nport_free(efc, nport)) { efc_log_debug(nport->efc, "efc_hw_port_free failed\n"); /* Not much we can do, free the nport anyways */ efc_nport_free(nport); } } else { /* sm: node list is not empty / shutdown nodes */ efc_sm_transition(ctx, __efc_nport_wait_shutdown, NULL); 
efc_nport_shutdown(nport); } break; default: efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n", nport->display_name, funcname, efc_sm_event_name(evt)); } } void __efc_nport_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc_domain *domain = nport->domain; nport_sm_trace(nport); switch (evt) { /* the physical nport is attached */ case EFC_EVT_NPORT_ATTACH_OK: WARN_ON(nport != domain->nport); efc_sm_transition(ctx, __efc_nport_attached, NULL); break; case EFC_EVT_NPORT_ALLOC_OK: /* ignore */ break; default: __efc_nport_common(__func__, ctx, evt, arg); } } void __efc_nport_vport_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc *efc = nport->efc; nport_sm_trace(nport); switch (evt) { case EFC_EVT_ENTER: { __be64 be_wwpn = cpu_to_be64(nport->wwpn); if (nport->wwpn == 0) efc_log_debug(efc, "vport: letting f/w select WWN\n"); if (nport->fc_id != U32_MAX) { efc_log_debug(efc, "vport: hard coding port id: %x\n", nport->fc_id); } efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL); /* If wwpn is zero, then we'll let the f/w assign wwpn*/ if (efc_cmd_nport_alloc(efc, nport, nport->domain, nport->wwpn == 0 ? NULL : (uint8_t *)&be_wwpn)) { efc_log_err(efc, "Can't allocate port\n"); break; } break; } default: __efc_nport_common(__func__, ctx, evt, arg); } } void __efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc *efc = nport->efc; nport_sm_trace(nport); switch (evt) { case EFC_EVT_NPORT_ALLOC_OK: { struct fc_els_flogi *sp; sp = (struct fc_els_flogi *)nport->service_params; if (nport->wwnn == 0) { nport->wwnn = be64_to_cpu(nport->sli_wwnn); nport->wwpn = be64_to_cpu(nport->sli_wwpn); snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX", nport->wwpn); } /* Update the nport's service parameters */ sp->fl_wwpn = cpu_to_be64(nport->wwpn); sp->fl_wwnn = cpu_to_be64(nport->wwnn); /* * if nport->fc_id is uninitialized, * then request that the fabric node use FDISC * to find an fc_id. * Otherwise we're restoring vports, or we're in * fabric emulation mode, so attach the fc_id */ if (nport->fc_id == U32_MAX) { struct efc_node *fabric; fabric = efc_node_alloc(nport, FC_FID_FLOGI, false, false); if (!fabric) { efc_log_err(efc, "efc_node_alloc() failed\n"); return; } efc_node_transition(fabric, __efc_vport_fabric_init, NULL); } else { snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX", nport->wwpn); efc_nport_attach(nport, nport->fc_id); } efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL); break; } default: __efc_nport_common(__func__, ctx, evt, arg); } } void __efc_nport_vport_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc *efc = nport->efc; nport_sm_trace(nport); /* * This state is entered after the nport is allocated; * it then waits for a fabric node * FDISC to complete, which requests a nport attach. * The nport attach complete is handled in this state. 
*/ switch (evt) { case EFC_EVT_NPORT_ATTACH_OK: { struct efc_node *node; /* Find our fabric node, and forward this event */ node = efc_node_find(nport, FC_FID_FLOGI); if (!node) { efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI); break; } /* sm: / forward nport attach to fabric node */ efc_node_post_event(node, evt, NULL); efc_sm_transition(ctx, __efc_nport_attached, NULL); break; } default: __efc_nport_common(__func__, ctx, evt, arg); } } static void efc_vport_update_spec(struct efc_nport *nport) { struct efc *efc = nport->efc; struct efc_vport *vport; unsigned long flags = 0; spin_lock_irqsave(&efc->vport_lock, flags); list_for_each_entry(vport, &efc->vport_list, list_entry) { if (vport->nport == nport) { vport->wwnn = nport->wwnn; vport->wwpn = nport->wwpn; vport->tgt_data = nport->tgt_data; vport->ini_data = nport->ini_data; break; } } spin_unlock_irqrestore(&efc->vport_lock, flags); } void __efc_nport_attached(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc *efc = nport->efc; nport_sm_trace(nport); switch (evt) { case EFC_EVT_ENTER: { struct efc_node *node; unsigned long index; efc_log_debug(efc, "[%s] NPORT attached WWPN %016llX WWNN %016llX\n", nport->display_name, nport->wwpn, nport->wwnn); xa_for_each(&nport->lookup, index, node) efc_node_update_display_name(node); efc->tt.new_nport(efc, nport); /* * Update the vport (if its not the physical nport) * parameters */ if (nport->is_vport) efc_vport_update_spec(nport); break; } case EFC_EVT_EXIT: efc_log_debug(efc, "[%s] NPORT deattached WWPN %016llX WWNN %016llX\n", nport->display_name, nport->wwpn, nport->wwnn); efc->tt.del_nport(efc, nport); break; default: __efc_nport_common(__func__, ctx, evt, arg); } } void __efc_nport_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; struct efc_domain *domain = nport->domain; struct efc *efc = nport->efc; nport_sm_trace(nport); switch (evt) { case EFC_EVT_NPORT_ALLOC_OK: case EFC_EVT_NPORT_ALLOC_FAIL: case EFC_EVT_NPORT_ATTACH_OK: case EFC_EVT_NPORT_ATTACH_FAIL: /* ignore these events - just wait for the all free event */ break; case EFC_EVT_ALL_CHILD_NODES_FREE: { /* * Remove the nport from the domain's * sparse vector lookup table */ xa_erase(&domain->lookup, nport->fc_id); efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL); if (efc_cmd_nport_free(efc, nport)) { efc_log_err(nport->efc, "efc_hw_port_free failed\n"); /* Not much we can do, free the nport anyways */ efc_nport_free(nport); } break; } default: __efc_nport_common(__func__, ctx, evt, arg); } } void __efc_nport_wait_port_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_nport *nport = ctx->app; nport_sm_trace(nport); switch (evt) { case EFC_EVT_NPORT_ATTACH_OK: /* Ignore as we are waiting for the free CB */ break; case EFC_EVT_NPORT_FREE_OK: { /* All done, free myself */ efc_nport_free(nport); break; } default: __efc_nport_common(__func__, ctx, evt, arg); } } static int efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport) { struct efc_nport *nport; lockdep_assert_held(&domain->efc->lock); nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id, vport->enable_ini, vport->enable_tgt); vport->nport = nport; if (!nport) return -EIO; kref_get(&nport->ref); nport->is_vport = true; nport->tgt_data = vport->tgt_data; nport->ini_data = vport->ini_data; efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL); return 0; } int efc_vport_start(struct 
efc_domain *domain) { struct efc *efc = domain->efc; struct efc_vport *vport; struct efc_vport *next; int rc = 0; unsigned long flags = 0; /* Use the vport spec to find the associated vports and start them */ spin_lock_irqsave(&efc->vport_lock, flags); list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) { if (!vport->nport) { if (efc_vport_nport_alloc(domain, vport)) rc = -EIO; } } spin_unlock_irqrestore(&efc->vport_lock, flags); return rc; } int efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn, u32 fc_id, bool ini, bool tgt, void *tgt_data, void *ini_data) { struct efc *efc = domain->efc; struct efc_vport *vport; int rc = 0; unsigned long flags = 0; if (ini && domain->efc->enable_ini == 0) { efc_log_debug(efc, "driver initiator mode not enabled\n"); return -EIO; } if (tgt && domain->efc->enable_tgt == 0) { efc_log_debug(efc, "driver target mode not enabled\n"); return -EIO; } /* * Create a vport spec if we need to recreate * this vport after a link up event */ vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt, tgt_data, ini_data); if (!vport) { efc_log_err(efc, "failed to create vport object entry\n"); return -EIO; } spin_lock_irqsave(&efc->lock, flags); rc = efc_vport_nport_alloc(domain, vport); spin_unlock_irqrestore(&efc->lock, flags); return rc; } int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, u64 wwpn, uint64_t wwnn) { struct efc_nport *nport; struct efc_vport *vport; struct efc_vport *next; unsigned long flags = 0; spin_lock_irqsave(&efc->vport_lock, flags); /* walk the efc_vport_list and remove from there */ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) { if (vport->wwpn == wwpn && vport->wwnn == wwnn) { list_del(&vport->list_entry); kfree(vport); break; } } spin_unlock_irqrestore(&efc->vport_lock, flags); if (!domain) { /* No domain means no nport to look for */ return 0; } spin_lock_irqsave(&efc->lock, flags); list_for_each_entry(nport, &domain->nport_list, list_entry) { if (nport->wwpn == wwpn && nport->wwnn == wwnn) { kref_put(&nport->ref, nport->release); /* Shutdown this NPORT */ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); break; } } spin_unlock_irqrestore(&efc->lock, flags); return 0; } void efc_vport_del_all(struct efc *efc) { struct efc_vport *vport; struct efc_vport *next; unsigned long flags = 0; spin_lock_irqsave(&efc->vport_lock, flags); list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) { list_del(&vport->list_entry); kfree(vport); } spin_unlock_irqrestore(&efc->vport_lock, flags); } struct efc_vport * efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn, u32 fc_id, bool enable_ini, bool enable_tgt, void *tgt_data, void *ini_data) { struct efc_vport *vport; unsigned long flags = 0; /* * walk the efc_vport_list and return failure * if a valid(vport with non zero WWPN and WWNN) vport entry * is already created */ spin_lock_irqsave(&efc->vport_lock, flags); list_for_each_entry(vport, &efc->vport_list, list_entry) { if ((wwpn && vport->wwpn == wwpn) && (wwnn && vport->wwnn == wwnn)) { efc_log_err(efc, "VPORT %016llX %016llX already allocated\n", wwnn, wwpn); spin_unlock_irqrestore(&efc->vport_lock, flags); return NULL; } } vport = kzalloc(sizeof(*vport), GFP_ATOMIC); if (!vport) { spin_unlock_irqrestore(&efc->vport_lock, flags); return NULL; } vport->wwnn = wwnn; vport->wwpn = wwpn; vport->fc_id = fc_id; vport->enable_tgt = enable_tgt; vport->enable_ini = enable_ini; vport->tgt_data = tgt_data; vport->ini_data = ini_data; 
INIT_LIST_HEAD(&vport->list_entry); list_add_tail(&vport->list_entry, &efc->vport_list); spin_unlock_irqrestore(&efc->vport_lock, flags); return vport; }
linux-master: drivers/scsi/elx/libefc/efc_nport.c
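The header comment of efc_nport.c above lists when an nport reference is taken (nport allocation, vport association, remote node allocation, unsolicited frame processing) and when it is dropped. A minimal stand-alone sketch of that take/put discipline follows, using invented demo_* names and plain C11 atomics in user space instead of the kernel's struct kref; it is an illustration of the pattern, not the driver's code.

/*
 * Hypothetical sketch of the refcount discipline described in the efc_nport.c
 * header comment: each user of the object (allocator, remote node,
 * unsolicited frame) takes a reference and the object is freed only when the
 * last reference is dropped. Names are illustrative, not the driver's API.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_nport {
	atomic_int ref;				/* reference count, starts at 1 on alloc */
	void (*release)(struct demo_nport *);	/* called when the count hits zero */
};

static void demo_nport_release(struct demo_nport *nport)
{
	printf("last reference dropped, freeing nport\n");
	free(nport);
}

static struct demo_nport *demo_nport_alloc(void)
{
	struct demo_nport *nport = calloc(1, sizeof(*nport));

	if (!nport)
		return NULL;
	atomic_init(&nport->ref, 1);	/* reference held by the allocator */
	nport->release = demo_nport_release;
	return nport;
}

static void demo_nport_get(struct demo_nport *nport)
{
	atomic_fetch_add(&nport->ref, 1);
}

static void demo_nport_put(struct demo_nport *nport)
{
	/* atomic_fetch_sub() returns the previous value: 1 means we dropped
	 * the last reference, so run the release callback.
	 */
	if (atomic_fetch_sub(&nport->ref, 1) == 1)
		nport->release(nport);
}

int main(void)
{
	struct demo_nport *nport = demo_nport_alloc();

	if (!nport)
		return 1;
	demo_nport_get(nport);	/* e.g. a remote node is allocated */
	demo_nport_get(nport);	/* e.g. an unsolicited frame is being processed */

	demo_nport_put(nport);	/* frame processing done */
	demo_nport_put(nport);	/* remote node removed */
	demo_nport_put(nport);	/* nport removed: count hits zero, release runs */
	return 0;
}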
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ /* * Functions to build and send ELS/CT/BLS commands and responses. */ #include "efc.h" #include "efc_els.h" #include "../libefc_sli/sli4.h" #define EFC_LOG_ENABLE_ELS_TRACE(efc) \ (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0) #define node_els_trace() \ do { \ if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \ efc_log_info(efc, "[%s] %-20s\n", \ node->display_name, __func__); \ } while (0) #define els_io_printf(els, fmt, ...) \ efc_log_err((struct efc *)els->node->efc,\ "[%s] %-8s " fmt, \ els->node->display_name,\ els->display_name, ##__VA_ARGS__) #define EFC_ELS_RSP_LEN 1024 #define EFC_ELS_GID_PT_RSP_LEN 8096 struct efc_els_io_req * efc_els_io_alloc(struct efc_node *node, u32 reqlen) { return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN); } struct efc_els_io_req * efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen) { struct efc *efc; struct efc_els_io_req *els; unsigned long flags = 0; efc = node->efc; if (!node->els_io_enabled) { efc_log_err(efc, "els io alloc disabled\n"); return NULL; } els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC); if (!els) { atomic_add_return(1, &efc->els_io_alloc_failed_count); return NULL; } /* initialize refcount */ kref_init(&els->ref); els->release = _efc_els_io_free; /* populate generic io fields */ els->node = node; /* now allocate DMA for request and response */ els->io.req.size = reqlen; els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size, &els->io.req.phys, GFP_KERNEL); if (!els->io.req.virt) { mempool_free(els, efc->els_io_pool); return NULL; } els->io.rsp.size = rsplen; els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size, &els->io.rsp.phys, GFP_KERNEL); if (!els->io.rsp.virt) { dma_free_coherent(&efc->pci->dev, els->io.req.size, els->io.req.virt, els->io.req.phys); mempool_free(els, efc->els_io_pool); els = NULL; } if (els) { /* initialize fields */ els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES; /* add els structure to ELS IO list */ INIT_LIST_HEAD(&els->list_entry); spin_lock_irqsave(&node->els_ios_lock, flags); list_add_tail(&els->list_entry, &node->els_ios_list); spin_unlock_irqrestore(&node->els_ios_lock, flags); } return els; } void efc_els_io_free(struct efc_els_io_req *els) { kref_put(&els->ref, els->release); } void _efc_els_io_free(struct kref *arg) { struct efc_els_io_req *els = container_of(arg, struct efc_els_io_req, ref); struct efc *efc; struct efc_node *node; int send_empty_event = false; unsigned long flags = 0; node = els->node; efc = node->efc; spin_lock_irqsave(&node->els_ios_lock, flags); list_del(&els->list_entry); /* Send list empty event if the IO allocator * is disabled, and the list is empty * If node->els_io_enabled was not checked, * the event would be posted continually */ send_empty_event = (!node->els_io_enabled && list_empty(&node->els_ios_list)); spin_unlock_irqrestore(&node->els_ios_lock, flags); /* free ELS request and response buffers */ dma_free_coherent(&efc->pci->dev, els->io.rsp.size, els->io.rsp.virt, els->io.rsp.phys); dma_free_coherent(&efc->pci->dev, els->io.req.size, els->io.req.virt, els->io.req.phys); mempool_free(els, efc->els_io_pool); if (send_empty_event) efc_scsi_io_list_empty(node->efc, node); } static void efc_els_retry(struct efc_els_io_req *els); static void efc_els_delay_timer_cb(struct timer_list *t) { struct efc_els_io_req *els = from_timer(els, t, 
delay_timer); /* Retry delay timer expired, retry the ELS request */ efc_els_retry(els); } static int efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status) { struct efc_els_io_req *els; struct efc_node *node; struct efc *efc; struct efc_node_cb cbdata; u32 reason_code; els = arg; node = els->node; efc = node->efc; if (status) els_io_printf(els, "status x%x ext x%x\n", status, ext_status); /* set the response len element of els->rsp */ els->io.rsp.len = length; cbdata.status = status; cbdata.ext_status = ext_status; cbdata.header = NULL; cbdata.els_rsp = els->io.rsp; /* set the response len element of els->rsp */ cbdata.rsp_len = length; /* FW returns the number of bytes received on the link in * the WCQE, not the amount placed in the buffer; use this info to * check if there was an overrun. */ if (length > els->io.rsp.size) { efc_log_warn(efc, "ELS response returned len=%d > buflen=%zu\n", length, els->io.rsp.size); efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); return 0; } /* Post event to ELS IO object */ switch (status) { case SLI4_FC_WCQE_STATUS_SUCCESS: efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata); break; case SLI4_FC_WCQE_STATUS_LS_RJT: reason_code = (ext_status >> 16) & 0xff; /* delay and retry if reason code is Logical Busy */ switch (reason_code) { case ELS_RJT_BUSY: els->node->els_req_cnt--; els_io_printf(els, "LS_RJT Logical Busy, delay and retry\n"); timer_setup(&els->delay_timer, efc_els_delay_timer_cb, 0); mod_timer(&els->delay_timer, jiffies + msecs_to_jiffies(5000)); break; default: efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT, &cbdata); break; } break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: switch (ext_status) { case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT: efc_els_retry(els); break; default: efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n", ext_status); efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); break; } break; default: /* Other error */ efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n", status, ext_status); efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); break; } return 0; } void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status, u32 ext_status) { struct efc_els_io_req *els = container_of(io, struct efc_els_io_req, io); WARN_ON_ONCE(!els->cb); ((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status); } static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els, enum efc_disc_io_type io_type) { int rc = 0; struct efc *efc = node->efc; struct efc_node_cb cbdata; /* update ELS request counter */ els->node->els_req_cnt++; /* Prepare the IO request details */ els->io.io_type = io_type; els->io.xmit_len = els->io.req.size; els->io.rsp_len = els->io.rsp.size; els->io.rpi = node->rnode.indicator; els->io.vpi = node->nport->indicator; els->io.s_id = node->nport->fc_id; els->io.d_id = node->rnode.fc_id; if (node->rnode.attached) els->io.rpi_registered = true; els->cb = efc_els_req_cb; rc = efc->tt.send_els(efc, &els->io); if (!rc) return rc; cbdata.status = EFC_STATUS_INVALID; cbdata.ext_status = EFC_STATUS_INVALID; cbdata.els_rsp = els->io.rsp; efc_log_err(efc, "efc_els_send failed: %d\n", rc); efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); return rc; } static void efc_els_retry(struct efc_els_io_req *els) { struct efc *efc; struct efc_node_cb cbdata; u32 rc; efc = els->node->efc; cbdata.status = EFC_STATUS_INVALID; cbdata.ext_status = EFC_STATUS_INVALID; cbdata.els_rsp = els->io.rsp; if (els->els_retries_remaining) { 
els->els_retries_remaining--; rc = efc->tt.send_els(efc, &els->io); } else { rc = -EIO; } if (rc) { efc_log_err(efc, "ELS retries exhausted\n"); efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata); } } static int efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status) { struct efc_els_io_req *els; struct efc_node *node; struct efc *efc; struct efc_node_cb cbdata; els = arg; node = els->node; efc = node->efc; cbdata.status = status; cbdata.ext_status = ext_status; cbdata.header = NULL; cbdata.els_rsp = els->io.rsp; /* Post node event */ switch (status) { case SLI4_FC_WCQE_STATUS_SUCCESS: efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata); break; default: /* Other error */ efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n", node->display_name, els->display_name, status, ext_status); efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata); break; } return 0; } static int efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen) { int rc = 0; struct efc_node_cb cbdata; struct efc_node *node = els->node; struct efc *efc = node->efc; /* increment ELS completion counter */ node->els_cmpl_cnt++; els->io.io_type = EFC_DISC_IO_ELS_RESP; els->cb = efc_els_acc_cb; /* Prepare the IO request details */ els->io.xmit_len = rsplen; els->io.rsp_len = els->io.rsp.size; els->io.rpi = node->rnode.indicator; els->io.vpi = node->nport->indicator; if (node->nport->fc_id != U32_MAX) els->io.s_id = node->nport->fc_id; else els->io.s_id = els->io.iparam.els.s_id; els->io.d_id = node->rnode.fc_id; if (node->attached) els->io.rpi_registered = true; rc = efc->tt.send_els(efc, &els->io); if (!rc) return rc; cbdata.status = EFC_STATUS_INVALID; cbdata.ext_status = EFC_STATUS_INVALID; cbdata.els_rsp = els->io.rsp; efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata); return rc; } int efc_send_plogi(struct efc_node *node) { struct efc_els_io_req *els; struct efc *efc = node->efc; struct fc_els_flogi *plogi; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*plogi)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "plogi"; /* Build PLOGI request */ plogi = els->io.req.virt; memcpy(plogi, node->nport->service_params, sizeof(*plogi)); plogi->fl_cmd = ELS_PLOGI; memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd)); return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_flogi(struct efc_node *node) { struct efc_els_io_req *els; struct efc *efc; struct fc_els_flogi *flogi; efc = node->efc; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*flogi)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "flogi"; /* Build FLOGI request */ flogi = els->io.req.virt; memcpy(flogi, node->nport->service_params, sizeof(*flogi)); flogi->fl_cmd = ELS_FLOGI; memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd)); return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_fdisc(struct efc_node *node) { struct efc_els_io_req *els; struct efc *efc; struct fc_els_flogi *fdisc; efc = node->efc; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*fdisc)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "fdisc"; /* Build FDISC request */ fdisc = els->io.req.virt; memcpy(fdisc, node->nport->service_params, sizeof(*fdisc)); fdisc->fl_cmd = ELS_FDISC; memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd)); return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_prli(struct efc_node *node) { struct efc *efc = node->efc; struct efc_els_io_req 
*els; struct { struct fc_els_prli prli; struct fc_els_spp spp; } *pp; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*pp)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "prli"; /* Build PRLI request */ pp = els->io.req.virt; memset(pp, 0, sizeof(*pp)); pp->prli.prli_cmd = ELS_PRLI; pp->prli.prli_spp_len = 16; pp->prli.prli_len = cpu_to_be16(sizeof(*pp)); pp->spp.spp_type = FC_TYPE_FCP; pp->spp.spp_type_ext = 0; pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR; pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS | (node->nport->enable_ini ? FCP_SPPF_INIT_FCN : 0) | (node->nport->enable_tgt ? FCP_SPPF_TARG_FCN : 0)); return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_logo(struct efc_node *node) { struct efc *efc = node->efc; struct efc_els_io_req *els; struct fc_els_logo *logo; struct fc_els_flogi *sparams; node_els_trace(); sparams = (struct fc_els_flogi *)node->nport->service_params; els = efc_els_io_alloc(node, sizeof(*logo)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "logo"; /* Build LOGO request */ logo = els->io.req.virt; memset(logo, 0, sizeof(*logo)); logo->fl_cmd = ELS_LOGO; hton24(logo->fl_n_port_id, node->rnode.nport->fc_id); logo->fl_n_port_wwn = sparams->fl_wwpn; return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_adisc(struct efc_node *node) { struct efc *efc = node->efc; struct efc_els_io_req *els; struct fc_els_adisc *adisc; struct fc_els_flogi *sparams; struct efc_nport *nport = node->nport; node_els_trace(); sparams = (struct fc_els_flogi *)node->nport->service_params; els = efc_els_io_alloc(node, sizeof(*adisc)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "adisc"; /* Build ADISC request */ adisc = els->io.req.virt; memset(adisc, 0, sizeof(*adisc)); adisc->adisc_cmd = ELS_ADISC; hton24(adisc->adisc_hard_addr, nport->fc_id); adisc->adisc_wwpn = sparams->fl_wwpn; adisc->adisc_wwnn = sparams->fl_wwnn; hton24(adisc->adisc_port_id, node->rnode.nport->fc_id); return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_scr(struct efc_node *node) { struct efc_els_io_req *els; struct efc *efc = node->efc; struct fc_els_scr *req; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*req)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->display_name = "scr"; req = els->io.req.virt; memset(req, 0, sizeof(*req)); req->scr_cmd = ELS_SCR; req->scr_reg_func = ELS_SCRF_FULL; return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ); } int efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code, u32 reason_code_expl, u32 vendor_unique) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct fc_els_ls_rjt *rjt; els = efc_els_io_alloc(node, sizeof(*rjt)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } node_els_trace(); els->display_name = "ls_rjt"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; rjt = els->io.req.virt; memset(rjt, 0, sizeof(*rjt)); rjt->er_cmd = ELS_LS_RJT; rjt->er_reason = reason_code; rjt->er_explan = reason_code_expl; return efc_els_send_rsp(els, sizeof(*rjt)); } int efc_send_plogi_acc(struct efc_node *node, u32 ox_id) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct fc_els_flogi *plogi; struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*plogi)); if (!els) { efc_log_err(efc, "els IO alloc 
failed\n"); return -EIO; } els->display_name = "plogi_acc"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; plogi = els->io.req.virt; /* copy our port's service parameters to payload */ memcpy(plogi, node->nport->service_params, sizeof(*plogi)); plogi->fl_cmd = ELS_LS_ACC; memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd)); /* Set Application header support bit if requested */ if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST)) plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST); return efc_els_send_rsp(els, sizeof(*plogi)); } int efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct fc_els_flogi *flogi; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*flogi)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } els->display_name = "flogi_p2p_acc"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; els->io.iparam.els.s_id = s_id; flogi = els->io.req.virt; /* copy our port's service parameters to payload */ memcpy(flogi, node->nport->service_params, sizeof(*flogi)); flogi->fl_cmd = ELS_LS_ACC; memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd)); memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp)); return efc_els_send_rsp(els, sizeof(*flogi)); } int efc_send_prli_acc(struct efc_node *node, u32 ox_id) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct { struct fc_els_prli prli; struct fc_els_spp spp; } *pp; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*pp)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } els->display_name = "prli_acc"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; pp = els->io.req.virt; memset(pp, 0, sizeof(*pp)); pp->prli.prli_cmd = ELS_LS_ACC; pp->prli.prli_spp_len = 0x10; pp->prli.prli_len = cpu_to_be16(sizeof(*pp)); pp->spp.spp_type = FC_TYPE_FCP; pp->spp.spp_type_ext = 0; pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK; pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS | (node->nport->enable_ini ? FCP_SPPF_INIT_FCN : 0) | (node->nport->enable_tgt ? 
FCP_SPPF_TARG_FCN : 0)); return efc_els_send_rsp(els, sizeof(*pp)); } int efc_send_prlo_acc(struct efc_node *node, u32 ox_id) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct { struct fc_els_prlo prlo; struct fc_els_spp spp; } *pp; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*pp)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } els->display_name = "prlo_acc"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; pp = els->io.req.virt; memset(pp, 0, sizeof(*pp)); pp->prlo.prlo_cmd = ELS_LS_ACC; pp->prlo.prlo_obs = 0x10; pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp)); pp->spp.spp_type = FC_TYPE_FCP; pp->spp.spp_type_ext = 0; pp->spp.spp_flags = FC_SPP_RESP_ACK; return efc_els_send_rsp(els, sizeof(*pp)); } int efc_send_ls_acc(struct efc_node *node, u32 ox_id) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct fc_els_ls_acc *acc; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*acc)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } els->display_name = "ls_acc"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; acc = els->io.req.virt; memset(acc, 0, sizeof(*acc)); acc->la_cmd = ELS_LS_ACC; return efc_els_send_rsp(els, sizeof(*acc)); } int efc_send_logo_acc(struct efc_node *node, u32 ox_id) { struct efc_els_io_req *els = NULL; struct efc *efc = node->efc; struct fc_els_ls_acc *logo; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*logo)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } els->display_name = "logo_acc"; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; logo = els->io.req.virt; memset(logo, 0, sizeof(*logo)); logo->la_cmd = ELS_LS_ACC; return efc_els_send_rsp(els, sizeof(*logo)); } int efc_send_adisc_acc(struct efc_node *node, u32 ox_id) { struct efc *efc = node->efc; struct efc_els_io_req *els = NULL; struct fc_els_adisc *adisc; struct fc_els_flogi *sparams; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*adisc)); if (!els) { efc_log_err(efc, "els IO alloc failed\n"); return -EIO; } els->display_name = "adisc_acc"; /* Go ahead and send the ELS_ACC */ memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.els.ox_id = ox_id; sparams = (struct fc_els_flogi *)node->nport->service_params; adisc = els->io.req.virt; memset(adisc, 0, sizeof(*adisc)); adisc->adisc_cmd = ELS_LS_ACC; adisc->adisc_wwpn = sparams->fl_wwpn; adisc->adisc_wwnn = sparams->fl_wwnn; hton24(adisc->adisc_port_id, node->rnode.nport->fc_id); return efc_els_send_rsp(els, sizeof(*adisc)); } static inline void fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size) { hdr->ct_rev = FC_CT_REV; hdr->ct_fs_type = FC_FST_DIR; hdr->ct_fs_subtype = FC_NS_SUBTYPE; hdr->ct_options = 0; hdr->ct_cmd = cpu_to_be16(cmd); /* words */ hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32))); hdr->ct_reason = 0; hdr->ct_explan = 0; hdr->ct_vendor = 0; } int efc_ns_send_rftid(struct efc_node *node) { struct efc *efc = node->efc; struct efc_els_io_req *els; struct { struct fc_ct_hdr hdr; struct fc_ns_rft_id rftid; } *ct; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*ct)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ; els->io.iparam.ct.type = FC_TYPE_CT; els->io.iparam.ct.df_ctl = 0; els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT; els->display_name = "rftid"; ct = els->io.req.virt; memset(ct, 0, 
sizeof(*ct)); fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID, sizeof(struct fc_ns_rft_id)); hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id); ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] = cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW)); return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ); } int efc_ns_send_rffid(struct efc_node *node) { struct efc *efc = node->efc; struct efc_els_io_req *els; struct { struct fc_ct_hdr hdr; struct fc_ns_rff_id rffid; } *ct; node_els_trace(); els = efc_els_io_alloc(node, sizeof(*ct)); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ; els->io.iparam.ct.type = FC_TYPE_CT; els->io.iparam.ct.df_ctl = 0; els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT; els->display_name = "rffid"; ct = els->io.req.virt; memset(ct, 0, sizeof(*ct)); fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID, sizeof(struct fc_ns_rff_id)); hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id); if (node->nport->enable_ini) ct->rffid.fr_feat |= FCP_FEAT_INIT; if (node->nport->enable_tgt) ct->rffid.fr_feat |= FCP_FEAT_TARG; ct->rffid.fr_type = FC_TYPE_FCP; return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ); } int efc_ns_send_gidpt(struct efc_node *node) { struct efc_els_io_req *els = NULL; struct efc *efc = node->efc; struct { struct fc_ct_hdr hdr; struct fc_ns_gid_pt gidpt; } *ct; node_els_trace(); els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ; els->io.iparam.ct.type = FC_TYPE_CT; els->io.iparam.ct.df_ctl = 0; els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT; els->display_name = "gidpt"; ct = els->io.req.virt; memset(ct, 0, sizeof(*ct)); fcct_build_req_header(&ct->hdr, FC_NS_GID_PT, sizeof(struct fc_ns_gid_pt)); ct->gidpt.fn_pt_type = FC_TYPE_FCP; return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ); } void efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg) { /* don't want further events that could come; e.g. 
abort requests * from the node state machine; thus, disable state machine */ els->els_req_free = true; efc_node_post_els_resp(els->node, evt, arg); efc_els_io_free(els); } static int efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status) { struct efc_els_io_req *els = arg; efc_els_io_free(els); return 0; } int efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id, struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code, u32 reason_code_explanation) { struct efc_els_io_req *els = NULL; struct fc_ct_hdr *rsp = NULL; els = efc_els_io_alloc(node, 256); if (!els) { efc_log_err(efc, "IO alloc failed\n"); return -EIO; } rsp = els->io.rsp.virt; *rsp = *ct_hdr; fcct_build_req_header(rsp, cmd_rsp_code, 0); rsp->ct_reason = reason_code; rsp->ct_explan = reason_code_explanation; els->display_name = "ct_rsp"; els->cb = efc_ct_acc_cb; /* Prepare the IO request details */ els->io.io_type = EFC_DISC_IO_CT_RESP; els->io.xmit_len = sizeof(*rsp); els->io.rpi = node->rnode.indicator; els->io.d_id = node->rnode.fc_id; memset(&els->io.iparam, 0, sizeof(els->io.iparam)); els->io.iparam.ct.ox_id = ox_id; els->io.iparam.ct.r_ctl = 3; els->io.iparam.ct.type = FC_TYPE_CT; els->io.iparam.ct.df_ctl = 0; els->io.iparam.ct.timeout = 5; if (efc->tt.send_els(efc, &els->io)) { efc_els_io_free(els); return -EIO; } return 0; } int efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr) { struct sli_bls_params bls; struct fc_ba_acc *acc; struct efc *efc = node->efc; memset(&bls, 0, sizeof(bls)); bls.ox_id = be16_to_cpu(hdr->fh_ox_id); bls.rx_id = be16_to_cpu(hdr->fh_rx_id); bls.s_id = ntoh24(hdr->fh_d_id); bls.d_id = node->rnode.fc_id; bls.rpi = node->rnode.indicator; bls.vpi = node->nport->indicator; acc = (void *)bls.payload; acc->ba_ox_id = cpu_to_be16(bls.ox_id); acc->ba_rx_id = cpu_to_be16(bls.rx_id); acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX); return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls); }
linux-master: drivers/scsi/elx/libefc/efc_els.c
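efc_els_req_cb() and efc_els_retry() above implement a bounded retry policy: an LS_RJT with the logical-busy reason code re-arms the request behind a 5 second delay timer, a local-reject sequence timeout is retried right away, and the IO is failed once els_retries_remaining reaches zero. Below is a rough, synchronous user-space sketch of that shape, with invented demo_* names; the driver itself is asynchronous and timer driven, so this only illustrates the retry bookkeeping, not the real completion path.

/*
 * Sketch (assumptions: made-up names, user space, synchronous waits) of a
 * bounded-retry send loop with a fixed delay on "busy", loosely modeled on
 * efc_els_req_cb()/efc_els_retry().
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_ELS_DEFAULT_RETRIES 3
#define DEMO_ELS_BUSY_DELAY_SECS 5

enum demo_els_status { DEMO_ELS_OK, DEMO_ELS_BUSY, DEMO_ELS_FAIL };

/* Stand-in for sending an ELS request and waiting for its completion. */
static enum demo_els_status demo_send_els(int attempt)
{
	/* Pretend the fabric reports "logical busy" for the first two attempts. */
	return attempt < 2 ? DEMO_ELS_BUSY : DEMO_ELS_OK;
}

static bool demo_els_send_with_retries(void)
{
	int retries_remaining = DEMO_ELS_DEFAULT_RETRIES;
	int attempt = 0;

	for (;;) {
		switch (demo_send_els(attempt++)) {
		case DEMO_ELS_OK:
			return true;
		case DEMO_ELS_BUSY:
			/* busy reject: wait before retrying below */
			printf("busy, delaying %d seconds before retry\n",
			       DEMO_ELS_BUSY_DELAY_SECS);
			sleep(DEMO_ELS_BUSY_DELAY_SECS);
			break;
		case DEMO_ELS_FAIL:
			/* transient failure: retry below if budget remains */
			break;
		}
		if (retries_remaining-- == 0) {
			printf("retries exhausted\n");
			return false;
		}
	}
}

int main(void)
{
	return demo_els_send_with_retries() ? 0 : 1;
}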
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ /* * device_sm Node State Machine: Remote Device States */ #include "efc.h" #include "efc_device.h" #include "efc_fabric.h" void efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id) { int rc = EFC_SCSI_CALL_COMPLETE; struct efc *efc = node->efc; node->ls_acc_oxid = ox_id; node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI; /* * Wait for backend session registration * to complete before sending PRLI resp */ if (node->init) { efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n", node->display_name, node->wwpn, node->wwnn); if (node->nport->enable_tgt) rc = efc->tt.scsi_new_node(efc, node); } if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL); if (rc == EFC_SCSI_CALL_COMPLETE) efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL); } static void __efc_d_common(const char *funcname, struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = NULL; struct efc *efc = NULL; node = ctx->app; efc = node->efc; switch (evt) { /* Handle shutdown events */ case EFC_EVT_SHUTDOWN: efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name, funcname, efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name, funcname, efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO; efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name, funcname, efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO; efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; default: /* call default event handler common to all nodes */ __efc_node_common(funcname, ctx, evt, arg); } } static void __efc_d_wait_del_node(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); /* * State is entered when a node sends a delete initiator/target call * to the target-server/initiator-client and needs to wait for that * work to complete. */ node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); fallthrough; case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: case EFC_EVT_ALL_CHILD_NODES_FREE: /* These are expected events. 
*/ break; case EFC_EVT_NODE_DEL_INI_COMPLETE: case EFC_EVT_NODE_DEL_TGT_COMPLETE: /* * node has either been detached or is in the process * of being detached, * call common node's initiate cleanup function */ efc_node_initiate_cleanup(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_REQ_FAIL: /* Can happen as ELS IO IO's complete */ WARN_ON(!node->els_req_cnt); node->els_req_cnt--; break; /* ignore shutdown events as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; fallthrough; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); break; case EFC_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; default: __efc_d_common(__func__, ctx, evt, arg); } } static void __efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); fallthrough; case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: case EFC_EVT_ALL_CHILD_NODES_FREE: /* These are expected events. */ break; case EFC_EVT_NODE_DEL_INI_COMPLETE: case EFC_EVT_NODE_DEL_TGT_COMPLETE: efc_node_transition(node, __efc_d_wait_del_node, NULL); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_REQ_FAIL: /* Can happen as ELS IO IO's complete */ WARN_ON(!node->els_req_cnt); node->els_req_cnt--; break; /* ignore shutdown events as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; fallthrough; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); break; case EFC_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_initiate_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: { int rc = EFC_SCSI_CALL_COMPLETE; /* assume no wait needed */ node->els_io_enabled = false; /* make necessary delete upcall(s) */ if (node->init && !node->targ) { efc_log_info(node->efc, "[%s] delete (initiator) WWPN %s WWNN %s\n", node->display_name, node->wwpn, node->wwnn); efc_node_transition(node, __efc_d_wait_del_node, NULL); if (node->nport->enable_tgt) rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_INITIATOR_DELETED); if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0) efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); } else if (node->targ && !node->init) { efc_log_info(node->efc, "[%s] delete (target) WWPN %s WWNN %s\n", node->display_name, node->wwpn, node->wwnn); efc_node_transition(node, __efc_d_wait_del_node, NULL); if (node->nport->enable_ini) rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_TARGET_DELETED); if (rc == EFC_SCSI_CALL_COMPLETE) efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL); } else if (node->init && node->targ) { efc_log_info(node->efc, "[%s] delete (I+T) WWPN %s WWNN %s\n", node->display_name, node->wwpn, node->wwnn); efc_node_transition(node, __efc_d_wait_del_ini_tgt, NULL); if (node->nport->enable_tgt) rc = efc->tt.scsi_del_node(efc, node, 
EFC_SCSI_INITIATOR_DELETED); if (rc == EFC_SCSI_CALL_COMPLETE) efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); /* assume no wait needed */ rc = EFC_SCSI_CALL_COMPLETE; if (node->nport->enable_ini) rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_TARGET_DELETED); if (rc == EFC_SCSI_CALL_COMPLETE) efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL); } /* we've initiated the upcalls as needed, now kick off the node * detach to precipitate the aborting of outstanding exchanges * associated with said node * * Beware: if we've made upcall(s), we've already transitioned * to a new state by the time we execute this. * consider doing this before the upcalls? */ if (node->attached) { /* issue hw node free; don't care if succeeds right * away or sometime later, will check node->attached * later in shutdown process */ rc = efc_cmd_node_detach(efc, &node->rnode); if (rc < 0) node_printf(node, "Failed freeing HW node, rc=%d\n", rc); } /* if neither initiator nor target, proceed to cleanup */ if (!node->init && !node->targ) { /* * node has either been detached or is in * the process of being detached, * call common node's initiate cleanup function */ efc_node_initiate_cleanup(node); } break; } case EFC_EVT_ALL_CHILD_NODES_FREE: /* Ignore, this can happen if an ELS is * aborted while in a delay/retry state */ break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_loop(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_DOMAIN_ATTACH_OK: { /* send PLOGI automatically if initiator */ efc_node_init_device(node, true); break; } default: __efc_d_common(__func__, ctx, evt, arg); } } void efc_send_ls_acc_after_attach(struct efc_node *node, struct fc_frame_header *hdr, enum efc_node_send_ls_acc ls) { u16 ox_id = be16_to_cpu(hdr->fh_ox_id); /* Save the OX_ID for sending LS_ACC sometime later */ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE); node->ls_acc_oxid = ox_id; node->send_ls_acc = ls; node->ls_acc_did = ntoh24(hdr->fh_d_id); } void efc_process_prli_payload(struct efc_node *node, void *prli) { struct { struct fc_els_prli prli; struct fc_els_spp sp; } *pp; pp = prli; node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0; node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0; } void __efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_CMPL_FAIL: WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; case EFC_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */ WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; efc_node_transition(node, __efc_d_port_logged_in, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case 
EFC_EVT_SRRS_ELS_REQ_OK: case EFC_EVT_SRRS_ELS_REQ_RJT: case EFC_EVT_SRRS_ELS_REQ_FAIL: /* LOGO response received, sent shutdown */ if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; node_printf(node, "LOGO sent (evt=%s), shutdown node\n", efc_sm_event_name(evt)); /* sm: / post explicit logout */ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void efc_node_init_device(struct efc_node *node, bool send_plogi) { node->send_plogi = send_plogi; if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) && (node->rnode.fc_id != FC_FID_DOM_MGR)) { node->nodedb_state = __efc_d_init; efc_node_transition(node, __efc_node_paused, NULL); } else { efc_node_transition(node, __efc_d_init, NULL); } } static void efc_d_check_plogi_topology(struct efc_node *node, u32 d_id) { switch (node->nport->topology) { case EFC_NPORT_TOPO_P2P: /* we're not attached and nport is p2p, * need to attach */ efc_domain_attach(node->nport->domain, d_id); efc_node_transition(node, __efc_d_wait_domain_attach, NULL); break; case EFC_NPORT_TOPO_FABRIC: /* we're not attached and nport is fabric, domain * attach should have already been requested as part * of the fabric state machine, wait for it */ efc_node_transition(node, __efc_d_wait_domain_attach, NULL); break; case EFC_NPORT_TOPO_UNKNOWN: /* Two possibilities: * 1. received a PLOGI before our FLOGI has completed * (possible since completion comes in on another * CQ), thus we don't know what we're connected to * yet; transition to a state to wait for the * fabric node to tell us; * 2. PLOGI received before link went down and we * haven't performed domain attach yet. * Note: we cannot distinguish between 1. and 2. * so have to assume PLOGI * was received after link back up. */ node_printf(node, "received PLOGI, unknown topology did=0x%x\n", d_id); efc_node_transition(node, __efc_d_wait_topology_notify, NULL); break; default: node_printf(node, "received PLOGI, unexpected topology %d\n", node->nport->topology); } } void __efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * This state is entered when a node is instantiated, * either having been discovered from a name services query, * or having received a PLOGI/FLOGI. 
*/ switch (evt) { case EFC_EVT_ENTER: if (!node->send_plogi) break; /* only send if we have initiator capability, * and domain is attached */ if (node->nport->enable_ini && node->nport->domain->attached) { efc_send_plogi(node); efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL); } else { node_printf(node, "not sending plogi nport.ini=%d,", node->nport->enable_ini); node_printf(node, "domain attached=%d\n", node->nport->domain->attached); } break; case EFC_EVT_PLOGI_RCVD: { /* T, or I+T */ struct fc_frame_header *hdr = cbdata->header->dma.virt; int rc; efc_node_save_sparms(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PLOGI); /* domain not attached; several possibilities: */ if (!node->nport->domain->attached) { efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id)); break; } /* domain already attached */ rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; } case EFC_EVT_FDISC_RCVD: { __efc_d_common(__func__, ctx, evt, arg); break; } case EFC_EVT_FLOGI_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; u32 d_id = ntoh24(hdr->fh_d_id); /* sm: / save sparams, send FLOGI acc */ memcpy(node->nport->domain->flogi_service_params, cbdata->payload->dma.virt, sizeof(struct fc_els_flogi)); /* send FC LS_ACC response, override s_id */ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P); efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id); if (efc_p2p_setup(node->nport)) { node_printf(node, "p2p failed, shutting down node\n"); efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); break; } efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL); break; } case EFC_EVT_LOGO_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; if (!node->nport->domain->attached) { /* most likely a frame left over from before a link * down; drop and * shut node down w/ "explicit logout" so pending * frames are processed */ node_printf(node, "%s domain not attached, dropping\n", efc_sm_event_name(evt)); efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); break; } efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); break; } case EFC_EVT_PRLI_RCVD: case EFC_EVT_PRLO_RCVD: case EFC_EVT_PDISC_RCVD: case EFC_EVT_ADISC_RCVD: case EFC_EVT_RSCN_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; if (!node->nport->domain->attached) { /* most likely a frame left over from before a link * down; drop and shut node down w/ "explicit logout" * so pending frames are processed */ node_printf(node, "%s domain not attached, dropping\n", efc_sm_event_name(evt)); efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); break; } node_printf(node, "%s received, sending reject\n", efc_sm_event_name(evt)); efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0); break; } case EFC_EVT_FCP_CMD_RCVD: { /* note: problem, we're now expecting an ELS REQ completion * from both the LOGO and PLOGI */ if (!node->nport->domain->attached) { /* most likely a frame left over from before a * link down; drop and * shut node down w/ "explicit logout" so pending * frames are processed */ node_printf(node, "%s domain not attached, dropping\n", efc_sm_event_name(evt)); efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); break; } /* Send LOGO */ node_printf(node, "FCP_CMND received, send LOGO\n"); if 
(efc_send_logo(node)) { /* * failed to send LOGO, go ahead and cleanup node * anyways */ node_printf(node, "Failed to send LOGO\n"); efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); } else { /* sent LOGO, wait for response */ efc_node_transition(node, __efc_d_wait_logo_rsp, NULL); } break; } case EFC_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { int rc; struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_PLOGI_RCVD: { /* T, or I+T */ /* received PLOGI with svc parms, go ahead and attach node * when PLOGI that was sent ultimately completes, it'll be a * no-op * * If there is an outstanding PLOGI sent, can we set a flag * to indicate that we don't want to retry it if it times out? */ efc_node_save_sparms(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PLOGI); /* sm: domain->attached / efc_node_attach */ rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; } case EFC_EVT_PRLI_RCVD: /* I, or I+T */ /* sent PLOGI and before completion was seen, received the * PRLI from the remote node (WCQEs and RCQEs come in on * different queues and order of processing cannot be assumed) * Save OXID so PRLI can be sent after the attach and continue * to wait for PLOGI response */ efc_process_prli_payload(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PRLI); efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli, NULL); break; case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? 
*/ case EFC_EVT_PRLO_RCVD: case EFC_EVT_PDISC_RCVD: case EFC_EVT_FDISC_RCVD: case EFC_EVT_ADISC_RCVD: case EFC_EVT_RSCN_RCVD: case EFC_EVT_SCR_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; node_printf(node, "%s received, sending reject\n", efc_sm_event_name(evt)); efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0); break; } case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */ /* Completion from PLOGI sent */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / save sparams, efc_node_attach */ efc_node_save_sparms(node, cbdata->els_rsp.virt); rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ /* PLOGI failed, shutdown the node */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); break; case EFC_EVT_SRRS_ELS_REQ_RJT: /* Our PLOGI was rejected, this is ok in some cases */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; break; case EFC_EVT_FCP_CMD_RCVD: { /* not logged in yet and outstanding PLOGI so don't send LOGO, * just drop */ node_printf(node, "FCP_CMND received, drop\n"); break; } default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { int rc; struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* * Since we've received a PRLI, we have a port login and will * just need to wait for the PLOGI response to do the node * attach and then we can send the LS_ACC for the PRLI. If, * during this time, we receive FCP_CMNDs (which is possible * since we've already sent a PRLI and our peer may have * accepted). At this time, we are not waiting on any other * unsolicited frames to continue with the login process. Thus, * it will not hurt to hold frames here. 
*/ efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */ /* Completion from PLOGI sent */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / save sparams, efc_node_attach */ efc_node_save_sparms(node, cbdata->els_rsp.virt); rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ case EFC_EVT_SRRS_ELS_REQ_RJT: /* PLOGI failed, shutdown the node */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_domain_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { int rc; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_DOMAIN_ATTACH_OK: WARN_ON(!node->nport->domain->attached); /* sm: / efc_node_attach */ rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { int rc; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: { enum efc_nport_topology *topology = arg; WARN_ON(node->nport->domain->attached); WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); node_printf(node, "topology notification, topology=%d\n", *topology); /* At the time the PLOGI was received, the topology was unknown, * so we didn't know which node would perform the domain attach: * 1. The node from which the PLOGI was sent (p2p) or * 2. The node to which the FLOGI was sent (fabric). 
*/ if (*topology == EFC_NPORT_TOPO_P2P) { /* if this is p2p, need to attach to the domain using * the d_id from the PLOGI received */ efc_domain_attach(node->nport->domain, node->ls_acc_did); } /* else, if this is fabric, the domain attach * should be performed by the fabric node (node sending FLOGI); * just wait for attach to complete */ efc_node_transition(node, __efc_d_wait_domain_attach, NULL); break; } case EFC_EVT_DOMAIN_ATTACH_OK: WARN_ON(!node->nport->domain->attached); node_printf(node, "domain attach ok\n"); /* sm: / efc_node_attach */ rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_node_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_NODE_ATTACH_OK: node->attached = true; switch (node->send_ls_acc) { case EFC_NODE_SEND_LS_ACC_PLOGI: { /* sm: send_plogi_acc is set / send PLOGI acc */ /* Normal case for T, or I+T */ efc_send_plogi_acc(node, node->ls_acc_oxid); efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl, NULL); node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; node->ls_acc_io = NULL; break; } case EFC_NODE_SEND_LS_ACC_PRLI: { efc_d_send_prli_rsp(node, node->ls_acc_oxid); node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; node->ls_acc_io = NULL; break; } case EFC_NODE_SEND_LS_ACC_NONE: default: /* Normal case for I */ /* sm: send_plogi_acc is not set / send PLOGI acc */ efc_node_transition(node, __efc_d_port_logged_in, NULL); break; } break; case EFC_EVT_NODE_ATTACH_FAIL: /* node attach failed, shutdown the node */ node->attached = false; node_printf(node, "node attach failed\n"); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; /* Handle shutdown events */ case EFC_EVT_SHUTDOWN: node_printf(node, "%s received\n", efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_node_transition(node, __efc_d_wait_attach_evt_shutdown, NULL); break; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO; efc_node_transition(node, __efc_d_wait_attach_evt_shutdown, NULL); break; case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO; efc_node_transition(node, __efc_d_wait_attach_evt_shutdown, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; /* wait for any of these attach events and then shutdown */ case EFC_EVT_NODE_ATTACH_OK: node->attached = true; node_printf(node, "Attach evt=%s, proceed to shutdown\n", efc_sm_event_name(evt)); efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; case EFC_EVT_NODE_ATTACH_FAIL: /* node attach failed, shutdown the node */ node->attached = false; node_printf(node, "Attach evt=%s, proceed to shutdown\n", 
efc_sm_event_name(evt)); efc_node_transition(node, __efc_d_initiate_shutdown, NULL); break; /* ignore shutdown events as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; fallthrough; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_port_logged_in(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* Normal case for I or I+T */ if (node->nport->enable_ini && !(node->rnode.fc_id != FC_FID_DOM_MGR)) { /* sm: if enable_ini / send PRLI */ efc_send_prli(node); /* can now expect ELS_REQ_OK/FAIL/RJT */ } break; case EFC_EVT_FCP_CMD_RCVD: { break; } case EFC_EVT_PRLI_RCVD: { /* Normal case for T or I+T */ struct fc_frame_header *hdr = cbdata->header->dma.virt; struct { struct fc_els_prli prli; struct fc_els_spp sp; } *pp; pp = cbdata->payload->dma.virt; if (pp->sp.spp_type != FC_TYPE_FCP) { /*Only FCP is supported*/ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0); break; } efc_process_prli_payload(node, cbdata->payload->dma.virt); efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id)); break; } case EFC_EVT_NODE_SESS_REG_OK: if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI) efc_send_prli_acc(node, node->ls_acc_oxid); node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; efc_node_transition(node, __efc_d_device_ready, NULL); break; case EFC_EVT_NODE_SESS_REG_FAIL: efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0); node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; break; case EFC_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */ /* Normal case for I or I+T */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / process PRLI payload */ efc_process_prli_payload(node, cbdata->els_rsp.virt); efc_node_transition(node, __efc_d_device_ready, NULL); break; } case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */ /* I, I+T, assume some link failure, shutdown node */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); break; } case EFC_EVT_SRRS_ELS_REQ_RJT: { /* PRLI rejected by remote * Normal for I, I+T (connected to an I) * Node doesn't want to be a target, stay here and wait for a * PRLI from the remote node * if it really wants to connect to us as target */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; break; } case EFC_EVT_SRRS_ELS_CMPL_OK: { /* Normal T, I+T, target-server rejected the process login */ /* This would be received only in the case where we sent * LS_RJT for the PRLI, so * do nothing. 
(note: as T only we could shutdown the node) */ WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; break; } case EFC_EVT_PLOGI_RCVD: { /*sm: / save sparams, set send_plogi_acc, *post implicit logout * Save plogi parameters */ efc_node_save_sparms(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PLOGI); /* Restart node attach with new service parameters, * and send ACC */ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL); break; } case EFC_EVT_LOGO_RCVD: { /* I, T, I+T */ struct fc_frame_header *hdr = cbdata->header->dma.virt; node_printf(node, "%s received attached=%d\n", efc_sm_event_name(evt), node->attached); /* sm: / send LOGO acc */ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); break; } default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_CMPL_OK: case EFC_EVT_SRRS_ELS_CMPL_FAIL: /* sm: / post explicit logout */ WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_device_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); if (evt != EFC_EVT_FCP_CMD_RCVD) node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: node->fcp_enabled = true; if (node->targ) { efc_log_info(efc, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name, node->wwpn, node->wwnn); if (node->nport->enable_ini) efc->tt.scsi_new_node(efc, node); } break; case EFC_EVT_EXIT: node->fcp_enabled = false; break; case EFC_EVT_PLOGI_RCVD: { /* sm: / save sparams, set send_plogi_acc, post implicit * logout * Save plogi parameters */ efc_node_save_sparms(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PLOGI); /* * Restart node attach with new service parameters, * and send ACC */ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL); break; } case EFC_EVT_PRLI_RCVD: { /* T, I+T: remote initiator is slow to get started */ struct fc_frame_header *hdr = cbdata->header->dma.virt; struct { struct fc_els_prli prli; struct fc_els_spp sp; } *pp; pp = cbdata->payload->dma.virt; if (pp->sp.spp_type != FC_TYPE_FCP) { /*Only FCP is supported*/ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0); break; } efc_process_prli_payload(node, cbdata->payload->dma.virt); efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id)); break; } case EFC_EVT_PRLO_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* sm: / send PRLO acc */ efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id)); /* need implicit logout? 
*/ break; } case EFC_EVT_LOGO_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; node_printf(node, "%s received attached=%d\n", efc_sm_event_name(evt), node->attached); /* sm: / send LOGO acc */ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); break; } case EFC_EVT_ADISC_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* sm: / send ADISC acc */ efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id)); break; } case EFC_EVT_ABTS_RCVD: /* sm: / process ABTS */ efc_log_err(efc, "Unexpected event:%s\n", efc_sm_event_name(evt)); break; case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: break; case EFC_EVT_NODE_REFOUND: break; case EFC_EVT_NODE_MISSING: if (node->nport->enable_rscn) efc_node_transition(node, __efc_d_device_gone, NULL); break; case EFC_EVT_SRRS_ELS_CMPL_OK: /* T, or I+T, PRLI accept completed ok */ WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; break; case EFC_EVT_SRRS_ELS_CMPL_FAIL: /* T, or I+T, PRLI accept failed to complete */ WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; node_printf(node, "Failed to send PRLI LS_ACC\n"); break; default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_device_gone(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: { int rc = EFC_SCSI_CALL_COMPLETE; int rc_2 = EFC_SCSI_CALL_COMPLETE; static const char * const labels[] = { "none", "initiator", "target", "initiator+target" }; efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name, labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn); switch (efc_node_get_enable(node)) { case EFC_NODE_ENABLE_T_TO_T: case EFC_NODE_ENABLE_I_TO_T: case EFC_NODE_ENABLE_IT_TO_T: rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_TARGET_MISSING); break; case EFC_NODE_ENABLE_T_TO_I: case EFC_NODE_ENABLE_I_TO_I: case EFC_NODE_ENABLE_IT_TO_I: rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_INITIATOR_MISSING); break; case EFC_NODE_ENABLE_T_TO_IT: rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_INITIATOR_MISSING); break; case EFC_NODE_ENABLE_I_TO_IT: rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_TARGET_MISSING); break; case EFC_NODE_ENABLE_IT_TO_IT: rc = efc->tt.scsi_del_node(efc, node, EFC_SCSI_INITIATOR_MISSING); rc_2 = efc->tt.scsi_del_node(efc, node, EFC_SCSI_TARGET_MISSING); break; default: rc = EFC_SCSI_CALL_COMPLETE; break; } if (rc == EFC_SCSI_CALL_COMPLETE && rc_2 == EFC_SCSI_CALL_COMPLETE) efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL); break; } case EFC_EVT_NODE_REFOUND: /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */ /* reauthenticate with PLOGI/PRLI */ /* efc_node_transition(node, __efc_d_discovered, NULL); */ /* reauthenticate with ADISC */ /* sm: / send ADISC */ efc_send_adisc(node); efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL); break; case EFC_EVT_PLOGI_RCVD: { /* sm: / save sparams, set send_plogi_acc, post implicit * logout * Save plogi parameters */ efc_node_save_sparms(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PLOGI); /* * Restart node attach with new service parameters, and send * ACC */ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL); break; } case EFC_EVT_FCP_CMD_RCVD: { /* most likely a stale frame (received prior to link down), * if attempt to send LOGO, will 
probably timeout and eat * up 20s; thus, drop FCP_CMND */ node_printf(node, "FCP_CMND received, drop\n"); break; } case EFC_EVT_LOGO_RCVD: { /* I, T, I+T */ struct fc_frame_header *hdr = cbdata->header->dma.virt; node_printf(node, "%s received attached=%d\n", efc_sm_event_name(evt), node->attached); /* sm: / send LOGO acc */ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); break; } default: __efc_d_common(__func__, ctx, evt, arg); } } void __efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_node_transition(node, __efc_d_device_ready, NULL); break; case EFC_EVT_SRRS_ELS_REQ_RJT: /* received an LS_RJT, in this case, send shutdown * (explicit logo) event which will unregister the node, * and start over with PLOGI */ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC, __efc_d_common, __func__)) return; WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / post explicit logout */ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL); break; case EFC_EVT_LOGO_RCVD: { /* In this case, we have the equivalent of an LS_RJT for * the ADISC, so we need to abort the ADISC, and re-login * with PLOGI */ /* sm: / request abort, send LOGO acc */ struct fc_frame_header *hdr = cbdata->header->dma.virt; node_printf(node, "%s received attached=%d\n", efc_sm_event_name(evt), node->attached); efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id)); efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL); break; } default: __efc_d_common(__func__, ctx, evt, arg); } }
linux-master
drivers/scsi/elx/libefc/efc_device.c
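The efc_device.c state machine above routes every unsolicited frame and ELS completion through per-state handler functions that share a (ctx, evt, arg) signature, fall back to __efc_d_common() for anything the state does not handle, and move between states with efc_node_transition(). The standalone sketch below models only that dispatch pattern under stated assumptions: every name in it (toy_ctx, toy_state_init, toy_post_event, and so on) is invented for illustration and is not part of the libefc API.

#include <stdio.h>

/* Hypothetical, simplified event set -- not the kernel's EFC_EVT_* enum. */
enum toy_evt { TOY_EVT_ENTER, TOY_EVT_EXIT, TOY_EVT_PLOGI_RCVD, TOY_EVT_SHUTDOWN };

struct toy_ctx;
typedef void (*toy_state_fn)(struct toy_ctx *ctx, enum toy_evt evt, void *arg);

/* Minimal stand-in for struct efc_sm_ctx: current state plus app data. */
struct toy_ctx {
	toy_state_fn current;
	const char *node_name;
};

/* Deliver an event to the current state (analogous to efc_sm_post_event). */
static void toy_post_event(struct toy_ctx *ctx, enum toy_evt evt, void *arg)
{
	ctx->current(ctx, evt, arg);
}

/* EXIT the old state, switch, ENTER the new one (like efc_node_transition). */
static void toy_transition(struct toy_ctx *ctx, toy_state_fn next)
{
	toy_post_event(ctx, TOY_EVT_EXIT, NULL);
	ctx->current = next;
	toy_post_event(ctx, TOY_EVT_ENTER, NULL);
}

/* Catch-all for events a state does not handle, like __efc_d_common(). */
static void toy_common(const char *func, struct toy_ctx *ctx, enum toy_evt evt, void *arg)
{
	(void)arg;
	printf("[%s] %s: event %d not handled\n", ctx->node_name, func, evt);
}

static void toy_state_ready(struct toy_ctx *ctx, enum toy_evt evt, void *arg);

/* Initial state: wait for a PLOGI, then transition to "ready". */
static void toy_state_init(struct toy_ctx *ctx, enum toy_evt evt, void *arg)
{
	switch (evt) {
	case TOY_EVT_ENTER:
	case TOY_EVT_EXIT:
		break;
	case TOY_EVT_PLOGI_RCVD:
		printf("[%s] init: PLOGI received, going ready\n", ctx->node_name);
		toy_transition(ctx, toy_state_ready);
		break;
	default:
		toy_common(__func__, ctx, evt, arg);
	}
}

static void toy_state_ready(struct toy_ctx *ctx, enum toy_evt evt, void *arg)
{
	switch (evt) {
	case TOY_EVT_ENTER:
		printf("[%s] ready: entered\n", ctx->node_name);
		break;
	case TOY_EVT_SHUTDOWN:
		printf("[%s] ready: shutting down\n", ctx->node_name);
		break;
	default:
		toy_common(__func__, ctx, evt, arg);
	}
}

int main(void)
{
	struct toy_ctx node = { .current = toy_state_init, .node_name = "node0" };

	toy_post_event(&node, TOY_EVT_PLOGI_RCVD, NULL);
	toy_post_event(&node, TOY_EVT_SHUTDOWN, NULL);
	return 0;
}

The table-of-function-pointers approach keeps each state's event handling in one place and makes the default path (the common handler) explicit, which is the same design choice the libefc node and fabric state machines make.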
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ /* * This file implements remote node state machines for: * - Fabric logins. * - Fabric controller events. * - Name/directory services interaction. * - Point-to-point logins. */ /* * fabric_sm Node State Machine: Fabric States * ns_sm Node State Machine: Name/Directory Services States * p2p_sm Node State Machine: Point-to-Point Node States */ #include "efc.h" static void efc_fabric_initiate_shutdown(struct efc_node *node) { struct efc *efc = node->efc; node->els_io_enabled = false; if (node->attached) { int rc; /* issue hw node free; don't care if succeeds right away * or sometime later, will check node->attached later in * shutdown process */ rc = efc_cmd_node_detach(efc, &node->rnode); if (rc < 0) { node_printf(node, "Failed freeing HW node, rc=%d\n", rc); } } /* * node has either been detached or is in the process of being detached, * call common node's initiate cleanup function */ efc_node_initiate_cleanup(node); } static void __efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = NULL; node = ctx->app; switch (evt) { case EFC_EVT_DOMAIN_ATTACH_OK: break; case EFC_EVT_SHUTDOWN: node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; default: /* call default event handler common to all nodes */ __efc_node_common(funcname, ctx, evt, arg); } } void __efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_REENTER: efc_log_debug(efc, ">>> reenter !!\n"); fallthrough; case EFC_EVT_ENTER: /* send FLOGI */ efc_send_flogi(node); efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void efc_fabric_set_topology(struct efc_node *node, enum efc_nport_topology topology) { node->nport->topology = topology; } void efc_fabric_notify_topology(struct efc_node *node) { struct efc_node *tmp_node; unsigned long index; /* * now loop through the nodes in the nport * and send topology notification */ xa_for_each(&node->nport->lookup, index, tmp_node) { if (tmp_node != node) { efc_node_post_event(tmp_node, EFC_EVT_NPORT_TOPOLOGY_NOTIFY, &node->nport->topology); } } } static bool efc_rnode_is_nport(struct fc_els_flogi *rsp) { return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT); } void __efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; memcpy(node->nport->domain->flogi_service_params, cbdata->els_rsp.virt, sizeof(struct fc_els_flogi)); /* Check to see if the fabric is an F_PORT or and N_PORT */ if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) { /* sm: if not nport / efc_domain_attach */ /* ext_status has the fc_id, attach domain */ efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC); efc_fabric_notify_topology(node); WARN_ON(node->nport->domain->attached); efc_domain_attach(node->nport->domain, cbdata->ext_status); efc_node_transition(node, 
__efc_fabric_wait_domain_attach, NULL); break; } /* sm: if nport and p2p_winner / efc_domain_attach */ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P); if (efc_p2p_setup(node->nport)) { node_printf(node, "p2p setup failed, shutting down node\n"); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; } if (node->nport->p2p_winner) { efc_node_transition(node, __efc_p2p_wait_domain_attach, NULL); if (node->nport->domain->attached && !node->nport->domain->domain_notify_pend) { /* * already attached, * just send ATTACH_OK */ node_printf(node, "p2p winner, domain already attached\n"); efc_node_post_event(node, EFC_EVT_DOMAIN_ATTACH_OK, NULL); } } else { /* * peer is p2p winner; * PLOGI will be received on the * remote SID=1 node; * this node has served its purpose */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); } break; } case EFC_EVT_ELS_REQ_ABORTED: case EFC_EVT_SRRS_ELS_REQ_RJT: case EFC_EVT_SRRS_ELS_REQ_FAIL: { struct efc_nport *nport = node->nport; /* * with these errors, we have no recovery, * so shutdown the nport, leave the link * up and the domain ready */ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI, __efc_fabric_common, __func__)) { return; } node_printf(node, "FLOGI failed evt=%s, shutting down nport [%s]\n", efc_sm_event_name(evt), nport->display_name); WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_vport_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* sm: / send FDISC */ efc_send_fdisc(node); efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { /* fc_id is in ext_status */ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / efc_nport_attach */ efc_nport_attach(node->nport, cbdata->ext_status); efc_node_transition(node, __efc_fabric_wait_domain_attach, NULL); break; } case EFC_EVT_SRRS_ELS_REQ_RJT: case EFC_EVT_SRRS_ELS_REQ_FAIL: { if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_log_err(node->efc, "FDISC failed, shutting down nport\n"); /* sm: / shutdown nport */ efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } static int efc_start_ns_node(struct efc_nport *nport) { struct efc_node *ns; /* Instantiate a name services node */ ns = efc_node_find(nport, FC_FID_DIR_SERV); if (!ns) { ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false); if (!ns) return -EIO; } /* * for found ns, should we be transitioning from here? * breaks transition only * 1. from within state machine or * 2. 
if after alloc */ if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER) efc_node_pause(ns, __efc_ns_init); else efc_node_transition(ns, __efc_ns_init, NULL); return 0; } static int efc_start_fabctl_node(struct efc_nport *nport) { struct efc_node *fabctl; fabctl = efc_node_find(nport, FC_FID_FCTRL); if (!fabctl) { fabctl = efc_node_alloc(nport, FC_FID_FCTRL, false, false); if (!fabctl) return -EIO; } /* * for found ns, should we be transitioning from here? * breaks transition only * 1. from within state machine or * 2. if after alloc */ efc_node_transition(fabctl, __efc_fabctl_init, NULL); return 0; } void __efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_DOMAIN_ATTACH_OK: case EFC_EVT_NPORT_ATTACH_OK: { int rc; rc = efc_start_ns_node(node->nport); if (rc) return; /* sm: if enable_ini / start fabctl node */ /* Instantiate the fabric controller (sends SCR) */ if (node->nport->enable_rscn) { rc = efc_start_fabctl_node(node->nport); if (rc) return; } efc_node_transition(node, __efc_fabric_idle, NULL); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_DOMAIN_ATTACH_OK: break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* sm: / send PLOGI */ efc_send_plogi(node); efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { int rc; /* Save service parameters */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / save sparams, efc_node_attach */ efc_node_save_sparms(node, cbdata->els_rsp.virt); rc = efc_node_attach(node); efc_node_transition(node, __efc_ns_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_ns_wait_node_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_NODE_ATTACH_OK: node->attached = true; /* sm: / send RFTID */ efc_ns_send_rftid(node); efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL); break; case EFC_EVT_NODE_ATTACH_FAIL: /* node attach failed, shutdown the node */ node->attached = false; node_printf(node, "Node attach failed\n"); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; case EFC_EVT_SHUTDOWN: node_printf(node, "Shutdown event received\n"); 
node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_node_transition(node, __efc_fabric_wait_attach_evt_shutdown, NULL); break; /* * if receive RSCN just ignore, * we haven't sent GID_PT yet (ACC sent by fabctl node) */ case EFC_EVT_RSCN_RCVD: break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; /* wait for any of these attach events and then shutdown */ case EFC_EVT_NODE_ATTACH_OK: node->attached = true; node_printf(node, "Attach evt=%s, proceed to shutdown\n", efc_sm_event_name(evt)); efc_fabric_initiate_shutdown(node); break; case EFC_EVT_NODE_ATTACH_FAIL: node->attached = false; node_printf(node, "Attach evt=%s, proceed to shutdown\n", efc_sm_event_name(evt)); efc_fabric_initiate_shutdown(node); break; /* ignore shutdown event as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: node_printf(node, "Shutdown event received\n"); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / send RFFID */ efc_ns_send_rffid(node); efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL); break; /* * if receive RSCN just ignore, * we haven't sent GID_PT yet (ACC sent by fabctl node) */ case EFC_EVT_RSCN_RCVD: break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * Waits for an RFFID response event; * if rscn enabled, a GIDPT name services request is issued. 
*/ switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; if (node->nport->enable_rscn) { /* sm: if enable_rscn / send GIDPT */ efc_ns_send_gidpt(node); efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL); } else { /* if 'T' only, we're done, go to idle */ efc_node_transition(node, __efc_ns_idle, NULL); } break; } /* * if receive RSCN just ignore, * we haven't sent GID_PT yet (ACC sent by fabctl node) */ case EFC_EVT_RSCN_RCVD: break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } static int efc_process_gidpt_payload(struct efc_node *node, void *data, u32 gidpt_len) { u32 i, j; struct efc_node *newnode; struct efc_nport *nport = node->nport; struct efc *efc = node->efc; u32 port_id = 0, port_count, plist_count; struct efc_node *n; struct efc_node **active_nodes; int residual; struct { struct fc_ct_hdr hdr; struct fc_gid_pn_resp pn_rsp; } *rsp; struct fc_gid_pn_resp *gidpt; unsigned long index; rsp = data; gidpt = &rsp->pn_rsp; residual = be16_to_cpu(rsp->hdr.ct_mr_size); if (residual != 0) efc_log_debug(node->efc, "residual is %u words\n", residual); if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) { node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n", rsp->hdr.ct_reason, rsp->hdr.ct_explan); return -EIO; } plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt); /* Count the number of nodes */ port_count = 0; xa_for_each(&nport->lookup, index, n) { port_count++; } /* Allocate a buffer for all nodes */ active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC); if (!active_nodes) { node_printf(node, "efc_malloc failed\n"); return -EIO; } /* Fill buffer with fc_id of active nodes */ i = 0; xa_for_each(&nport->lookup, index, n) { port_id = n->rnode.fc_id; switch (port_id) { case FC_FID_FLOGI: case FC_FID_FCTRL: case FC_FID_DIR_SERV: break; default: if (port_id != FC_FID_DOM_MGR) active_nodes[i++] = n; break; } } /* update the active nodes buffer */ for (i = 0; i < plist_count; i++) { hton24(gidpt[i].fp_fid, port_id); for (j = 0; j < port_count; j++) { if (active_nodes[j] && port_id == active_nodes[j]->rnode.fc_id) { active_nodes[j] = NULL; } } if (gidpt[i].fp_resvd & FC_NS_FID_LAST) break; } /* Those remaining in the active_nodes[] are now gone ! */ for (i = 0; i < port_count; i++) { /* * if we're an initiator and the remote node * is a target, then post the node missing event. * if we're target and we have enabled * target RSCN, then post the node missing event. 
*/ if (!active_nodes[i]) continue; if ((node->nport->enable_ini && active_nodes[i]->targ) || (node->nport->enable_tgt && enable_target_rscn(efc))) { efc_node_post_event(active_nodes[i], EFC_EVT_NODE_MISSING, NULL); } else { node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n", active_nodes[i]->rnode.fc_id); } } kfree(active_nodes); for (i = 0; i < plist_count; i++) { hton24(gidpt[i].fp_fid, port_id); /* Don't create node for ourselves */ if (port_id == node->rnode.nport->fc_id) { if (gidpt[i].fp_resvd & FC_NS_FID_LAST) break; continue; } newnode = efc_node_find(nport, port_id); if (!newnode) { if (!node->nport->enable_ini) continue; newnode = efc_node_alloc(nport, port_id, false, false); if (!newnode) { efc_log_err(efc, "efc_node_alloc() failed\n"); return -EIO; } /* * send PLOGI automatically * if initiator */ efc_node_init_device(newnode, true); } if (node->nport->enable_ini && newnode->targ) { efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND, NULL); } if (gidpt[i].fp_resvd & FC_NS_FID_LAST) break; } return 0; } void __efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * Wait for a GIDPT response from the name server. Process the FC_IDs * that are reported by creating new remote ports, as needed. */ switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / process GIDPT payload */ efc_process_gidpt_payload(node, cbdata->els_rsp.virt, cbdata->els_rsp.len); efc_node_transition(node, __efc_ns_idle, NULL); break; } case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* not much we can do; will retry with the next RSCN */ node_printf(node, "GID_PT failed to complete\n"); WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_node_transition(node, __efc_ns_idle, NULL); break; } /* if receive RSCN here, queue up another discovery processing */ case EFC_EVT_RSCN_RCVD: { node_printf(node, "RSCN received during GID_PT processing\n"); node->rscn_pending = true; break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * Wait for RSCN received events (posted from the fabric controller) * and restart the GIDPT name services query and processing. 
*/ switch (evt) { case EFC_EVT_ENTER: if (!node->rscn_pending) break; node_printf(node, "RSCN pending, restart discovery\n"); node->rscn_pending = false; fallthrough; case EFC_EVT_RSCN_RCVD: { /* sm: / send GIDPT */ /* * If target RSCN processing is enabled, * and this is target only (not initiator), * and tgt_rscn_delay is non-zero, * then we delay issuing the GID_PT */ if (efc->tgt_rscn_delay_msec != 0 && !node->nport->enable_ini && node->nport->enable_tgt && enable_target_rscn(efc)) { efc_node_transition(node, __efc_ns_gidpt_delay, NULL); } else { efc_ns_send_gidpt(node); efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL); } break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } static void gidpt_delay_timer_cb(struct timer_list *t) { struct efc_node *node = from_timer(node, t, gidpt_delay_timer); del_timer(&node->gidpt_delay_timer); efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL); } void __efc_ns_gidpt_delay(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: { u64 delay_msec, tmp; /* * Compute the delay time. * Set to tgt_rscn_delay, if the time since last GIDPT * is less than tgt_rscn_period, then use tgt_rscn_period. */ delay_msec = efc->tgt_rscn_delay_msec; tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec; if (tmp < efc->tgt_rscn_period_msec) delay_msec = efc->tgt_rscn_period_msec; timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb, 0); mod_timer(&node->gidpt_delay_timer, jiffies + msecs_to_jiffies(delay_msec)); break; } case EFC_EVT_GIDPT_DELAY_EXPIRED: node->time_last_gidpt_msec = jiffies_to_msecs(jiffies); efc_ns_send_gidpt(node); efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL); break; case EFC_EVT_RSCN_RCVD: { efc_log_debug(efc, "RSCN received while in GIDPT delay - no action\n"); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_fabctl_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* no need to login to fabric controller, just send SCR */ efc_send_scr(node); efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL); break; case EFC_EVT_NODE_ATTACH_OK: node->attached = true; break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * Fabric controller node state machine: * Wait for an SCR response from the fabric controller. 
*/ switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; efc_node_transition(node, __efc_fabctl_ready, NULL); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } static void efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata) { struct efc *efc = node->efc; struct efc_nport *nport = node->nport; struct efc_node *ns; /* Forward this event to the name-services node */ ns = efc_node_find(nport, FC_FID_DIR_SERV); if (ns) efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata); else efc_log_warn(efc, "can't find name server node\n"); } void __efc_fabctl_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * Fabric controller node state machine: Ready. * In this state, the fabric controller sends a RSCN, which is received * by this node and is forwarded to the name services node object; and * the RSCN LS_ACC is sent. */ switch (evt) { case EFC_EVT_RSCN_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* * sm: / process RSCN (forward to name services node), * send LS_ACC */ efc_process_rscn(node, cbdata); efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id)); efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl, NULL); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_CMPL_OK: WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; efc_node_transition(node, __efc_fabctl_ready, NULL); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } static uint64_t efc_get_wwpn(struct fc_els_flogi *sp) { return be64_to_cpu(sp->fl_wwnn); } static int efc_rnode_is_winner(struct efc_nport *nport) { struct fc_els_flogi *remote_sp; u64 remote_wwpn; u64 local_wwpn = nport->wwpn; u64 wwn_bump = 0; remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params; remote_wwpn = efc_get_wwpn(remote_sp); local_wwpn ^= wwn_bump; efc_log_debug(nport->efc, "r: %llx\n", be64_to_cpu(remote_sp->fl_wwpn)); efc_log_debug(nport->efc, "l: %llx\n", local_wwpn); if (remote_wwpn == local_wwpn) { efc_log_warn(nport->efc, "WWPN of remote node [%08x %08x] matches local WWPN\n", (u32)(local_wwpn >> 32ll), (u32)local_wwpn); return -1; } return (remote_wwpn > local_wwpn); } void __efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_DOMAIN_ATTACH_OK: { struct efc_nport *nport = node->nport; struct efc_node *rnode; /* * this transient node (SID=0 (recv'd FLOGI) * or DID=fabric (sent FLOGI)) * is the p2p winner, will use a separate node * to send PLOGI to peer */ WARN_ON(!node->nport->p2p_winner); rnode = efc_node_find(nport, node->nport->p2p_remote_port_id); if (rnode) { /* * the "other" transient p2p node has * already kicked off the * new node from which PLOGI is sent */ node_printf(node, 
"Node with fc_id x%x already exists\n", rnode->rnode.fc_id); } else { /* * create new node (SID=1, DID=2) * from which to send PLOGI */ rnode = efc_node_alloc(nport, nport->p2p_remote_port_id, false, false); if (!rnode) { efc_log_err(efc, "node alloc failed\n"); return; } efc_fabric_notify_topology(node); /* sm: / allocate p2p remote node */ efc_node_transition(rnode, __efc_p2p_rnode_init, NULL); } /* * the transient node (SID=0 or DID=fabric) * has served its purpose */ if (node->rnode.fc_id == 0) { /* * if this is the SID=0 node, * move to the init state in case peer * has restarted FLOGI discovery and FLOGI is pending */ /* don't send PLOGI on efc_d_init entry */ efc_node_init_device(node, false); } else { /* * if this is the DID=fabric node * (we initiated FLOGI), shut it down */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); } break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_p2p_rnode_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* sm: / send PLOGI */ efc_send_plogi(node); efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL); break; case EFC_EVT_ABTS_RCVD: /* sm: send BA_ACC */ efc_send_bls_acc(node, cbdata->header->dma.virt); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_CMPL_OK: WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; /* sm: if p2p_winner / domain_attach */ if (node->nport->p2p_winner) { efc_node_transition(node, __efc_p2p_wait_domain_attach, NULL); if (!node->nport->domain->attached) { node_printf(node, "Domain not attached\n"); efc_domain_attach(node->nport->domain, node->nport->p2p_port_id); } else { node_printf(node, "Domain already attached\n"); efc_node_post_event(node, EFC_EVT_DOMAIN_ATTACH_OK, NULL); } } else { /* this node has served its purpose; * we'll expect a PLOGI on a separate * node (remote SID=0x1); return this node * to init state in case peer * restarts discovery -- it may already * have (pending frames may exist). 
*/ /* don't send PLOGI on efc_d_init entry */ efc_node_init_device(node, false); } break; case EFC_EVT_SRRS_ELS_CMPL_FAIL: /* * LS_ACC failed, possibly due to link down; * shutdown node and wait * for FLOGI discovery to restart */ node_printf(node, "FLOGI LS_ACC failed, shutting down\n"); WARN_ON(!node->els_cmpl_cnt); node->els_cmpl_cnt--; node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; case EFC_EVT_ABTS_RCVD: { /* sm: / send BA_ACC */ efc_send_bls_acc(node, cbdata->header->dma.virt); break; } default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_SRRS_ELS_REQ_OK: { int rc; if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / save sparams, efc_node_attach */ efc_node_save_sparms(node, cbdata->els_rsp.virt); rc = efc_node_attach(node); efc_node_transition(node, __efc_p2p_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; } case EFC_EVT_SRRS_ELS_REQ_FAIL: { if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_fabric_common, __func__)) { return; } node_printf(node, "PLOGI failed, shutting down\n"); WARN_ON(!node->els_req_cnt); node->els_req_cnt--; node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; } case EFC_EVT_PLOGI_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* if we're in external loopback mode, just send LS_ACC */ if (node->efc->external_loopback) { efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id)); } else { /* * if this isn't external loopback, * pass to default handler */ __efc_fabric_common(__func__, ctx, evt, arg); } break; } case EFC_EVT_PRLI_RCVD: /* I, or I+T */ /* sent PLOGI and before completion was seen, received the * PRLI from the remote node (WCQEs and RCQEs come in on * different queues and order of processing cannot be assumed) * Save OXID so PRLI can be sent after the attach and continue * to wait for PLOGI response */ efc_process_prli_payload(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PRLI); efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli, NULL); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: /* * Since we've received a PRLI, we have a port login and will * just need to wait for the PLOGI response to do the node * attach and then we can send the LS_ACC for the PRLI. If, * during this time, we receive FCP_CMNDs (which is possible * since we've already sent a PRLI and our peer may have * accepted). * At this time, we are not waiting on any other unsolicited * frames to continue with the login process. Thus, it will not * hurt to hold frames here. 
*/ efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */ int rc; /* Completion from PLOGI sent */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; /* sm: / save sparams, efc_node_attach */ efc_node_save_sparms(node, cbdata->els_rsp.virt); rc = efc_node_attach(node); efc_node_transition(node, __efc_p2p_wait_node_attach, NULL); if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); break; } case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */ case EFC_EVT_SRRS_ELS_REQ_RJT: /* PLOGI failed, shutdown the node */ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI, __efc_fabric_common, __func__)) { return; } WARN_ON(!node->els_req_cnt); node->els_req_cnt--; node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } void __efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node_cb *cbdata = arg; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_NODE_ATTACH_OK: node->attached = true; switch (node->send_ls_acc) { case EFC_NODE_SEND_LS_ACC_PRLI: { efc_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid); node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; node->ls_acc_io = NULL; break; } case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */ case EFC_NODE_SEND_LS_ACC_NONE: default: /* Normal case for I */ /* sm: send_plogi_acc is not set / send PLOGI acc */ efc_node_transition(node, __efc_d_port_logged_in, NULL); break; } break; case EFC_EVT_NODE_ATTACH_FAIL: /* node attach failed, shutdown the node */ node->attached = false; node_printf(node, "Node attach failed\n"); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_fabric_initiate_shutdown(node); break; case EFC_EVT_SHUTDOWN: node_printf(node, "%s received\n", efc_sm_event_name(evt)); node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; efc_node_transition(node, __efc_fabric_wait_attach_evt_shutdown, NULL); break; case EFC_EVT_PRLI_RCVD: node_printf(node, "%s: PRLI received before node is attached\n", efc_sm_event_name(evt)); efc_process_prli_payload(node, cbdata->payload->dma.virt); efc_send_ls_acc_after_attach(node, cbdata->header->dma.virt, EFC_NODE_SEND_LS_ACC_PRLI); break; default: __efc_fabric_common(__func__, ctx, evt, arg); } } int efc_p2p_setup(struct efc_nport *nport) { struct efc *efc = nport->efc; int rnode_winner; rnode_winner = efc_rnode_is_winner(nport); /* set nport flags to indicate p2p "winner" */ if (rnode_winner == 1) { nport->p2p_remote_port_id = 0; nport->p2p_port_id = 0; nport->p2p_winner = false; } else if (rnode_winner == 0) { nport->p2p_remote_port_id = 2; nport->p2p_port_id = 1; nport->p2p_winner = true; } else { /* no winner; only okay if external loopback enabled */ if (nport->efc->external_loopback) { /* * External loopback mode enabled; * local nport and remote node * will be registered with an NPortID = 1; */ efc_log_debug(efc, "External loopback mode enabled\n"); nport->p2p_remote_port_id = 1; nport->p2p_port_id = 1; nport->p2p_winner = true; } else { efc_log_warn(efc, "failed to determine p2p winner\n"); return rnode_winner; } } return 0; }
linux-master
drivers/scsi/elx/libefc/efc_fabric.c
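In the point-to-point path above, efc_p2p_setup() picks the p2p "winner" by comparing the local WWPN with the value recovered from the saved FLOGI service parameters (efc_rnode_is_winner()): the numerically larger name wins, takes local N_Port ID 1 and assigns the peer ID 2; the loser leaves both IDs at 0 and simply waits for the winner's PLOGI; identical names are tolerated only in external loopback, where both sides use ID 1. The sketch below restates that decision as a self-contained function; struct p2p_cfg and p2p_decide() are hypothetical names used for illustration, not part of libefc.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical result structure; libefc stores these on struct efc_nport. */
struct p2p_cfg {
	uint32_t local_port_id;
	uint32_t remote_port_id;
	bool winner;
};

/*
 * Mirror of the decision in efc_rnode_is_winner()/efc_p2p_setup():
 * the side with the larger WWPN becomes the p2p winner, uses N_Port ID 1
 * and hands ID 2 to the peer.  Equal WWPNs are only acceptable in
 * external loopback, where both ends use ID 1.
 * Returns 0 on success, -1 if no winner can be determined.
 */
static int p2p_decide(uint64_t local_wwpn, uint64_t remote_wwpn,
		      bool external_loopback, struct p2p_cfg *cfg)
{
	if (local_wwpn == remote_wwpn) {
		if (!external_loopback)
			return -1;	/* duplicate WWPN, not recoverable */
		cfg->local_port_id = 1;
		cfg->remote_port_id = 1;
		cfg->winner = true;
		return 0;
	}

	if (remote_wwpn > local_wwpn) {
		/* peer wins: it will PLOGI to us, this side stays passive */
		cfg->local_port_id = 0;
		cfg->remote_port_id = 0;
		cfg->winner = false;
	} else {
		/* we win: take ID 1 and PLOGI to the peer at ID 2 */
		cfg->local_port_id = 1;
		cfg->remote_port_id = 2;
		cfg->winner = true;
	}
	return 0;
}

int main(void)
{
	struct p2p_cfg cfg;

	/* Example WWPNs chosen arbitrarily for the demonstration. */
	if (!p2p_decide(0x20000090fa000001ULL, 0x20000090fa000002ULL, false, &cfg))
		printf("winner=%d local=%u remote=%u\n",
		       cfg.winner, cfg.local_port_id, cfg.remote_port_id);
	return 0;
}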
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ /* * domain_sm Domain State Machine: States */ #include "efc.h" int efc_domain_cb(void *arg, int event, void *data) { struct efc *efc = arg; struct efc_domain *domain = NULL; int rc = 0; unsigned long flags = 0; if (event != EFC_HW_DOMAIN_FOUND) domain = data; /* Accept domain callback events from the user driver */ spin_lock_irqsave(&efc->lock, flags); switch (event) { case EFC_HW_DOMAIN_FOUND: { u64 fcf_wwn = 0; struct efc_domain_record *drec = data; /* extract the fcf_wwn */ fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn)); efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn); /* lookup domain, or allocate a new one */ domain = efc->domain; if (!domain) { domain = efc_domain_alloc(efc, fcf_wwn); if (!domain) { efc_log_err(efc, "efc_domain_alloc() failed\n"); rc = -1; break; } efc_sm_transition(&domain->drvsm, __efc_domain_init, NULL); } efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec); break; } case EFC_HW_DOMAIN_LOST: domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n"); efc->hold_frames = true; efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL); break; case EFC_HW_DOMAIN_ALLOC_OK: domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n"); efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL); break; case EFC_HW_DOMAIN_ALLOC_FAIL: domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n"); efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL, NULL); break; case EFC_HW_DOMAIN_ATTACH_OK: domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n"); efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL); break; case EFC_HW_DOMAIN_ATTACH_FAIL: domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n"); efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_FAIL, NULL); break; case EFC_HW_DOMAIN_FREE_OK: domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n"); efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL); break; case EFC_HW_DOMAIN_FREE_FAIL: domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n"); efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL); break; default: efc_log_warn(efc, "unsupported event %#x\n", event); } spin_unlock_irqrestore(&efc->lock, flags); if (efc->domain && domain->req_accept_frames) { domain->req_accept_frames = false; efc->hold_frames = false; } return rc; } static void _efc_domain_free(struct kref *arg) { struct efc_domain *domain = container_of(arg, struct efc_domain, ref); struct efc *efc = domain->efc; if (efc->domain_free_cb) (*efc->domain_free_cb)(efc, efc->domain_free_cb_arg); kfree(domain); } void efc_domain_free(struct efc_domain *domain) { struct efc *efc; efc = domain->efc; /* Hold frames to clear the domain pointer from the xport lookup */ efc->hold_frames = false; efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn); xa_destroy(&domain->lookup); efc->domain = NULL; kref_put(&domain->ref, domain->release); } struct efc_domain * efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn) { struct efc_domain *domain; domain = kzalloc(sizeof(*domain), GFP_ATOMIC); if (!domain) return NULL; domain->efc = efc; domain->drvsm.app = domain; /* initialize refcount */ kref_init(&domain->ref); domain->release = _efc_domain_free; xa_init(&domain->lookup); INIT_LIST_HEAD(&domain->nport_list); efc->domain = domain; domain->fcf_wwn = fcf_wwn; efc_log_debug(efc, "Domain allocated: wwn %016llX\n", domain->fcf_wwn); return domain; } void efc_register_domain_free_cb(struct efc *efc, void 
(*callback)(struct efc *efc, void *arg), void *arg) { /* Register a callback to be called when the domain is freed */ efc->domain_free_cb = callback; efc->domain_free_cb_arg = arg; if (!efc->domain && callback) (*callback)(efc, arg); } static void __efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_domain *domain = ctx->app; switch (evt) { case EFC_EVT_ENTER: case EFC_EVT_REENTER: case EFC_EVT_EXIT: case EFC_EVT_ALL_CHILD_NODES_FREE: /* * this can arise if an FLOGI fails on the NPORT, * and the NPORT is shutdown */ break; default: efc_log_warn(domain->efc, "%-20s %-20s not handled\n", funcname, efc_sm_event_name(evt)); } } static void __efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_domain *domain = ctx->app; switch (evt) { case EFC_EVT_ENTER: case EFC_EVT_REENTER: case EFC_EVT_EXIT: break; case EFC_EVT_DOMAIN_FOUND: /* save drec, mark domain_found_pending */ memcpy(&domain->pending_drec, arg, sizeof(domain->pending_drec)); domain->domain_found_pending = true; break; case EFC_EVT_DOMAIN_LOST: /* unmark domain_found_pending */ domain->domain_found_pending = false; break; default: efc_log_warn(domain->efc, "%-20s %-20s not handled\n", funcname, efc_sm_event_name(evt)); } } #define std_domain_state_decl(...)\ struct efc_domain *domain = NULL;\ struct efc *efc = NULL;\ \ WARN_ON(!ctx || !ctx->app);\ domain = ctx->app;\ WARN_ON(!domain->efc);\ efc = domain->efc void __efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); switch (evt) { case EFC_EVT_ENTER: domain->attached = false; break; case EFC_EVT_DOMAIN_FOUND: { u32 i; struct efc_domain_record *drec = arg; struct efc_nport *nport; u64 my_wwnn = efc->req_wwnn; u64 my_wwpn = efc->req_wwpn; __be64 bewwpn; if (my_wwpn == 0 || my_wwnn == 0) { efc_log_debug(efc, "using default hardware WWN config\n"); my_wwpn = efc->def_wwpn; my_wwnn = efc->def_wwnn; } efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n", my_wwpn, my_wwnn); /* Allocate a nport and transition to __efc_nport_allocated */ nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX, efc->enable_ini, efc->enable_tgt); if (!nport) { efc_log_err(efc, "efc_nport_alloc() failed\n"); break; } efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL); bewwpn = cpu_to_be64(nport->wwpn); /* allocate struct efc_nport object for local port * Note: drec->fc_id is ALPA from read_topology only if loop */ if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) { efc_log_err(efc, "Can't allocate port\n"); efc_nport_free(nport); break; } domain->is_loop = drec->is_loop; /* * If the loop position map includes ALPA == 0, * then we are in a public loop (NL_PORT) * Note that the first element of the loopmap[] * contains the count of elements, and if * ALPA == 0 is present, it will occupy the first * location after the count. */ domain->is_nlport = drec->map.loop[1] == 0x00; if (!domain->is_loop) { /* Initiate HW domain alloc */ if (efc_cmd_domain_alloc(efc, domain, drec->index)) { efc_log_err(efc, "Failed to initiate HW domain allocation\n"); break; } efc_sm_transition(ctx, __efc_domain_wait_alloc, arg); break; } efc_log_debug(efc, "%s fc_id=%#x speed=%d\n", drec->is_loop ? (domain->is_nlport ? 
"public-loop" : "loop") : "other", drec->fc_id, drec->speed); nport->fc_id = drec->fc_id; nport->topology = EFC_NPORT_TOPO_FC_AL; snprintf(nport->display_name, sizeof(nport->display_name), "s%06x", drec->fc_id); if (efc->enable_ini) { u32 count = drec->map.loop[0]; efc_log_debug(efc, "%d position map entries\n", count); for (i = 1; i <= count; i++) { if (drec->map.loop[i] != drec->fc_id) { struct efc_node *node; efc_log_debug(efc, "%#x -> %#x\n", drec->fc_id, drec->map.loop[i]); node = efc_node_alloc(nport, drec->map.loop[i], false, true); if (!node) { efc_log_err(efc, "efc_node_alloc() failed\n"); break; } efc_node_transition(node, __efc_d_wait_loop, NULL); } } } /* Initiate HW domain alloc */ if (efc_cmd_domain_alloc(efc, domain, drec->index)) { efc_log_err(efc, "Failed to initiate HW domain allocation\n"); break; } efc_sm_transition(ctx, __efc_domain_wait_alloc, arg); break; } default: __efc_domain_common(__func__, ctx, evt, arg); } } void __efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); switch (evt) { case EFC_EVT_DOMAIN_ALLOC_OK: { struct fc_els_flogi *sp; struct efc_nport *nport; nport = domain->nport; if (WARN_ON(!nport)) return; sp = (struct fc_els_flogi *)nport->service_params; /* Save the domain service parameters */ memcpy(domain->service_params + 4, domain->dma.virt, sizeof(struct fc_els_flogi) - 4); memcpy(nport->service_params + 4, domain->dma.virt, sizeof(struct fc_els_flogi) - 4); /* * Update the nport's service parameters, * user might have specified non-default names */ sp->fl_wwpn = cpu_to_be64(nport->wwpn); sp->fl_wwnn = cpu_to_be64(nport->wwnn); /* * Take the loop topology path, * unless we are an NL_PORT (public loop) */ if (domain->is_loop && !domain->is_nlport) { /* * For loop, we already have our FC ID * and don't need fabric login. * Transition to the allocated state and * post an event to attach to * the domain. Note that this breaks the * normal action/transition * pattern here to avoid a race with the * domain attach callback. 
*/ /* sm: is_loop / domain_attach */ efc_sm_transition(ctx, __efc_domain_allocated, NULL); __efc_domain_attach_internal(domain, nport->fc_id); break; } { struct efc_node *node; /* alloc fabric node, send FLOGI */ node = efc_node_find(nport, FC_FID_FLOGI); if (node) { efc_log_err(efc, "Fabric Controller node already exists\n"); break; } node = efc_node_alloc(nport, FC_FID_FLOGI, false, false); if (!node) { efc_log_err(efc, "Error: efc_node_alloc() failed\n"); } else { efc_node_transition(node, __efc_fabric_init, NULL); } /* Accept frames */ domain->req_accept_frames = true; } /* sm: / start fabric logins */ efc_sm_transition(ctx, __efc_domain_allocated, NULL); break; } case EFC_EVT_DOMAIN_ALLOC_FAIL: efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;", efc_sm_event_name(evt)); efc_log_err(efc, "shutting down domain\n"); domain->req_domain_free = true; break; case EFC_EVT_DOMAIN_FOUND: /* Should not happen */ break; case EFC_EVT_DOMAIN_LOST: efc_log_debug(efc, "%s received while waiting for hw_domain_alloc()\n", efc_sm_event_name(evt)); efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL); break; default: __efc_domain_common(__func__, ctx, evt, arg); } } void __efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); switch (evt) { case EFC_EVT_DOMAIN_REQ_ATTACH: { int rc = 0; u32 fc_id; if (WARN_ON(!arg)) return; fc_id = *((u32 *)arg); efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n", fc_id); /* Update nport lookup */ rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport, GFP_ATOMIC)); if (rc) { efc_log_err(efc, "Sport lookup store failed: %d\n", rc); return; } /* Update display name for the nport */ efc_node_fcid_display(fc_id, domain->nport->display_name, sizeof(domain->nport->display_name)); /* Issue domain attach call */ rc = efc_cmd_domain_attach(efc, domain, fc_id); if (rc) { efc_log_err(efc, "efc_hw_domain_attach failed: %d\n", rc); return; } /* sm: / domain_attach */ efc_sm_transition(ctx, __efc_domain_wait_attach, NULL); break; } case EFC_EVT_DOMAIN_FOUND: /* Should not happen */ efc_log_err(efc, "%s: evt: %d should not happen\n", __func__, evt); break; case EFC_EVT_DOMAIN_LOST: { efc_log_debug(efc, "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n", efc_sm_event_name(evt)); if (!list_empty(&domain->nport_list)) { /* * if there are nports, transition to * wait state and send shutdown to each * nport */ struct efc_nport *nport = NULL, *nport_next = NULL; efc_sm_transition(ctx, __efc_domain_wait_nports_free, NULL); list_for_each_entry_safe(nport, nport_next, &domain->nport_list, list_entry) { efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); } } else { /* no nports exist, free domain */ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL); if (efc_cmd_domain_free(efc, domain)) efc_log_err(efc, "hw_domain_free failed\n"); } break; } default: __efc_domain_common(__func__, ctx, evt, arg); } } void __efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); switch (evt) { case EFC_EVT_DOMAIN_ATTACH_OK: { struct efc_node *node = NULL; struct efc_nport *nport, *next_nport; unsigned long index; /* * Set domain notify pending state to avoid * duplicate domain event post */ domain->domain_notify_pend = true; /* Mark as attached */ domain->attached = true; /* Transition to ready */ /* sm: / forward event to all nports and nodes */ efc_sm_transition(ctx, __efc_domain_ready, NULL); /* We have an FCFI, so we can 
accept frames */ domain->req_accept_frames = true; /* * Notify all nodes that the domain attach request * has completed * Note: nport will have already received notification * of nport attached as a result of the HW's port attach. */ list_for_each_entry_safe(nport, next_nport, &domain->nport_list, list_entry) { xa_for_each(&nport->lookup, index, node) { efc_node_post_event(node, EFC_EVT_DOMAIN_ATTACH_OK, NULL); } } domain->domain_notify_pend = false; break; } case EFC_EVT_DOMAIN_ATTACH_FAIL: efc_log_debug(efc, "%s received while waiting for hw attach\n", efc_sm_event_name(evt)); break; case EFC_EVT_DOMAIN_FOUND: /* Should not happen */ efc_log_err(efc, "%s: evt: %d should not happen\n", __func__, evt); break; case EFC_EVT_DOMAIN_LOST: /* * Domain lost while waiting for an attach to complete, * go to a state that waits for the domain attach to * complete, then handle domain lost */ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL); break; case EFC_EVT_DOMAIN_REQ_ATTACH: /* * In P2P we can get an attach request from * the other FLOGI path, so drop this one */ break; default: __efc_domain_common(__func__, ctx, evt, arg); } } void __efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); switch (evt) { case EFC_EVT_ENTER: { /* start any pending vports */ if (efc_vport_start(domain)) { efc_log_debug(domain->efc, "efc_vport_start didn't start vports\n"); } break; } case EFC_EVT_DOMAIN_LOST: { if (!list_empty(&domain->nport_list)) { /* * if there are nports, transition to wait state * and send shutdown to each nport */ struct efc_nport *nport = NULL, *nport_next = NULL; efc_sm_transition(ctx, __efc_domain_wait_nports_free, NULL); list_for_each_entry_safe(nport, nport_next, &domain->nport_list, list_entry) { efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); } } else { /* no nports exist, free domain */ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL); if (efc_cmd_domain_free(efc, domain)) efc_log_err(efc, "hw_domain_free failed\n"); } break; } case EFC_EVT_DOMAIN_FOUND: /* Should not happen */ efc_log_err(efc, "%s: evt: %d should not happen\n", __func__, evt); break; case EFC_EVT_DOMAIN_REQ_ATTACH: { /* can happen during p2p */ u32 fc_id; fc_id = *((u32 *)arg); /* Assume that the domain is attached */ WARN_ON(!domain->attached); /* * Verify that the requested FC_ID * is the same as the one we're working with */ WARN_ON(domain->nport->fc_id != fc_id); break; } default: __efc_domain_common(__func__, ctx, evt, arg); } } void __efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); /* Wait for nodes to free prior to the domain shutdown */ switch (evt) { case EFC_EVT_ALL_CHILD_NODES_FREE: { int rc; /* sm: / efc_hw_domain_free */ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL); /* Request efc_hw_domain_free and wait for completion */ rc = efc_cmd_domain_free(efc, domain); if (rc) { efc_log_err(efc, "efc_hw_domain_free() failed: %d\n", rc); } break; } default: __efc_domain_common_shutdown(__func__, ctx, evt, arg); } } void __efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); switch (evt) { case EFC_EVT_DOMAIN_FREE_OK: /* sm: / domain_free */ if (domain->domain_found_pending) { /* * save fcf_wwn and drec from this domain, * free current domain and allocate * a new one with the same fcf_wwn * could use a SLI-4 "re-register VPI" * operation 
here? */ u64 fcf_wwn = domain->fcf_wwn; struct efc_domain_record drec = domain->pending_drec; efc_log_debug(efc, "Reallocating domain\n"); domain->req_domain_free = true; domain = efc_domain_alloc(efc, fcf_wwn); if (!domain) { efc_log_err(efc, "efc_domain_alloc() failed\n"); return; } /* * got a new domain; at this point, * there are at least two domains * once the req_domain_free flag is processed, * the associated domain will be removed. */ efc_sm_transition(&domain->drvsm, __efc_domain_init, NULL); efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_FOUND, &drec); } else { domain->req_domain_free = true; } break; default: __efc_domain_common_shutdown(__func__, ctx, evt, arg); } } void __efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { std_domain_state_decl(); domain_sm_trace(domain); /* * Wait for the domain alloc/attach completion * after receiving a domain lost. */ switch (evt) { case EFC_EVT_DOMAIN_ALLOC_OK: case EFC_EVT_DOMAIN_ATTACH_OK: { if (!list_empty(&domain->nport_list)) { /* * if there are nports, transition to * wait state and send shutdown to each nport */ struct efc_nport *nport = NULL, *nport_next = NULL; efc_sm_transition(ctx, __efc_domain_wait_nports_free, NULL); list_for_each_entry_safe(nport, nport_next, &domain->nport_list, list_entry) { efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); } } else { /* no nports exist, free domain */ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL); if (efc_cmd_domain_free(efc, domain)) efc_log_err(efc, "hw_domain_free() failed\n"); } break; } case EFC_EVT_DOMAIN_ALLOC_FAIL: case EFC_EVT_DOMAIN_ATTACH_FAIL: efc_log_err(efc, "[domain] %-20s: failed\n", efc_sm_event_name(evt)); break; default: __efc_domain_common_shutdown(__func__, ctx, evt, arg); } } void __efc_domain_attach_internal(struct efc_domain *domain, u32 s_id) { memcpy(domain->dma.virt, ((uint8_t *)domain->flogi_service_params) + 4, sizeof(struct fc_els_flogi) - 4); (void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH, &s_id); } void efc_domain_attach(struct efc_domain *domain, u32 s_id) { __efc_domain_attach_internal(domain, s_id); } int efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event, void *arg) { int rc; bool req_domain_free; rc = efc_sm_post_event(&domain->drvsm, event, arg); req_domain_free = domain->req_domain_free; domain->req_domain_free = false; if (req_domain_free) efc_domain_free(domain); return rc; } static void efct_domain_process_pending(struct efc_domain *domain) { struct efc *efc = domain->efc; struct efc_hw_sequence *seq = NULL; u32 processed = 0; unsigned long flags = 0; for (;;) { /* need to check for hold frames condition after each frame * processed because any given frame could cause a transition * to a state that holds frames */ if (efc->hold_frames) break; /* Get next frame/sequence */ spin_lock_irqsave(&efc->pend_frames_lock, flags); if (!list_empty(&efc->pend_frames)) { seq = list_first_entry(&efc->pend_frames, struct efc_hw_sequence, list_entry); list_del(&seq->list_entry); } if (!seq) { processed = efc->pend_frames_processed; efc->pend_frames_processed = 0; spin_unlock_irqrestore(&efc->pend_frames_lock, flags); break; } efc->pend_frames_processed++; spin_unlock_irqrestore(&efc->pend_frames_lock, flags); /* now dispatch frame(s) to dispatch function */ if (efc_domain_dispatch_frame(domain, seq)) efc->tt.hw_seq_free(efc, seq); seq = NULL; } if (processed != 0) efc_log_debug(efc, "%u domain frames held and processed\n", processed); } void efc_dispatch_frame(struct 
efc *efc, struct efc_hw_sequence *seq) { struct efc_domain *domain = efc->domain; /* * If we are holding frames or the domain is not yet registered or * there's already frames on the pending list, * then add the new frame to pending list */ if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) { unsigned long flags = 0; spin_lock_irqsave(&efc->pend_frames_lock, flags); INIT_LIST_HEAD(&seq->list_entry); list_add_tail(&seq->list_entry, &efc->pend_frames); spin_unlock_irqrestore(&efc->pend_frames_lock, flags); if (domain) { /* immediately process pending frames */ efct_domain_process_pending(domain); } } else { /* * We are not holding frames and pending list is empty, * just process frame. A non-zero return means the frame * was not handled - so cleanup */ if (efc_domain_dispatch_frame(domain, seq)) efc->tt.hw_seq_free(efc, seq); } } int efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq) { struct efc_domain *domain = (struct efc_domain *)arg; struct efc *efc = domain->efc; struct fc_frame_header *hdr; struct efc_node *node = NULL; struct efc_nport *nport = NULL; unsigned long flags = 0; u32 s_id, d_id, rc = EFC_HW_SEQ_FREE; if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) { efc_log_err(efc, "Sequence header or payload is null\n"); return rc; } hdr = seq->header->dma.virt; /* extract the s_id and d_id */ s_id = ntoh24(hdr->fh_s_id); d_id = ntoh24(hdr->fh_d_id); spin_lock_irqsave(&efc->lock, flags); nport = efc_nport_find(domain, d_id); if (!nport) { if (hdr->fh_type == FC_TYPE_FCP) { /* Drop frame */ efc_log_warn(efc, "FCP frame with invalid d_id x%x\n", d_id); goto out; } /* p2p will use this case */ nport = domain->nport; if (!nport || !kref_get_unless_zero(&nport->ref)) { efc_log_err(efc, "Physical nport is NULL\n"); goto out; } } /* Lookup the node given the remote s_id */ node = efc_node_find(nport, s_id); /* If not found, then create a new node */ if (!node) { /* * If this is solicited data or control based on R_CTL and * there is no node context, then we can drop the frame */ if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) || (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) { efc_log_debug(efc, "sol data/ctrl frame without node\n"); goto out_release; } node = efc_node_alloc(nport, s_id, false, false); if (!node) { efc_log_err(efc, "efc_node_alloc() failed\n"); goto out_release; } /* don't send PLOGI on efc_d_init entry */ efc_node_init_device(node, false); } if (node->hold_frames || !list_empty(&node->pend_frames)) { /* add frame to node's pending list */ spin_lock(&node->pend_frames_lock); INIT_LIST_HEAD(&seq->list_entry); list_add_tail(&seq->list_entry, &node->pend_frames); spin_unlock(&node->pend_frames_lock); rc = EFC_HW_SEQ_HOLD; goto out_release; } /* now dispatch frame to the node frame handler */ efc_node_dispatch_frame(node, seq); out_release: kref_put(&nport->ref, nport->release); out: spin_unlock_irqrestore(&efc->lock, flags); return rc; } void efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq) { struct fc_frame_header *hdr = seq->header->dma.virt; u32 port_id; struct efc_node *node = (struct efc_node *)arg; struct efc *efc = node->efc; port_id = ntoh24(hdr->fh_s_id); if (WARN_ON(port_id != node->rnode.fc_id)) return; if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) || !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) { node_printf(node, "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n", cpu_to_be32(((u32 *)hdr)[0]), cpu_to_be32(((u32 *)hdr)[1]), cpu_to_be32(((u32 *)hdr)[2]), cpu_to_be32(((u32 *)hdr)[3]), cpu_to_be32(((u32 *)hdr)[4]), 
cpu_to_be32(((u32 *)hdr)[5])); return; } switch (hdr->fh_r_ctl) { case FC_RCTL_ELS_REQ: case FC_RCTL_ELS_REP: efc_node_recv_els_frame(node, seq); break; case FC_RCTL_BA_ABTS: case FC_RCTL_BA_ACC: case FC_RCTL_BA_RJT: case FC_RCTL_BA_NOP: efc_log_err(efc, "Received ABTS:\n"); break; case FC_RCTL_DD_UNSOL_CMD: case FC_RCTL_DD_UNSOL_CTL: switch (hdr->fh_type) { case FC_TYPE_FCP: if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) { if (!node->fcp_enabled) { efc_node_recv_fcp_cmd(node, seq); break; } efc_log_err(efc, "Recvd FCP CMD. Drop IO\n"); } else if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_SOL_DATA) { node_printf(node, "solicited data recvd. Drop IO\n"); } break; case FC_TYPE_CT: efc_node_recv_ct_frame(node, seq); break; default: break; } break; default: efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl); } }
linux-master
drivers/scsi/elx/libefc/efc_domain.c
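The domain code in the row above drives everything through a small function-pointer state machine: each __efc_domain_* state is a function stored in an efc_sm_ctx, efc_sm_post_event() calls the current state with an event, and efc_sm_transition() swaps the pointer while delivering EXIT/ENTER (the same pattern efc_node_transition() spells out further down). What follows is a minimal, standalone sketch of that pattern, not the driver's efc_sm implementation; all demo_* names and the toy events are illustrative.

/*
 * Minimal standalone sketch (illustrative names only): a state is a
 * function pointer held in a small context, events are delivered by
 * calling the current state, and a transition posts EXIT to the old
 * state, swaps the pointer, then posts ENTER to the new one.
 */
#include <stdio.h>

enum demo_sm_event {
	DEMO_EVT_ENTER,
	DEMO_EVT_EXIT,
	DEMO_EVT_DOMAIN_FOUND,
	DEMO_EVT_DOMAIN_LOST,
};

struct demo_sm_ctx;
typedef void (*demo_sm_state)(struct demo_sm_ctx *ctx,
			      enum demo_sm_event evt, void *arg);

struct demo_sm_ctx {
	demo_sm_state current_state;
	void *app;		/* back-pointer to the owning object */
};

static void demo_sm_post_event(struct demo_sm_ctx *ctx,
			       enum demo_sm_event evt, void *arg)
{
	if (ctx->current_state)
		ctx->current_state(ctx, evt, arg);
}

static void demo_sm_transition(struct demo_sm_ctx *ctx,
			       demo_sm_state next, void *arg)
{
	demo_sm_post_event(ctx, DEMO_EVT_EXIT, arg);
	ctx->current_state = next;
	demo_sm_post_event(ctx, DEMO_EVT_ENTER, arg);
}

static void demo_state_ready(struct demo_sm_ctx *ctx,
			     enum demo_sm_event evt, void *arg)
{
	(void)ctx; (void)arg;
	printf("ready: evt %d\n", (int)evt);
}

static void demo_state_init(struct demo_sm_ctx *ctx,
			    enum demo_sm_event evt, void *arg)
{
	switch (evt) {
	case DEMO_EVT_DOMAIN_FOUND:
		/* do this state's work, then pick the next state */
		demo_sm_transition(ctx, demo_state_ready, arg);
		break;
	default:
		printf("init: evt %d not handled\n", (int)evt);
	}
}

int main(void)
{
	struct demo_sm_ctx ctx = { .current_state = demo_state_init, .app = NULL };

	demo_sm_post_event(&ctx, DEMO_EVT_DOMAIN_FOUND, NULL);
	return 0;
}

Keeping states as plain functions lets a handler both perform its action and select the next state in one place, which is why most cases in the domain handlers above end with an efc_sm_transition() call.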
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ #include "efclib.h" #include "../libefc_sli/sli4.h" #include "efc_cmds.h" #include "efc_sm.h" static void efc_nport_free_resources(struct efc_nport *nport, int evt, void *data) { struct efc *efc = nport->efc; /* Clear the nport attached flag */ nport->attached = false; /* Free the service parameters buffer */ if (nport->dma.virt) { dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt, nport->dma.phys); memset(&nport->dma, 0, sizeof(struct efc_dma)); } /* Free the SLI resources */ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator); efc_nport_cb(efc, evt, nport); } static int efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status) { struct efc *efc = nport->efc; struct sli4_mbox_command_header *hdr = (struct sli4_mbox_command_header *)mqe; if (status || le16_to_cpu(hdr->status)) { efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n", nport->indicator, status, le16_to_cpu(hdr->status)); return -EIO; } return 0; } static int efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_nport *nport = arg; int evt = EFC_EVT_NPORT_FREE_OK; int rc; rc = efc_nport_get_mbox_status(nport, mqe, status); if (rc) evt = EFC_EVT_NPORT_FREE_FAIL; efc_nport_free_resources(nport, evt, mqe); return rc; } static void efc_nport_free_unreg_vpi(struct efc_nport *nport) { struct efc *efc = nport->efc; int rc; u8 data[SLI4_BMBX_SIZE]; rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator, SLI4_UNREG_TYPE_PORT); if (rc) { efc_log_err(efc, "UNREG_VPI format failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data); return; } rc = efc->tt.issue_mbox_rqst(efc->base, data, efc_nport_free_unreg_vpi_cb, nport); if (rc) { efc_log_err(efc, "UNREG_VPI command failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data); } } static void efc_nport_send_evt(struct efc_nport *nport, int evt, void *data) { struct efc *efc = nport->efc; /* Now inform the registered callbacks */ efc_nport_cb(efc, evt, nport); /* Set the nport attached flag */ if (evt == EFC_EVT_NPORT_ATTACH_OK) nport->attached = true; /* If there is a pending free request, then handle it now */ if (nport->free_req_pending) efc_nport_free_unreg_vpi(nport); } static int efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_nport *nport = arg; if (efc_nport_get_mbox_status(nport, mqe, status)) { efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe); return -EIO; } efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe); return 0; } static void efc_nport_alloc_init_vpi(struct efc_nport *nport) { struct efc *efc = nport->efc; u8 data[SLI4_BMBX_SIZE]; int rc; /* If there is a pending free request, then handle it now */ if (nport->free_req_pending) { efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data); return; } rc = sli_cmd_init_vpi(efc->sli, data, nport->indicator, nport->domain->indicator); if (rc) { efc_log_err(efc, "INIT_VPI format failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); return; } rc = efc->tt.issue_mbox_rqst(efc->base, data, efc_nport_alloc_init_vpi_cb, nport); if (rc) { efc_log_err(efc, "INIT_VPI command failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); } } static int efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_nport *nport = arg; u8 
*payload = NULL; if (efc_nport_get_mbox_status(nport, mqe, status)) { efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe); return -EIO; } payload = nport->dma.virt; memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(nport->sli_wwpn)); memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(nport->sli_wwnn)); dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt, nport->dma.phys); memset(&nport->dma, 0, sizeof(struct efc_dma)); efc_nport_alloc_init_vpi(nport); return 0; } static void efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport) { u8 data[SLI4_BMBX_SIZE]; int rc; /* Allocate memory for the service parameters */ nport->dma.size = EFC_SPARAM_DMA_SZ; nport->dma.virt = dma_alloc_coherent(&efc->pci->dev, nport->dma.size, &nport->dma.phys, GFP_KERNEL); if (!nport->dma.virt) { efc_log_err(efc, "Failed to allocate DMA memory\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); return; } rc = sli_cmd_read_sparm64(efc->sli, data, &nport->dma, nport->indicator); if (rc) { efc_log_err(efc, "READ_SPARM64 format failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); return; } rc = efc->tt.issue_mbox_rqst(efc->base, data, efc_nport_alloc_read_sparm64_cb, nport); if (rc) { efc_log_err(efc, "READ_SPARM64 command failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data); } } int efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport, struct efc_domain *domain, u8 *wwpn) { u32 index; nport->indicator = U32_MAX; nport->free_req_pending = false; if (wwpn) memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn)); /* * allocate a VPI object for the port and stores it in the * indicator field of the port object. */ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI, &nport->indicator, &index)) { efc_log_err(efc, "VPI allocation failure\n"); return -EIO; } if (domain) { /* * If the WWPN is NULL, fetch the default * WWPN and WWNN before initializing the VPI */ if (!wwpn) efc_nport_alloc_read_sparm64(efc, nport); else efc_nport_alloc_init_vpi(nport); } else if (!wwpn) { /* domain NULL and wwpn non-NULL */ efc_log_err(efc, "need WWN for physical port\n"); sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator); return -EIO; } return 0; } static int efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_nport *nport = arg; nport->attaching = false; if (efc_nport_get_mbox_status(nport, mqe, status)) { efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe); return -EIO; } efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe); return 0; } int efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id) { u8 buf[SLI4_BMBX_SIZE]; int rc = 0; if (!nport) { efc_log_err(efc, "bad param(s) nport=%p\n", nport); return -EIO; } nport->fc_id = fc_id; /* register previously-allocated VPI with the device */ rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id, nport->sli_wwpn, nport->indicator, nport->domain->indicator, false); if (rc) { efc_log_err(efc, "REG_VPI format failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf); return rc; } rc = efc->tt.issue_mbox_rqst(efc->base, buf, efc_nport_attach_reg_vpi_cb, nport); if (rc) { efc_log_err(efc, "REG_VPI command failure\n"); efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf); } else { nport->attaching = true; } return rc; } int efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport) { if (!nport) { efc_log_err(efc, "bad parameter(s) 
nport=%p\n", nport); return -EIO; } /* Issue the UNREG_VPI command to free the assigned VPI context */ if (nport->attached) efc_nport_free_unreg_vpi(nport); else if (nport->attaching) nport->free_req_pending = true; else efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL); return 0; } static int efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status) { struct efc *efc = domain->efc; struct sli4_mbox_command_header *hdr = (struct sli4_mbox_command_header *)mqe; if (status || le16_to_cpu(hdr->status)) { efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n", domain->indicator, status, le16_to_cpu(hdr->status)); return -EIO; } return 0; } static void efc_domain_free_resources(struct efc_domain *domain, int evt, void *data) { struct efc *efc = domain->efc; /* Free the service parameters buffer */ if (domain->dma.virt) { dma_free_coherent(&efc->pci->dev, domain->dma.size, domain->dma.virt, domain->dma.phys); memset(&domain->dma, 0, sizeof(struct efc_dma)); } /* Free the SLI resources */ sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator); efc_domain_cb(efc, evt, domain); } static void efc_domain_send_nport_evt(struct efc_domain *domain, int port_evt, int domain_evt, void *data) { struct efc *efc = domain->efc; /* Send alloc/attach ok to the physical nport */ efc_nport_send_evt(domain->nport, port_evt, NULL); /* Now inform the registered callbacks */ efc_domain_cb(efc, domain_evt, domain); } static int efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_domain *domain = arg; if (efc_domain_get_mbox_status(domain, mqe, status)) { efc_domain_free_resources(domain, EFC_HW_DOMAIN_ALLOC_FAIL, mqe); return -EIO; } efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK, EFC_HW_DOMAIN_ALLOC_OK, mqe); return 0; } static void efc_domain_alloc_read_sparm64(struct efc_domain *domain) { struct efc *efc = domain->efc; u8 data[SLI4_BMBX_SIZE]; int rc; rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0); if (rc) { efc_log_err(efc, "READ_SPARM64 format failure\n"); efc_domain_free_resources(domain, EFC_HW_DOMAIN_ALLOC_FAIL, data); return; } rc = efc->tt.issue_mbox_rqst(efc->base, data, efc_domain_alloc_read_sparm64_cb, domain); if (rc) { efc_log_err(efc, "READ_SPARM64 command failure\n"); efc_domain_free_resources(domain, EFC_HW_DOMAIN_ALLOC_FAIL, data); } } static int efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_domain *domain = arg; if (efc_domain_get_mbox_status(domain, mqe, status)) { efc_domain_free_resources(domain, EFC_HW_DOMAIN_ALLOC_FAIL, mqe); return -EIO; } efc_domain_alloc_read_sparm64(domain); return 0; } static void efc_domain_alloc_init_vfi(struct efc_domain *domain) { struct efc *efc = domain->efc; struct efc_nport *nport = domain->nport; u8 data[SLI4_BMBX_SIZE]; int rc; /* * For FC, the HW alread registered an FCFI. * Copy FCF information into the domain and jump to INIT_VFI. 
*/ domain->fcf_indicator = efc->fcfi; rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator, domain->fcf_indicator, nport->indicator); if (rc) { efc_log_err(efc, "INIT_VFI format failure\n"); efc_domain_free_resources(domain, EFC_HW_DOMAIN_ALLOC_FAIL, data); return; } efc_log_err(efc, "%s issue mbox\n", __func__); rc = efc->tt.issue_mbox_rqst(efc->base, data, efc_domain_alloc_init_vfi_cb, domain); if (rc) { efc_log_err(efc, "INIT_VFI command failure\n"); efc_domain_free_resources(domain, EFC_HW_DOMAIN_ALLOC_FAIL, data); } } int efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf) { u32 index; if (!domain || !domain->nport) { efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n", domain, domain ? domain->nport : NULL); return -EIO; } /* allocate memory for the service parameters */ domain->dma.size = EFC_SPARAM_DMA_SZ; domain->dma.virt = dma_alloc_coherent(&efc->pci->dev, domain->dma.size, &domain->dma.phys, GFP_KERNEL); if (!domain->dma.virt) { efc_log_err(efc, "Failed to allocate DMA memory\n"); return -EIO; } domain->fcf = fcf; domain->fcf_indicator = U32_MAX; domain->indicator = U32_MAX; if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator, &index)) { efc_log_err(efc, "VFI allocation failure\n"); dma_free_coherent(&efc->pci->dev, domain->dma.size, domain->dma.virt, domain->dma.phys); memset(&domain->dma, 0, sizeof(struct efc_dma)); return -EIO; } efc_domain_alloc_init_vfi(domain); return 0; } static int efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_domain *domain = arg; if (efc_domain_get_mbox_status(domain, mqe, status)) { efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, mqe); return -EIO; } efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK, EFC_HW_DOMAIN_ATTACH_OK, mqe); return 0; } int efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id) { u8 buf[SLI4_BMBX_SIZE]; int rc = 0; if (!domain) { efc_log_err(efc, "bad param(s) domain=%p\n", domain); return -EIO; } domain->nport->fc_id = fc_id; rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator, domain->fcf_indicator, domain->dma, domain->nport->indicator, domain->nport->sli_wwpn, domain->nport->fc_id); if (rc) { efc_log_err(efc, "REG_VFI format failure\n"); goto cleanup; } rc = efc->tt.issue_mbox_rqst(efc->base, buf, efc_domain_attach_reg_vfi_cb, domain); if (rc) { efc_log_err(efc, "REG_VFI command failure\n"); goto cleanup; } return rc; cleanup: efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf); return rc; } static int efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_domain *domain = arg; int evt = EFC_HW_DOMAIN_FREE_OK; int rc; rc = efc_domain_get_mbox_status(domain, mqe, status); if (rc) { evt = EFC_HW_DOMAIN_FREE_FAIL; rc = -EIO; } efc_domain_free_resources(domain, evt, mqe); return rc; } static void efc_domain_free_unreg_vfi(struct efc_domain *domain) { struct efc *efc = domain->efc; int rc; u8 data[SLI4_BMBX_SIZE]; rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator, SLI4_UNREG_TYPE_DOMAIN); if (rc) { efc_log_err(efc, "UNREG_VFI format failure\n"); goto cleanup; } rc = efc->tt.issue_mbox_rqst(efc->base, data, efc_domain_free_unreg_vfi_cb, domain); if (rc) { efc_log_err(efc, "UNREG_VFI command failure\n"); goto cleanup; } return; cleanup: efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data); } int efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain) { if (!domain) { efc_log_err(efc, "bad parameter(s) 
domain=%p\n", domain); return -EIO; } efc_domain_free_unreg_vfi(domain); return 0; } int efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr, struct efc_nport *nport) { /* Check for invalid indicator */ if (rnode->indicator != U32_MAX) { efc_log_err(efc, "RPI allocation failure addr=%#x rpi=%#x\n", fc_addr, rnode->indicator); return -EIO; } /* NULL SLI port indicates an unallocated remote node */ rnode->nport = NULL; if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI, &rnode->indicator, &rnode->index)) { efc_log_err(efc, "RPI allocation failure addr=%#x\n", fc_addr); return -EIO; } rnode->fc_id = fc_addr; rnode->nport = nport; return 0; } static int efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_remote_node *rnode = arg; struct sli4_mbox_command_header *hdr = (struct sli4_mbox_command_header *)mqe; int evt = 0; if (status || le16_to_cpu(hdr->status)) { efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status, le16_to_cpu(hdr->status)); rnode->attached = false; evt = EFC_EVT_NODE_ATTACH_FAIL; } else { rnode->attached = true; evt = EFC_EVT_NODE_ATTACH_OK; } efc_remote_node_cb(efc, evt, rnode); return 0; } int efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode, struct efc_dma *sparms) { int rc = -EIO; u8 buf[SLI4_BMBX_SIZE]; if (!rnode || !sparms) { efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n", rnode, sparms); return -EIO; } /* * If the attach count is non-zero, this RPI has already been reg'd. * Otherwise, register the RPI */ if (rnode->index == U32_MAX) { efc_log_err(efc, "bad parameter rnode->index invalid\n"); return -EIO; } /* Update a remote node object with the remote port's service params */ if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator, rnode->nport->indicator, rnode->fc_id, sparms, 0, 0)) rc = efc->tt.issue_mbox_rqst(efc->base, buf, efc_cmd_node_attach_cb, rnode); return rc; } int efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode) { int rc = 0; if (!rnode) { efc_log_err(efc, "bad parameter rnode=%p\n", rnode); return -EIO; } if (rnode->nport) { if (rnode->attached) { efc_log_err(efc, "rnode is still attached\n"); return -EIO; } if (rnode->indicator != U32_MAX) { if (sli_resource_free(efc->sli, SLI4_RSRC_RPI, rnode->indicator)) { efc_log_err(efc, "RPI free fail RPI %d addr=%#x\n", rnode->indicator, rnode->fc_id); rc = -EIO; } else { rnode->indicator = U32_MAX; rnode->index = U32_MAX; } } } return rc; } static int efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg) { struct efc_remote_node *rnode = arg; struct sli4_mbox_command_header *hdr = (struct sli4_mbox_command_header *)mqe; int evt = EFC_EVT_NODE_FREE_FAIL; int rc = 0; if (status || le16_to_cpu(hdr->status)) { efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status, le16_to_cpu(hdr->status)); /* * In certain cases, a non-zero MQE status is OK (all must be * true): * - node is attached * - status is 0x1400 */ if (!rnode->attached || (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG)) rc = -EIO; } if (!rc) { rnode->attached = false; evt = EFC_EVT_NODE_FREE_OK; } efc_remote_node_cb(efc, evt, rnode); return rc; } int efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode) { u8 buf[SLI4_BMBX_SIZE]; int rc = -EIO; if (!rnode) { efc_log_err(efc, "bad parameter rnode=%p\n", rnode); return -EIO; } if (rnode->nport) { if (!rnode->attached) return -EIO; rc = -EIO; if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator, SLI4_RSRC_RPI, U32_MAX)) rc = efc->tt.issue_mbox_rqst(efc->base, 
buf, efc_cmd_node_free_cb, rnode); if (rc != 0) { efc_log_err(efc, "UNREG_RPI failed\n"); rc = -EIO; } } return rc; }
linux-master
drivers/scsi/elx/libefc/efc_cmds.c
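The register/unregister helpers in the row above all follow one shape: format an SLI4 mailbox command into a fixed-size buffer with a sli_cmd_*() helper, hand it to efc->tt.issue_mbox_rqst() together with a completion callback and a context pointer, and let the callback map the returned status (and the mailbox header status) onto an EFC_EVT_*/EFC_HW_* event, freeing resources on failure. Below is a minimal standalone sketch of that format / issue / complete flow under simplified assumptions; the demo_* names, the fake opcode, and the inline "completion" are illustrative only, and a real transport would complete the command asynchronously.

/*
 * Standalone sketch (illustrative names only) of the mailbox pattern:
 * build a command into a fixed-size buffer, issue it with a completion
 * callback plus context pointer, and let the callback translate the
 * hardware status into an attach ok/fail outcome.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BMBX_SIZE	256

struct demo_port {
	uint32_t indicator;
	int attached;
};

typedef int (*demo_mbox_cb)(int status, uint8_t *mqe, void *arg);

/* Format step: would normally build a REG_VPI-style command in 'buf' */
static int demo_cmd_format(uint8_t *buf, uint32_t indicator)
{
	memset(buf, 0, DEMO_BMBX_SIZE);
	buf[0] = 0x96;				/* pretend opcode */
	memcpy(&buf[4], &indicator, sizeof(indicator));
	return 0;
}

/*
 * Issue step: a real transport queues the mailbox and invokes the
 * callback from its completion path; here it is invoked inline with a
 * fake success status to keep the sketch self-contained.
 */
static int demo_issue_mbox(uint8_t *buf, demo_mbox_cb cb, void *arg)
{
	int hw_status = 0;			/* pretend the command succeeded */

	return cb(hw_status, buf, arg);
}

/* Completion step: translate status into the object's new state */
static int demo_attach_cb(int status, uint8_t *mqe, void *arg)
{
	struct demo_port *port = arg;

	(void)mqe;
	port->attached = (status == 0);
	printf("port %#x attach %s\n", port->indicator,
	       port->attached ? "ok" : "failed");
	return status ? -1 : 0;
}

int main(void)
{
	struct demo_port port = { .indicator = 0x11, .attached = 0 };
	uint8_t buf[DEMO_BMBX_SIZE];

	if (demo_cmd_format(buf, port.indicator))
		return 1;
	return demo_issue_mbox(buf, demo_attach_cb, &port);
}

The context pointer is what lets a single generic completion path (demo_attach_cb here, efc_nport_attach_reg_vpi_cb above) find the object the command was issued for.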
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ #include "efc.h" int efc_remote_node_cb(void *arg, int event, void *data) { struct efc *efc = arg; struct efc_remote_node *rnode = data; struct efc_node *node = rnode->node; unsigned long flags = 0; spin_lock_irqsave(&efc->lock, flags); efc_node_post_event(node, event, NULL); spin_unlock_irqrestore(&efc->lock, flags); return 0; } struct efc_node * efc_node_find(struct efc_nport *nport, u32 port_id) { /* Find an FC node structure given the FC port ID */ return xa_load(&nport->lookup, port_id); } static void _efc_node_free(struct kref *arg) { struct efc_node *node = container_of(arg, struct efc_node, ref); struct efc *efc = node->efc; struct efc_dma *dma; dma = &node->sparm_dma_buf; dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys); memset(dma, 0, sizeof(struct efc_dma)); mempool_free(node, efc->node_pool); } struct efc_node *efc_node_alloc(struct efc_nport *nport, u32 port_id, bool init, bool targ) { int rc; struct efc_node *node = NULL; struct efc *efc = nport->efc; struct efc_dma *dma; if (nport->shutting_down) { efc_log_debug(efc, "node allocation when shutting down %06x", port_id); return NULL; } node = mempool_alloc(efc->node_pool, GFP_ATOMIC); if (!node) { efc_log_err(efc, "node allocation failed %06x", port_id); return NULL; } memset(node, 0, sizeof(*node)); dma = &node->sparm_dma_buf; dma->size = NODE_SPARAMS_SIZE; dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys); if (!dma->virt) { efc_log_err(efc, "node dma alloc failed\n"); goto dma_fail; } node->rnode.indicator = U32_MAX; node->nport = nport; node->efc = efc; node->init = init; node->targ = targ; spin_lock_init(&node->pend_frames_lock); INIT_LIST_HEAD(&node->pend_frames); spin_lock_init(&node->els_ios_lock); INIT_LIST_HEAD(&node->els_ios_list); node->els_io_enabled = true; rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport); if (rc) { efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc); goto hw_alloc_fail; } node->rnode.node = node; node->sm.app = node; node->evtdepth = 0; efc_node_update_display_name(node); rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC)); if (rc) { efc_log_err(efc, "Node lookup store failed: %d\n", rc); goto xa_fail; } /* initialize refcount */ kref_init(&node->ref); node->release = _efc_node_free; kref_get(&nport->ref); return node; xa_fail: efc_node_free_resources(efc, &node->rnode); hw_alloc_fail: dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys); dma_fail: mempool_free(node, efc->node_pool); return NULL; } void efc_node_free(struct efc_node *node) { struct efc_nport *nport; struct efc *efc; int rc = 0; struct efc_node *ns = NULL; nport = node->nport; efc = node->efc; node_printf(node, "Free'd\n"); if (node->refound) { /* * Save the name server node. 
We will send fake RSCN event at * the end to handle ignored RSCN event during node deletion */ ns = efc_node_find(node->nport, FC_FID_DIR_SERV); } if (!node->nport) { efc_log_err(efc, "Node already Freed\n"); return; } /* Free HW resources */ rc = efc_node_free_resources(efc, &node->rnode); if (rc < 0) efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc); /* if the gidpt_delay_timer is still running, then delete it */ if (timer_pending(&node->gidpt_delay_timer)) del_timer(&node->gidpt_delay_timer); xa_erase(&nport->lookup, node->rnode.fc_id); /* * If the node_list is empty, * then post a ALL_CHILD_NODES_FREE event to the nport, * after the lock is released. * The nport may be free'd as a result of the event. */ if (xa_empty(&nport->lookup)) efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE, NULL); node->nport = NULL; node->sm.current_state = NULL; kref_put(&nport->ref, nport->release); kref_put(&node->ref, node->release); if (ns) { /* sending fake RSCN event to name server node */ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL); } } static void efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length) { if (!dma || !buffer || !buffer_length) return; if (buffer_length > dma->size) buffer_length = dma->size; memcpy(dma->virt, buffer, buffer_length); dma->len = buffer_length; } int efc_node_attach(struct efc_node *node) { int rc = 0; struct efc_nport *nport = node->nport; struct efc_domain *domain = nport->domain; struct efc *efc = node->efc; if (!domain->attached) { efc_log_err(efc, "Warning: unattached domain\n"); return -EIO; } /* Update node->wwpn/wwnn */ efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn), efc_node_get_wwpn(node)); efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn), efc_node_get_wwnn(node)); efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4, sizeof(node->service_params) - 4); /* take lock to protect node->rnode.attached */ rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf); if (rc < 0) efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc); return rc; } void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length) { switch (fc_id) { case FC_FID_FLOGI: snprintf(buffer, buffer_length, "fabric"); break; case FC_FID_FCTRL: snprintf(buffer, buffer_length, "fabctl"); break; case FC_FID_DIR_SERV: snprintf(buffer, buffer_length, "nserve"); break; default: if (fc_id == FC_FID_DOM_MGR) { snprintf(buffer, buffer_length, "dctl%02x", (fc_id & 0x0000ff)); } else { snprintf(buffer, buffer_length, "%06x", fc_id); } break; } } void efc_node_update_display_name(struct efc_node *node) { u32 port_id = node->rnode.fc_id; struct efc_nport *nport = node->nport; char portid_display[16]; efc_node_fcid_display(port_id, portid_display, sizeof(portid_display)); snprintf(node->display_name, sizeof(node->display_name), "%s.%s", nport->display_name, portid_display); } void efc_node_send_ls_io_cleanup(struct efc_node *node) { if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) { efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n", node->display_name, node->ls_acc_oxid); node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE; node->ls_acc_io = NULL; } } static void efc_node_handle_implicit_logo(struct efc_node *node) { int rc; /* * currently, only case for implicit logo is PLOGI * recvd. 
Thus, node's ELS IO pending list won't be * empty (PLOGI will be on it) */ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); node_printf(node, "Reason: implicit logout, re-authenticate\n"); /* Re-attach node with the same HW node resources */ node->req_free = false; rc = efc_node_attach(node); efc_node_transition(node, __efc_d_wait_node_attach, NULL); node->els_io_enabled = true; if (rc < 0) efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL); } static void efc_node_handle_explicit_logo(struct efc_node *node) { s8 pend_frames_empty; unsigned long flags = 0; /* cleanup any pending LS_ACC ELSs */ efc_node_send_ls_io_cleanup(node); spin_lock_irqsave(&node->pend_frames_lock, flags); pend_frames_empty = list_empty(&node->pend_frames); spin_unlock_irqrestore(&node->pend_frames_lock, flags); /* * there are two scenarios where we want to keep * this node alive: * 1. there are pending frames that need to be * processed or * 2. we're an initiator and the remote node is * a target and we need to re-authenticate */ node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty); node_printf(node, "nport.ini=%d node.tgt=%d\n", node->nport->enable_ini, node->targ); if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) { u8 send_plogi = false; if (node->nport->enable_ini && node->targ) { /* * we're an initiator and * node shutting down is a target; * we'll need to re-authenticate in * initial state */ send_plogi = true; } /* * transition to __efc_d_init * (will retain HW node resources) */ node->els_io_enabled = true; node->req_free = false; /* * either pending frames exist or we are re-authenticating * with PLOGI (or both); in either case, return to initial * state */ efc_node_init_device(node, send_plogi); } /* else: let node shutdown occur */ } static void efc_node_purge_pending(struct efc_node *node) { struct efc *efc = node->efc; struct efc_hw_sequence *frame, *next; unsigned long flags = 0; spin_lock_irqsave(&node->pend_frames_lock, flags); list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) { list_del(&frame->list_entry); efc->tt.hw_seq_free(efc, frame); } spin_unlock_irqrestore(&node->pend_frames_lock, flags); } void __efc_node_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: { efc_node_hold_frames(node); WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list)); /* by default, we will be freeing node after we unwind */ node->req_free = true; switch (node->shutdown_reason) { case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO: /* Node shutdown b/c of PLOGI received when node * already logged in. 
We have PLOGI service * parameters, so submit node attach; we won't be * freeing this node */ efc_node_handle_implicit_logo(node); break; case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO: efc_node_handle_explicit_logo(node); break; case EFC_NODE_SHUTDOWN_DEFAULT: default: { /* * shutdown due to link down, * node going away (xport event) or * nport shutdown, purge pending and * proceed to cleanup node */ /* cleanup any pending LS_ACC ELSs */ efc_node_send_ls_io_cleanup(node); node_printf(node, "Shutdown reason: default, purge pending\n"); efc_node_purge_pending(node); break; } } break; } case EFC_EVT_EXIT: efc_node_accept_frames(node); break; default: __efc_node_common(__func__, ctx, evt, arg); } } static bool efc_node_check_els_quiesced(struct efc_node *node) { /* check to see if ELS requests, completions are quiesced */ if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 && efc_els_io_list_empty(node, &node->els_ios_list)) { if (!node->attached) { /* hw node detach already completed, proceed */ node_printf(node, "HW node not attached\n"); efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL); } else { /* * hw node detach hasn't completed, * transition and wait */ node_printf(node, "HW node still attached\n"); efc_node_transition(node, __efc_node_wait_node_free, NULL); } return true; } return false; } void efc_node_initiate_cleanup(struct efc_node *node) { /* * if ELS's have already been quiesced, will move to next state * if ELS's have not been quiesced, abort them */ if (!efc_node_check_els_quiesced(node)) { efc_node_hold_frames(node); efc_node_transition(node, __efc_node_wait_els_shutdown, NULL); } } void __efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { bool check_quiesce = false; struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* Node state machine: Wait for all ELSs to complete */ switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); if (efc_els_io_list_empty(node, &node->els_ios_list)) { node_printf(node, "All ELS IOs complete\n"); check_quiesce = true; } break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_REQ_OK: case EFC_EVT_SRRS_ELS_REQ_FAIL: case EFC_EVT_SRRS_ELS_REQ_RJT: case EFC_EVT_ELS_REQ_ABORTED: if (WARN_ON(!node->els_req_cnt)) break; node->els_req_cnt--; check_quiesce = true; break; case EFC_EVT_SRRS_ELS_CMPL_OK: case EFC_EVT_SRRS_ELS_CMPL_FAIL: if (WARN_ON(!node->els_cmpl_cnt)) break; node->els_cmpl_cnt--; check_quiesce = true; break; case EFC_EVT_ALL_CHILD_NODES_FREE: /* all ELS IO's complete */ node_printf(node, "All ELS IOs complete\n"); WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list)); check_quiesce = true; break; case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: check_quiesce = true; break; case EFC_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; /* ignore shutdown events as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; fallthrough; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); break; default: __efc_node_common(__func__, ctx, evt, arg); } if (check_quiesce) efc_node_check_els_quiesced(node); } void __efc_node_wait_node_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); 
break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_NODE_FREE_OK: /* node is officially no longer attached */ node->attached = false; efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL); break; case EFC_EVT_ALL_CHILD_NODES_FREE: case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: /* As IOs and ELS IO's complete we expect to get these events */ break; case EFC_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; /* ignore shutdown events as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; fallthrough; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: node_printf(node, "%s received\n", efc_sm_event_name(evt)); break; default: __efc_node_common(__func__, ctx, evt, arg); } } void __efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; struct efc *efc = node->efc; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); switch (evt) { case EFC_EVT_ENTER: efc_node_hold_frames(node); /* first check to see if no ELS IOs are outstanding */ if (efc_els_io_list_empty(node, &node->els_ios_list)) /* If there are any active IOS, Free them. */ efc_node_transition(node, __efc_node_shutdown, NULL); break; case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY: case EFC_EVT_ALL_CHILD_NODES_FREE: if (efc_els_io_list_empty(node, &node->els_ios_list)) efc_node_transition(node, __efc_node_shutdown, NULL); break; case EFC_EVT_EXIT: efc_node_accept_frames(node); break; case EFC_EVT_SRRS_ELS_REQ_FAIL: /* Can happen as ELS IO IO's complete */ if (WARN_ON(!node->els_req_cnt)) break; node->els_req_cnt--; break; /* ignore shutdown events as we're already in shutdown path */ case EFC_EVT_SHUTDOWN: /* have default shutdown event take precedence */ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT; fallthrough; case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO: case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO: efc_log_debug(efc, "[%s] %-20s\n", node->display_name, efc_sm_event_name(evt)); break; case EFC_EVT_DOMAIN_ATTACH_OK: /* don't care about domain_attach_ok */ break; default: __efc_node_common(__func__, ctx, evt, arg); } } void __efc_node_common(const char *funcname, struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = NULL; struct efc *efc = NULL; struct efc_node_cb *cbdata = arg; node = ctx->app; efc = node->efc; switch (evt) { case EFC_EVT_ENTER: case EFC_EVT_REENTER: case EFC_EVT_EXIT: case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: case EFC_EVT_NODE_MISSING: case EFC_EVT_FCP_CMD_RCVD: break; case EFC_EVT_NODE_REFOUND: node->refound = true; break; /* * node->attached must be set appropriately * for all node attach/detach events */ case EFC_EVT_NODE_ATTACH_OK: node->attached = true; break; case EFC_EVT_NODE_FREE_OK: case EFC_EVT_NODE_ATTACH_FAIL: node->attached = false; break; /* * handle any ELS completions that * other states either didn't care about * or forgot about */ case EFC_EVT_SRRS_ELS_CMPL_OK: case EFC_EVT_SRRS_ELS_CMPL_FAIL: if (WARN_ON(!node->els_cmpl_cnt)) break; node->els_cmpl_cnt--; break; /* * handle any ELS request completions that * other states either didn't care about * or forgot about */ case EFC_EVT_SRRS_ELS_REQ_OK: case EFC_EVT_SRRS_ELS_REQ_FAIL: case EFC_EVT_SRRS_ELS_REQ_RJT: case EFC_EVT_ELS_REQ_ABORTED: if (WARN_ON(!node->els_req_cnt)) break; node->els_req_cnt--; break; case EFC_EVT_ELS_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* * Unsupported ELS was 
received, * send LS_RJT, command not supported */ efc_log_debug(efc, "[%s] (%s) ELS x%02x, LS_RJT not supported\n", node->display_name, funcname, ((u8 *)cbdata->payload->dma.virt)[0]); efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), ELS_RJT_UNSUP, ELS_EXPL_NONE, 0); break; } case EFC_EVT_PLOGI_RCVD: case EFC_EVT_FLOGI_RCVD: case EFC_EVT_LOGO_RCVD: case EFC_EVT_PRLI_RCVD: case EFC_EVT_PRLO_RCVD: case EFC_EVT_PDISC_RCVD: case EFC_EVT_FDISC_RCVD: case EFC_EVT_ADISC_RCVD: case EFC_EVT_RSCN_RCVD: case EFC_EVT_SCR_RCVD: { struct fc_frame_header *hdr = cbdata->header->dma.virt; /* sm: / send ELS_RJT */ efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n", node->display_name, funcname, efc_sm_event_name(evt)); /* if we didn't catch this in a state, send generic LS_RJT */ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id), ELS_RJT_UNAB, ELS_EXPL_NONE, 0); break; } case EFC_EVT_ABTS_RCVD: { efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n", node->display_name, funcname, efc_sm_event_name(evt)); /* sm: / send BA_ACC */ efc_send_bls_acc(node, cbdata->header->dma.virt); break; } default: efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n", node->display_name, funcname, efc_sm_event_name(evt)); } } void efc_node_save_sparms(struct efc_node *node, void *payload) { memcpy(node->service_params, payload, sizeof(node->service_params)); } void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt, void *arg) { bool free_node = false; node->evtdepth++; efc_sm_post_event(&node->sm, evt, arg); /* If our event call depth is one and * we're not holding frames * then we can dispatch any pending frames. * We don't want to allow the efc_process_node_pending() * call to recurse. */ if (!node->hold_frames && node->evtdepth == 1) efc_process_node_pending(node); node->evtdepth--; /* * Free the node object if so requested, * and we're at an event call depth of zero */ if (node->evtdepth == 0 && node->req_free) free_node = true; if (free_node) efc_node_free(node); } void efc_node_transition(struct efc_node *node, void (*state)(struct efc_sm_ctx *, enum efc_sm_event, void *), void *data) { struct efc_sm_ctx *ctx = &node->sm; if (ctx->current_state == state) { efc_node_post_event(node, EFC_EVT_REENTER, data); } else { efc_node_post_event(node, EFC_EVT_EXIT, data); ctx->current_state = state; efc_node_post_event(node, EFC_EVT_ENTER, data); } } void efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name) { memset(buf, 0, buf_len); snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name); } u64 efc_node_get_wwpn(struct efc_node *node) { struct fc_els_flogi *sp = (struct fc_els_flogi *)node->service_params; return be64_to_cpu(sp->fl_wwpn); } u64 efc_node_get_wwnn(struct efc_node *node) { struct fc_els_flogi *sp = (struct fc_els_flogi *)node->service_params; return be64_to_cpu(sp->fl_wwnn); } int efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg, u8 cmd, void (*efc_node_common_func)(const char *, struct efc_sm_ctx *, enum efc_sm_event, void *), const char *funcname) { return 0; } int efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg, u16 cmd, void (*efc_node_common_func)(const char *, struct efc_sm_ctx *, enum efc_sm_event, void *), const char *funcname) { return 0; } int efc_els_io_list_empty(struct efc_node *node, struct list_head *list) { int empty; unsigned long flags = 0; spin_lock_irqsave(&node->els_ios_lock, flags); empty = list_empty(list); spin_unlock_irqrestore(&node->els_ios_lock, flags); return empty; } void 
efc_node_pause(struct efc_node *node, void (*state)(struct efc_sm_ctx *, enum efc_sm_event, void *)) { node->nodedb_state = state; efc_node_transition(node, __efc_node_paused, NULL); } void __efc_node_paused(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg) { struct efc_node *node = ctx->app; efc_node_evt_set(ctx, evt, __func__); node_sm_trace(); /* * This state is entered when a state is "paused". When resumed, the * node is transitioned to a previously saved state (node->ndoedb_state) */ switch (evt) { case EFC_EVT_ENTER: node_printf(node, "Paused\n"); break; case EFC_EVT_RESUME: { void (*pf)(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg); pf = node->nodedb_state; node->nodedb_state = NULL; efc_node_transition(node, pf, NULL); break; } case EFC_EVT_DOMAIN_ATTACH_OK: break; case EFC_EVT_SHUTDOWN: node->req_free = true; break; default: __efc_node_common(__func__, ctx, evt, arg); } } void efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *seq) { u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp); struct { u32 cmd; enum efc_sm_event evt; u32 payload_size; } els_cmd_list[] = { {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)}, {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)}, {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)}, {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size}, {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size}, {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)}, {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD}, {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD}, }; struct efc_node_cb cbdata; u8 *buf = seq->payload->dma.virt; enum efc_sm_event evt = EFC_EVT_ELS_RCVD; u32 i; memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; /* find a matching event for the ELS command */ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) { if (els_cmd_list[i].cmd == buf[0]) { evt = els_cmd_list[i].evt; break; } } efc_node_post_event(node, evt, &cbdata); } void efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq) { struct fc_ct_hdr *iu = seq->payload->dma.virt; struct fc_frame_header *hdr = seq->header->dma.virt; struct efc *efc = node->efc; u16 gscmd = be16_to_cpu(iu->ct_cmd); efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n", node->display_name, gscmd); efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu, FC_FS_RJT, FC_FS_RJT_UNSUP, 0); } void efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq) { struct efc_node_cb cbdata; memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata); } void efc_process_node_pending(struct efc_node *node) { struct efc *efc = node->efc; struct efc_hw_sequence *seq = NULL; u32 pend_frames_processed = 0; unsigned long flags = 0; for (;;) { /* need to check for hold frames condition after each frame * processed because any given frame could cause a transition * to a state that holds frames */ if (node->hold_frames) break; seq = NULL; /* Get next frame/sequence */ spin_lock_irqsave(&node->pend_frames_lock, flags); if (!list_empty(&node->pend_frames)) { seq = list_first_entry(&node->pend_frames, struct efc_hw_sequence, list_entry); list_del(&seq->list_entry); } spin_unlock_irqrestore(&node->pend_frames_lock, flags); if (!seq) { pend_frames_processed = 
node->pend_frames_processed; node->pend_frames_processed = 0; break; } node->pend_frames_processed++; /* now dispatch frame(s) to dispatch function */ efc_node_dispatch_frame(node, seq); efc->tt.hw_seq_free(efc, seq); } if (pend_frames_processed != 0) efc_log_debug(efc, "%u node frames held and processed\n", pend_frames_processed); } void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status) { unsigned long flags = 0; enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK; struct efc *efc = node->efc; if (status) evt = EFC_EVT_NODE_SESS_REG_FAIL; spin_lock_irqsave(&efc->lock, flags); /* Notify the node to resume */ efc_node_post_event(node, evt, NULL); spin_unlock_irqrestore(&efc->lock, flags); } void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node) { unsigned long flags = 0; spin_lock_irqsave(&efc->lock, flags); /* Notify the node to resume */ efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL); spin_unlock_irqrestore(&efc->lock, flags); } void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node) { unsigned long flags = 0; spin_lock_irqsave(&efc->lock, flags); /* Notify the node to resume */ efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL); spin_unlock_irqrestore(&efc->lock, flags); } void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node) { unsigned long flags = 0; spin_lock_irqsave(&efc->lock, flags); efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL); spin_unlock_irqrestore(&efc->lock, flags); } void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg) { struct efc *efc = node->efc; unsigned long flags = 0; spin_lock_irqsave(&efc->lock, flags); efc_node_post_event(node, evt, arg); spin_unlock_irqrestore(&efc->lock, flags); } void efc_node_post_shutdown(struct efc_node *node, void *arg) { unsigned long flags = 0; struct efc *efc = node->efc; spin_lock_irqsave(&efc->lock, flags); efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg); spin_unlock_irqrestore(&efc->lock, flags); }
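/*
 * Illustrative sketch only (not part of the driver): the drain loop in
 * efc_process_node_pending() above pops one pending frame at a time under
 * the list lock, dispatches it outside the lock, and re-checks the
 * hold_frames flag on every iteration, because dispatching a frame can
 * transition the node into a state that holds frames again. The standalone
 * program below mirrors that shape with hypothetical names (demo_node,
 * demo_frame, demo_dispatch) and a pthread mutex standing in for the
 * spinlock; it is a sketch of the pattern, not driver code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_frame {
	int id;
	struct demo_frame *next;
};

struct demo_node {
	bool hold_frames;
	pthread_mutex_t lock;
	struct demo_frame *pending;	/* singly linked pending list */
};

/* Dispatching may flip hold_frames, like a node state transition can. */
static void demo_dispatch(struct demo_node *node, struct demo_frame *frame)
{
	printf("dispatching frame %d\n", frame->id);
	if (frame->id == 2)
		node->hold_frames = true;	/* simulate a holding state */
	free(frame);
}

static void demo_process_pending(struct demo_node *node)
{
	unsigned int processed = 0;

	for (;;) {
		struct demo_frame *frame = NULL;

		/* re-check after every dispatched frame */
		if (node->hold_frames)
			break;

		pthread_mutex_lock(&node->lock);
		if (node->pending) {
			frame = node->pending;
			node->pending = frame->next;
		}
		pthread_mutex_unlock(&node->lock);

		if (!frame)
			break;

		processed++;
		demo_dispatch(node, frame);
	}

	if (processed)
		printf("%u pending frames processed\n", processed);
}

int main(void)
{
	struct demo_node node = { .hold_frames = false, .pending = NULL };
	int id;

	pthread_mutex_init(&node.lock, NULL);

	/* queue frames 3, 2, 1 so they pop in order 1, 2, 3 */
	for (id = 3; id >= 1; id--) {
		struct demo_frame *f = malloc(sizeof(*f));

		if (!f)
			return 1;
		f->id = id;
		f->next = node.pending;
		node.pending = f;
	}

	demo_process_pending(&node);	/* frame 2 sets hold; frame 3 stays queued */

	while (node.pending) {		/* free whatever was left held */
		struct demo_frame *f = node.pending;

		node.pending = f->next;
		free(f);
	}
	pthread_mutex_destroy(&node.lock);
	return 0;
}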
linux-master
drivers/scsi/elx/libefc/efc_node.c
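/*
 * Illustrative sketch only (not part of the driver): a standalone program
 * mirroring the dispatch convention of efc_node_transition() in the file
 * above -- re-entering the current state posts a REENTER event, while
 * switching states posts EXIT to the old handler and then ENTER to the new
 * one (the real code additionally drains pending frames via
 * efc_node_post_event() at event depth one). All names below (demo_ctx,
 * state_a, state_b, demo_transition) are hypothetical.
 */
#include <stdio.h>

enum demo_event { DEMO_EVT_ENTER, DEMO_EVT_EXIT, DEMO_EVT_REENTER };

struct demo_ctx;
typedef void (*demo_state_fn)(struct demo_ctx *ctx, enum demo_event evt);

struct demo_ctx {
	demo_state_fn current_state;
};

static const char *demo_evt_name(enum demo_event evt)
{
	switch (evt) {
	case DEMO_EVT_ENTER:   return "ENTER";
	case DEMO_EVT_EXIT:    return "EXIT";
	case DEMO_EVT_REENTER: return "REENTER";
	}
	return "?";
}

static void state_a(struct demo_ctx *ctx, enum demo_event evt)
{
	(void)ctx;
	printf("state_a: %s\n", demo_evt_name(evt));
}

static void state_b(struct demo_ctx *ctx, enum demo_event evt)
{
	(void)ctx;
	printf("state_b: %s\n", demo_evt_name(evt));
}

/* Same shape as efc_node_transition(): REENTER, or EXIT old then ENTER new. */
static void demo_transition(struct demo_ctx *ctx, demo_state_fn next)
{
	if (ctx->current_state == next) {
		ctx->current_state(ctx, DEMO_EVT_REENTER);
	} else {
		if (ctx->current_state)
			ctx->current_state(ctx, DEMO_EVT_EXIT);
		ctx->current_state = next;
		ctx->current_state(ctx, DEMO_EVT_ENTER);
	}
}

int main(void)
{
	struct demo_ctx ctx = { .current_state = NULL };

	demo_transition(&ctx, state_a);	/* state_a: ENTER */
	demo_transition(&ctx, state_a);	/* state_a: REENTER */
	demo_transition(&ctx, state_b);	/* state_a: EXIT, then state_b: ENTER */
	return 0;
}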
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. */ /* * All common (i.e. transport-independent) SLI-4 functions are implemented * in this file. */ #include "sli4.h" static struct sli4_asic_entry_t sli4_asic_table[] = { { SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5}, { SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5}, { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6}, { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6}, { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6}, { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6}, { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7}, { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7}, }; /* Convert queue type enum (SLI_QTYPE_*) into a string */ static char *SLI4_QNAME[] = { "Event Queue", "Completion Queue", "Mailbox Queue", "Work Queue", "Receive Queue", "Undefined" }; /** * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer. * * @sli4: SLI context pointer. * @buf: Destination buffer for the command. * @length: Length in bytes of attached command. * @dma: DMA buffer for non-embedded commands. * Return: Command payload buffer. */ static void * sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length, struct efc_dma *dma) { struct sli4_cmd_sli_config *config; u32 flags; if (length > sizeof(config->payload.embed) && !dma) { efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n", length); return NULL; } memset(buf, 0, SLI4_BMBX_SIZE); config = buf; config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG; if (!dma) { flags = SLI4_SLICONF_EMB; config->dw1_flags = cpu_to_le32(flags); config->payload_len = cpu_to_le32(length); return config->payload.embed; } flags = SLI4_SLICONF_PMDCMD_VAL_1; flags &= ~SLI4_SLICONF_EMB; config->dw1_flags = cpu_to_le32(flags); config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys)); config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys)); config->payload.mem.length = cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN); config->payload_len = cpu_to_le32(dma->size); /* save pointer to DMA for BMBX dumping purposes */ sli4->bmbx_non_emb_pmd = dma; return dma->virt; } /** * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command. * * @sli4: SLI context pointer. * @buf: Destination buffer for the command. * @qmem: DMA memory for queue. * @eq_id: EQ id assosiated with this cq. * Return: status -EIO/0. 
*/ static int sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 eq_id) { struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL; u32 p; uintptr_t addr; u32 num_pages = 0; size_t cmd_size = 0; u32 page_size = 0; u32 n_cqe = 0; u32 dw5_flags = 0; u16 dw6w1_arm = 0; __le32 len; /* First calculate number of pages and the mailbox cmd length */ n_cqe = qmem->size / SLI4_CQE_BYTES; switch (n_cqe) { case 256: case 512: case 1024: case 2048: page_size = SZ_4K; break; case 4096: page_size = SZ_8K; break; default: return -EIO; } num_pages = sli_page_count(qmem->size, page_size); cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2) + SZ_DMAADDR * num_pages; cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL); if (!cqv2) return -EIO; len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages); sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON, CMD_V2, len); cqv2->page_size = page_size / SLI_PAGE_SIZE; /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */ cqv2->num_pages = cpu_to_le16(num_pages); if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES) return -EIO; switch (num_pages) { case 1: dw5_flags |= SLI4_CQ_CNT_VAL(256); break; case 2: dw5_flags |= SLI4_CQ_CNT_VAL(512); break; case 4: dw5_flags |= SLI4_CQ_CNT_VAL(1024); break; case 8: dw5_flags |= SLI4_CQ_CNT_VAL(LARGE); cqv2->cqe_count = cpu_to_le16(n_cqe); break; default: efc_log_err(sli4, "num_pages %d not valid\n", num_pages); return -EIO; } if (sli4->if_type == SLI4_INTF_IF_TYPE_6) dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID; dw5_flags |= SLI4_CREATE_CQV2_EVT; dw5_flags |= SLI4_CREATE_CQV2_VALID; cqv2->dw5_flags = cpu_to_le32(dw5_flags); cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm); cqv2->eq_id = cpu_to_le16(eq_id); for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) { cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); } return 0; } static int sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem) { struct sli4_rqst_cmn_create_eq *eq; u32 p; uintptr_t addr; u16 num_pages; u32 dw5_flags = 0; u32 dw6_flags = 0, ver; eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq), NULL); if (!eq) return -EIO; if (sli4->if_type == SLI4_INTF_IF_TYPE_6) ver = CMD_V2; else ver = CMD_V0; sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON, ver, SLI4_RQST_PYLD_LEN(cmn_create_eq)); /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ num_pages = qmem->size / SLI_PAGE_SIZE; eq->num_pages = cpu_to_le16(num_pages); switch (num_pages) { case 1: dw5_flags |= SLI4_EQE_SIZE_4; dw6_flags |= SLI4_EQ_CNT_VAL(1024); break; case 2: dw5_flags |= SLI4_EQE_SIZE_4; dw6_flags |= SLI4_EQ_CNT_VAL(2048); break; case 4: dw5_flags |= SLI4_EQE_SIZE_4; dw6_flags |= SLI4_EQ_CNT_VAL(4096); break; default: efc_log_err(sli4, "num_pages %d not valid\n", num_pages); return -EIO; } if (sli4->if_type == SLI4_INTF_IF_TYPE_6) dw5_flags |= SLI4_CREATE_EQ_AUTOVALID; dw5_flags |= SLI4_CREATE_EQ_VALID; dw6_flags &= (~SLI4_CREATE_EQ_ARM); eq->dw5_flags = cpu_to_le32(dw5_flags); eq->dw6_flags = cpu_to_le32(dw6_flags); eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI); for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += SLI_PAGE_SIZE) { eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr)); eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr)); } return 0; } static int sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id) { 
struct sli4_rqst_cmn_create_mq_ext *mq; u32 p; uintptr_t addr; u32 num_pages; u16 dw6w1_flags = 0; mq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL); if (!mq) return -EIO; sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_create_mq_ext)); /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */ num_pages = qmem->size / SLI_PAGE_SIZE; mq->num_pages = cpu_to_le16(num_pages); switch (num_pages) { case 1: dw6w1_flags |= SLI4_MQE_SIZE_16; break; case 2: dw6w1_flags |= SLI4_MQE_SIZE_32; break; case 4: dw6w1_flags |= SLI4_MQE_SIZE_64; break; case 8: dw6w1_flags |= SLI4_MQE_SIZE_128; break; default: efc_log_info(sli4, "num_pages %d not valid\n", num_pages); return -EIO; } mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL); if (sli4->params.mq_create_version) { mq->cq_id_v1 = cpu_to_le16(cq_id); mq->hdr.dw3_version = cpu_to_le32(CMD_V1); } else { dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT); } mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL); mq->dw6w1_flags = cpu_to_le16(dw6w1_flags); for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += SLI_PAGE_SIZE) { mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); } return 0; } int sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id) { struct sli4_rqst_wq_create *wq; u32 p; uintptr_t addr; u32 page_size = 0; u32 n_wqe = 0; u16 num_pages; wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create), NULL); if (!wq) return -EIO; sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC, CMD_V1, SLI4_RQST_PYLD_LEN(wq_create)); n_wqe = qmem->size / sli4->wqe_size; switch (qmem->size) { case 4096: case 8192: case 16384: case 32768: page_size = SZ_4K; break; case 65536: page_size = SZ_8K; break; case 131072: page_size = SZ_16K; break; case 262144: page_size = SZ_32K; break; case 524288: page_size = SZ_64K; break; default: return -EIO; } /* valid values for number of pages(num_pages): 1-8 */ num_pages = sli_page_count(qmem->size, page_size); wq->num_pages = cpu_to_le16(num_pages); if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES) return -EIO; wq->cq_id = cpu_to_le16(cq_id); wq->page_size = page_size / SLI_PAGE_SIZE; if (sli4->wqe_size == SLI4_WQE_EXT_BYTES) wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE; else wq->wqe_size_byte |= SLI4_WQE_SIZE; wq->wqe_count = cpu_to_le16(n_wqe); for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) { wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); } return 0; } static int sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id, u16 buffer_size) { struct sli4_rqst_rq_create_v1 *rq; u32 p; uintptr_t addr; u32 num_pages; rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1), NULL); if (!rq) return -EIO; sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC, CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1)); /* Disable "no buffer warnings" to avoid Lancer bug */ rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB; /* valid values for number of pages: 1-8 (sec 4.5.6) */ num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); rq->num_pages = cpu_to_le16(num_pages); if (!num_pages || num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) { efc_log_info(sli4, "num_pages %d not valid, max %d\n", num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES); return -EIO; } /* * RQE count is the total number of entries 
(note not lg2(# entries)) */ rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE); rq->rqe_size_byte |= SLI4_RQE_SIZE_8; rq->page_size = SLI4_RQ_PAGE_SIZE_4096; if (buffer_size < sli4->rq_min_buf_size || buffer_size > sli4->rq_max_buf_size) { efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n", buffer_size, sli4->rq_min_buf_size, sli4->rq_max_buf_size); return -EIO; } rq->buffer_size = cpu_to_le32(buffer_size); rq->cq_id = cpu_to_le16(cq_id); for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += SLI_PAGE_SIZE) { rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); } return 0; } static int sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs, struct sli4_queue *qs[], u32 base_cq_id, u32 header_buffer_size, u32 payload_buffer_size, struct efc_dma *dma) { struct sli4_rqst_rq_create_v2 *req = NULL; u32 i, p, offset = 0; u32 payload_size, page_count; uintptr_t addr; u32 num_pages; __le32 len; page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs; /* Payload length must accommodate both request and response */ payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) + SZ_DMAADDR * page_count, sizeof(struct sli4_rsp_cmn_create_queue_set)); dma->size = payload_size; dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, &dma->phys, GFP_KERNEL); if (!dma->virt) return -EIO; memset(dma->virt, 0, payload_size); req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma); if (!req) return -EIO; len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count); sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC, CMD_V2, len); /* Fill Payload fields */ req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB; num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE); req->num_pages = cpu_to_le16(num_pages); req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE); req->rqe_size_byte |= SLI4_RQE_SIZE_8; req->page_size = SLI4_RQ_PAGE_SIZE_4096; req->rq_count = num_rqs; req->base_cq_id = cpu_to_le16(base_cq_id); req->hdr_buffer_size = cpu_to_le16(header_buffer_size); req->payload_buffer_size = cpu_to_le16(payload_buffer_size); for (i = 0; i < num_rqs; i++) { for (p = 0, addr = qs[i]->dma.phys; p < num_pages; p++, addr += SLI_PAGE_SIZE) { req->page_phys_addr[offset].low = cpu_to_le32(lower_32_bits(addr)); req->page_phys_addr[offset].high = cpu_to_le32(upper_32_bits(addr)); offset++; } } return 0; } static void __sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q) { if (!q->dma.size) return; dma_free_coherent(&sli4->pci->dev, q->dma.size, q->dma.virt, q->dma.phys); memset(&q->dma, 0, sizeof(struct efc_dma)); } int __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype, size_t size, u32 n_entries, u32 align) { if (q->dma.virt) { efc_log_err(sli4, "%s failed\n", __func__); return -EIO; } memset(q, 0, sizeof(struct sli4_queue)); q->dma.size = size * n_entries; q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size, &q->dma.phys, GFP_KERNEL); if (!q->dma.virt) { memset(&q->dma, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]); return -EIO; } memset(q->dma.virt, 0, size * n_entries); spin_lock_init(&q->lock); q->type = qtype; q->size = size; q->length = n_entries; if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) { /* For prism, phase will be flipped after * a sweep through eq and cq */ q->phase = 1; } /* Limit to hwf the queue size per interrupt */ q->proc_limit = n_entries / 2; if (q->type == SLI4_QTYPE_EQ) q->posted_limit = 
q->length / 2; else q->posted_limit = 64; return 0; } int sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, u32 n_entries, u32 buffer_size, struct sli4_queue *cq, bool is_hdr) { if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE, n_entries, SLI_PAGE_SIZE)) return -EIO; if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id, buffer_size)) goto error; if (__sli_create_queue(sli4, q)) goto error; if (is_hdr && q->id & 1) { efc_log_info(sli4, "bad header RQ_ID %d\n", q->id); goto error; } else if (!is_hdr && (q->id & 1) == 0) { efc_log_info(sli4, "bad data RQ_ID %d\n", q->id); goto error; } if (is_hdr) q->u.flag |= SLI4_QUEUE_FLAG_HDR; else q->u.flag &= ~SLI4_QUEUE_FLAG_HDR; return 0; error: __sli_queue_destroy(sli4, q); return -EIO; } int sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, struct sli4_queue *qs[], u32 base_cq_id, u32 n_entries, u32 header_buffer_size, u32 payload_buffer_size) { u32 i; struct efc_dma dma = {0}; struct sli4_rsp_cmn_create_queue_set *rsp = NULL; void __iomem *db_regaddr = NULL; u32 num_rqs = num_rq_pairs * 2; for (i = 0; i < num_rqs; i++) { if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ, SLI4_RQE_SIZE, n_entries, SLI_PAGE_SIZE)) { goto error; } } if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id, header_buffer_size, payload_buffer_size, &dma)) { goto error; } if (sli_bmbx_command(sli4)) { efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n"); goto error; } if (sli4->if_type == SLI4_INTF_IF_TYPE_6) db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG; else db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG; rsp = dma.virt; if (rsp->hdr.status) { efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n", rsp->hdr.status, rsp->hdr.additional_status); goto error; } for (i = 0; i < num_rqs; i++) { qs[i]->id = i + le16_to_cpu(rsp->q_id); if ((qs[i]->id & 1) == 0) qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR; else qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR; qs[i]->db_regaddr = db_regaddr; } dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); return 0; error: for (i = 0; i < num_rqs; i++) __sli_queue_destroy(sli4, qs[i]); if (dma.virt) dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); return -EIO; } static int sli_res_sli_config(struct sli4 *sli4, void *buf) { struct sli4_cmd_sli_config *sli_config = buf; /* sanity check */ if (!buf || sli_config->hdr.command != SLI4_MBX_CMD_SLI_CONFIG) { efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf, buf ? 
sli_config->hdr.command : -1); return -EIO; } if (le16_to_cpu(sli_config->hdr.status)) return le16_to_cpu(sli_config->hdr.status); if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB) return sli_config->payload.embed[4]; efc_log_info(sli4, "external buffers not supported\n"); return -EIO; } int __sli_create_queue(struct sli4 *sli4, struct sli4_queue *q) { struct sli4_rsp_cmn_create_queue *res_q = NULL; if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox write fail %s\n", SLI4_QNAME[q->type]); return -EIO; } if (sli_res_sli_config(sli4, sli4->bmbx.virt)) { efc_log_err(sli4, "bad status create %s\n", SLI4_QNAME[q->type]); return -EIO; } res_q = (void *)((u8 *)sli4->bmbx.virt + offsetof(struct sli4_cmd_sli_config, payload)); if (res_q->hdr.status) { efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n", SLI4_QNAME[q->type], res_q->hdr.status, res_q->hdr.additional_status); return -EIO; } q->id = le16_to_cpu(res_q->q_id); switch (q->type) { case SLI4_QTYPE_EQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG; else q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; break; case SLI4_QTYPE_CQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG; else q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; break; case SLI4_QTYPE_MQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG; else q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG; break; case SLI4_QTYPE_RQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG; else q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG; break; case SLI4_QTYPE_WQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG; else q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG; break; default: break; } return 0; } int sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype) { u32 size = 0; switch (qtype) { case SLI4_QTYPE_EQ: size = sizeof(u32); break; case SLI4_QTYPE_CQ: size = 16; break; case SLI4_QTYPE_MQ: size = 256; break; case SLI4_QTYPE_WQ: size = sli4->wqe_size; break; case SLI4_QTYPE_RQ: size = SLI4_RQE_SIZE; break; default: efc_log_info(sli4, "unknown queue type %d\n", qtype); return -1; } return size; } int sli_queue_alloc(struct sli4 *sli4, u32 qtype, struct sli4_queue *q, u32 n_entries, struct sli4_queue *assoc) { int size; u32 align = 0; /* get queue size */ size = sli_get_queue_entry_size(sli4, qtype); if (size < 0) return -EIO; align = SLI_PAGE_SIZE; if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) return -EIO; switch (qtype) { case SLI4_QTYPE_EQ: if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) && !__sli_create_queue(sli4, q)) return 0; break; case SLI4_QTYPE_CQ: if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma, assoc ? assoc->id : 0) && !__sli_create_queue(sli4, q)) return 0; break; case SLI4_QTYPE_MQ: assoc->u.flag |= SLI4_QUEUE_FLAG_MQ; if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt, &q->dma, assoc->id) && !__sli_create_queue(sli4, q)) return 0; break; case SLI4_QTYPE_WQ: if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma, assoc ? 
assoc->id : 0) && !__sli_create_queue(sli4, q)) return 0; break; default: efc_log_info(sli4, "unknown queue type %d\n", qtype); } __sli_queue_destroy(sli4, q); return -EIO; } static int sli_cmd_cq_set_create(struct sli4 *sli4, struct sli4_queue *qs[], u32 num_cqs, struct sli4_queue *eqs[], struct efc_dma *dma) { struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL; uintptr_t addr; u32 i, offset = 0, page_bytes = 0, payload_size; u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq; u32 dw5_flags = 0; u16 dw6w1_flags = 0; __le32 req_len; n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES; switch (n_cqe) { case 256: case 512: case 1024: case 2048: page_size = 1; break; case 4096: page_size = 2; break; default: return -EIO; } page_bytes = page_size * SLI_PAGE_SIZE; num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes); payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) + (SZ_DMAADDR * num_pages_cq * num_cqs), sizeof(struct sli4_rsp_cmn_create_queue_set)); dma->size = payload_size; dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, &dma->phys, GFP_KERNEL); if (!dma->virt) return -EIO; memset(dma->virt, 0, payload_size); req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma); if (!req) return -EIO; req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0, SZ_DMAADDR * num_pages_cq * num_cqs); sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC, CMD_V0, req_len); req->page_size = page_size; req->num_pages = cpu_to_le16(num_pages_cq); switch (num_pages_cq) { case 1: dw5_flags |= SLI4_CQ_CNT_VAL(256); break; case 2: dw5_flags |= SLI4_CQ_CNT_VAL(512); break; case 4: dw5_flags |= SLI4_CQ_CNT_VAL(1024); break; case 8: dw5_flags |= SLI4_CQ_CNT_VAL(LARGE); dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT); break; default: efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq); return -EIO; } dw5_flags |= SLI4_CREATE_CQSETV0_EVT; dw5_flags |= SLI4_CREATE_CQSETV0_VALID; if (sli4->if_type == SLI4_INTF_IF_TYPE_6) dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID; dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM; req->dw5_flags = cpu_to_le32(dw5_flags); req->dw6w1_flags = cpu_to_le16(dw6w1_flags); req->num_cq_req = cpu_to_le16(num_cqs); /* Fill page addresses of all the CQs. */ for (i = 0; i < num_cqs; i++) { req->eq_id[i] = cpu_to_le16(eqs[i]->id); for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq; p++, addr += page_bytes) { req->page_phys_addr[offset].low = cpu_to_le32(lower_32_bits(addr)); req->page_phys_addr[offset].high = cpu_to_le32(upper_32_bits(addr)); offset++; } } return 0; } int sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[]) { u32 i; struct efc_dma dma = {0}; struct sli4_rsp_cmn_create_queue_set *res; void __iomem *db_regaddr; /* Align the queue DMA memory */ for (i = 0; i < num_cqs; i++) { if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES, n_entries, SLI_PAGE_SIZE)) goto error; } if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma)) goto error; if (sli_bmbx_command(sli4)) goto error; if (sli4->if_type == SLI4_INTF_IF_TYPE_6) db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG; else db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; res = dma.virt; if (res->hdr.status) { efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n", res->hdr.status, res->hdr.additional_status); goto error; } /* Check if we got all requested CQs. */ if (le16_to_cpu(res->num_q_allocated) != num_cqs) { efc_log_crit(sli4, "Requested count CQs doesn't match.\n"); goto error; } /* Fill the resp cq ids. 
*/ for (i = 0; i < num_cqs; i++) { qs[i]->id = le16_to_cpu(res->q_id) + i; qs[i]->db_regaddr = db_regaddr; } dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); return 0; error: for (i = 0; i < num_cqs; i++) __sli_queue_destroy(sli4, qs[i]); if (dma.virt) dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); return -EIO; } static int sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id) { struct sli4_rqst_cmn_destroy_q *req; /* Payload length must accommodate both request and response */ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL); if (!req) return -EIO; sli_cmd_fill_hdr(&req->hdr, opc, subsystem, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q)); req->q_id = cpu_to_le16(q_id); return 0; } int sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, u32 destroy_queues, u32 free_memory) { int rc = 0; u8 opcode, subsystem; struct sli4_rsp_hdr *res; if (!q) { efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q); return -EIO; } if (!destroy_queues) goto free_mem; switch (q->type) { case SLI4_QTYPE_EQ: opcode = SLI4_CMN_DESTROY_EQ; subsystem = SLI4_SUBSYSTEM_COMMON; break; case SLI4_QTYPE_CQ: opcode = SLI4_CMN_DESTROY_CQ; subsystem = SLI4_SUBSYSTEM_COMMON; break; case SLI4_QTYPE_MQ: opcode = SLI4_CMN_DESTROY_MQ; subsystem = SLI4_SUBSYSTEM_COMMON; break; case SLI4_QTYPE_WQ: opcode = SLI4_OPC_WQ_DESTROY; subsystem = SLI4_SUBSYSTEM_FC; break; case SLI4_QTYPE_RQ: opcode = SLI4_OPC_RQ_DESTROY; subsystem = SLI4_SUBSYSTEM_FC; break; default: efc_log_info(sli4, "bad queue type %d\n", q->type); rc = -EIO; goto free_mem; } rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id); if (rc) goto free_mem; rc = sli_bmbx_command(sli4); if (rc) goto free_mem; rc = sli_res_sli_config(sli4, sli4->bmbx.virt); if (rc) goto free_mem; res = (void *)((u8 *)sli4->bmbx.virt + offsetof(struct sli4_cmd_sli_config, payload)); if (res->status) { efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n", SLI4_QNAME[q->type], res->status, res->additional_status); rc = -EIO; goto free_mem; } free_mem: if (free_memory) __sli_queue_destroy(sli4, q); return rc; } int sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm) { u32 val; unsigned long flags = 0; u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM; spin_lock_irqsave(&q->lock, flags); if (sli4->if_type == SLI4_INTF_IF_TYPE_6) val = sli_format_if6_eq_db_data(q->n_posted, q->id, a); else val = sli_format_eq_db_data(q->n_posted, q->id, a); writel(val, q->db_regaddr); q->n_posted = 0; spin_unlock_irqrestore(&q->lock, flags); return 0; } int sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm) { u32 val = 0; unsigned long flags = 0; u32 a = arm ? 
SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM; spin_lock_irqsave(&q->lock, flags); switch (q->type) { case SLI4_QTYPE_EQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) val = sli_format_if6_eq_db_data(q->n_posted, q->id, a); else val = sli_format_eq_db_data(q->n_posted, q->id, a); writel(val, q->db_regaddr); q->n_posted = 0; break; case SLI4_QTYPE_CQ: if (sli4->if_type == SLI4_INTF_IF_TYPE_6) val = sli_format_if6_cq_db_data(q->n_posted, q->id, a); else val = sli_format_cq_db_data(q->n_posted, q->id, a); writel(val, q->db_regaddr); q->n_posted = 0; break; default: efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n", SLI4_QNAME[q->type]); } spin_unlock_irqrestore(&q->lock, flags); return 0; } int sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) { u8 *qe = q->dma.virt; u32 qindex; u32 val = 0; qindex = q->index; qe += q->index * q->size; if (sli4->params.perf_wq_id_association) sli_set_wq_id_association(entry, q->id); memcpy(qe, entry, q->size); val = sli_format_wq_db_data(q->id); writel(val, q->db_regaddr); q->index = (q->index + 1) & (q->length - 1); return qindex; } int sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) { u8 *qe = q->dma.virt; u32 qindex; u32 val = 0; unsigned long flags; spin_lock_irqsave(&q->lock, flags); qindex = q->index; qe += q->index * q->size; memcpy(qe, entry, q->size); val = sli_format_mq_db_data(q->id); writel(val, q->db_regaddr); q->index = (q->index + 1) & (q->length - 1); spin_unlock_irqrestore(&q->lock, flags); return qindex; } int sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) { u8 *qe = q->dma.virt; u32 qindex; u32 val = 0; qindex = q->index; qe += q->index * q->size; memcpy(qe, entry, q->size); /* * In RQ-pair, an RQ either contains the FC header * (i.e. is_hdr == TRUE) or the payload. * * Don't ring doorbell for payload RQ */ if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR)) goto skip; val = sli_format_rq_db_data(q->id); writel(val, q->db_regaddr); skip: q->index = (q->index + 1) & (q->length - 1); return qindex; } int sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) { u8 *qe = q->dma.virt; unsigned long flags = 0; u16 wflags = 0; spin_lock_irqsave(&q->lock, flags); qe += q->index * q->size; /* Check if eqe is valid */ wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags); if ((wflags & SLI4_EQE_VALID) != q->phase) { spin_unlock_irqrestore(&q->lock, flags); return -EIO; } if (sli4->if_type != SLI4_INTF_IF_TYPE_6) { wflags &= ~SLI4_EQE_VALID; ((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags); } memcpy(entry, qe, q->size); q->index = (q->index + 1) & (q->length - 1); q->n_posted++; /* * For prism, the phase value will be used * to check the validity of eq/cq entries. * The value toggles after a complete sweep * through the queue. 
*/ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0) q->phase ^= (u16)0x1; spin_unlock_irqrestore(&q->lock, flags); return 0; } int sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) { u8 *qe = q->dma.virt; unsigned long flags = 0; u32 dwflags = 0; bool valid_bit_set; spin_lock_irqsave(&q->lock, flags); qe += q->index * q->size; /* Check if cqe is valid */ dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags); valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0; if (valid_bit_set != q->phase) { spin_unlock_irqrestore(&q->lock, flags); return -EIO; } if (sli4->if_type != SLI4_INTF_IF_TYPE_6) { dwflags &= ~SLI4_MCQE_VALID; ((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags); } memcpy(entry, qe, q->size); q->index = (q->index + 1) & (q->length - 1); q->n_posted++; /* * For prism, the phase value will be used * to check the validity of eq/cq entries. * The value toggles after a complete sweep * through the queue. */ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0) q->phase ^= (u16)0x1; spin_unlock_irqrestore(&q->lock, flags); return 0; } int sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) { u8 *qe = q->dma.virt; unsigned long flags = 0; spin_lock_irqsave(&q->lock, flags); qe += q->u.r_idx * q->size; /* Check if mqe is valid */ if (q->index == q->u.r_idx) { spin_unlock_irqrestore(&q->lock, flags); return -EIO; } memcpy(entry, qe, q->size); q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1); spin_unlock_irqrestore(&q->lock, flags); return 0; } int sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id) { struct sli4_eqe *eqe = (void *)buf; int rc = 0; u16 flags = 0; u16 majorcode; u16 minorcode; if (!buf || !cq_id) { efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n", sli4, buf, cq_id); return -EIO; } flags = le16_to_cpu(eqe->dw0w0_flags); majorcode = (flags & SLI4_EQE_MJCODE) >> 1; minorcode = (flags & SLI4_EQE_MNCODE) >> 4; switch (majorcode) { case SLI4_MAJOR_CODE_STANDARD: *cq_id = le16_to_cpu(eqe->resource_id); break; case SLI4_MAJOR_CODE_SENTINEL: efc_log_info(sli4, "sentinel EQE\n"); rc = SLI4_EQE_STATUS_EQ_FULL; break; default: efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n", majorcode, minorcode); rc = -EIO; } return rc; } int sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe, enum sli4_qentry *etype, u16 *q_id) { int rc = 0; if (!cq || !cqe || !etype) { efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n", sli4, cq, cqe, etype, q_id); return -EINVAL; } /* Parse a CQ entry to retrieve the event type and the queue id */ if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) { struct sli4_mcqe *mcqe = (void *)cqe; if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) { *etype = SLI4_QENTRY_ASYNC; } else { *etype = SLI4_QENTRY_MQ; rc = sli_cqe_mq(sli4, mcqe); } *q_id = -1; } else { rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id); } return rc; } int sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type, bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id) { struct sli4_abort_wqe *abort = buf; memset(buf, 0, sli->wqe_size); switch (type) { case SLI4_ABORT_XRI: abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; if (mask) { efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask", mask, ids); mask = 0; } break; case SLI4_ABORT_ABORT_ID: abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG; break; case SLI4_ABORT_REQUEST_ID: abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG; break; default: efc_log_info(sli, "unsupported type %#x\n", type); return -EIO; } abort->ia_ir_byte |= send_abts ? 
0 : 1; /* Suppress ABTS retries */ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR; abort->t_mask = cpu_to_le32(mask); abort->t_tag = cpu_to_le32(ids); abort->command = SLI4_WQE_ABORT; abort->request_tag = cpu_to_le16(tag); abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD); abort->cq_id = cpu_to_le16(cq_id); abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE; return 0; } int sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, struct sli_els_params *params) { struct sli4_els_request64_wqe *els = buf; struct sli4_sge *sge = sgl->virt; bool is_fabric = false; struct sli4_bde *bptr; memset(buf, 0, sli->wqe_size); bptr = &els->els_request_payload; if (sli->params.sgl_pre_registered) { els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL; els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (params->xmit_len & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[0].buffer_address_low; bptr->u.data.high = sge[0].buffer_address_high; } else { els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | ((2 * sizeof(struct sli4_sge)) & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); } els->els_request_payload_length = cpu_to_le32(params->xmit_len); els->max_response_payload_length = cpu_to_le32(params->rsp_len); els->xri_tag = cpu_to_le16(params->xri); els->timer = params->timeout; els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3; els->command = SLI4_WQE_ELS_REQUEST64; els->request_tag = cpu_to_le16(params->tag); els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD; els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD; /* figure out the ELS_ID value from the request buffer */ switch (params->cmd) { case ELS_LOGO: els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT; if (params->rpi_registered) { els->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->rpi); } else { els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->vpi); } if (params->d_id == FC_FID_FLOGI) is_fabric = true; break; case ELS_FDISC: if (params->d_id == FC_FID_FLOGI) is_fabric = true; if (params->s_id == 0) { els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT; is_fabric = true; } else { els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; } els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->vpi); els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT); break; case ELS_FLOGI: els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->vpi); /* * Set SP here ... we haven't done a REG_VPI yet * need to maybe not set this when we have * completed VFI/VPI registrations ... * * Use the FC_ID of the SPORT if it has been allocated, * otherwise use an S_ID of zero. 
*/ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT); if (params->s_id != U32_MAX) els->sid_sp_dword |= cpu_to_le32(params->s_id); break; case ELS_PLOGI: els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT; els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->vpi); break; case ELS_SCR: els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->vpi); break; default: els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; if (params->rpi_registered) { els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT); els->context_tag = cpu_to_le16(params->vpi); } else { els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; els->context_tag = cpu_to_le16(params->vpi); } break; } if (is_fabric) els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC; else els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC; els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) != SLI4_GENERIC_CONTEXT_RPI) els->remote_id_dword = cpu_to_le32(params->d_id); if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) == SLI4_GENERIC_CONTEXT_VPI) els->temporary_rpi = cpu_to_le16(params->rpi); return 0; } int sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri, u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout) { struct sli4_fcp_icmnd64_wqe *icmnd = buf; struct sli4_sge *sge = NULL; struct sli4_bde *bptr; u32 len; memset(buf, 0, sli->wqe_size); if (!sgl || !sgl->virt) { efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -EIO; } sge = sgl->virt; bptr = &icmnd->bde; if (sli->params.sgl_pre_registered) { icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL; icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[0].buffer_address_low; bptr->u.data.high = sge[0].buffer_address_high; } else { icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | (sgl->size & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); } len = le32_to_cpu(sge[0].buffer_length) + le32_to_cpu(sge[1].buffer_length); icmnd->payload_offset_length = cpu_to_le16(len); icmnd->xri_tag = cpu_to_le16(xri); icmnd->context_tag = cpu_to_le16(rpi); icmnd->timer = timeout; /* WQE word 4 contains read transfer length */ icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT; icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; icmnd->command = SLI4_WQE_FCP_ICMND64; icmnd->dif_ct_bs_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT; icmnd->abort_tag = cpu_to_le32(xri); icmnd->request_tag = cpu_to_le16(tag); icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1; icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2; icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE; icmnd->cq_id = cpu_to_le16(cq_id); return 0; } int sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 dif, u8 bs, u8 timeout) { struct sli4_fcp_iread64_wqe *iread = buf; struct sli4_sge *sge = NULL; struct sli4_bde 
*bptr; u32 sge_flags, len; memset(buf, 0, sli->wqe_size); if (!sgl || !sgl->virt) { efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -EIO; } sge = sgl->virt; bptr = &iread->bde; if (sli->params.sgl_pre_registered) { iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL; iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = sge[0].buffer_address_low; bptr->u.blp.high = sge[0].buffer_address_high; } else { iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | (sgl->size & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); /* * fill out fcp_cmnd buffer len and change resp buffer to be of * type "skip" (note: response will still be written to sge[1] * if necessary) */ len = le32_to_cpu(sge[0].buffer_length); iread->fcp_cmd_buffer_length = cpu_to_le16(len); sge_flags = le32_to_cpu(sge[1].dw2_flags); sge_flags &= (~SLI4_SGE_TYPE_MASK); sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); sge[1].dw2_flags = cpu_to_le32(sge_flags); } len = le32_to_cpu(sge[0].buffer_length) + le32_to_cpu(sge[1].buffer_length); iread->payload_offset_length = cpu_to_le16(len); iread->total_transfer_length = cpu_to_le32(xfer_len); iread->xri_tag = cpu_to_le16(xri); iread->context_tag = cpu_to_le16(rpi); iread->timer = timeout; /* WQE word 4 contains read transfer length */ iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT; iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; iread->command = SLI4_WQE_FCP_IREAD64; iread->dif_ct_bs_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT; iread->dif_ct_bs_byte |= dif; iread->dif_ct_bs_byte |= bs << SLI4_IR_WQE_BS_SHFT; iread->abort_tag = cpu_to_le32(xri); iread->request_tag = cpu_to_le16(tag); iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1; iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2; iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD; iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE; iread->cq_id = cpu_to_le16(cq_id); if (sli->params.perf_hint) { bptr = &iread->first_data_bde; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[first_data_sge].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[first_data_sge].buffer_address_low; bptr->u.data.high = sge[first_data_sge].buffer_address_high; } return 0; } int sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u32 first_data_sge, u32 xfer_len, u32 first_burst, u16 xri, u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 dif, u8 bs, u8 timeout) { struct sli4_fcp_iwrite64_wqe *iwrite = buf; struct sli4_sge *sge = NULL; struct sli4_bde *bptr; u32 sge_flags, min, len; memset(buf, 0, sli->wqe_size); if (!sgl || !sgl->virt) { efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? 
sgl->virt : NULL); return -EIO; } sge = sgl->virt; bptr = &iwrite->bde; if (sli->params.sgl_pre_registered) { iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL; iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[0].buffer_address_low; bptr->u.data.high = sge[0].buffer_address_high; } else { iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (sgl->size & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); /* * fill out fcp_cmnd buffer len and change resp buffer to be of * type "skip" (note: response will still be written to sge[1] * if necessary) */ len = le32_to_cpu(sge[0].buffer_length); iwrite->fcp_cmd_buffer_length = cpu_to_le16(len); sge_flags = le32_to_cpu(sge[1].dw2_flags); sge_flags &= ~SLI4_SGE_TYPE_MASK; sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); sge[1].dw2_flags = cpu_to_le32(sge_flags); } len = le32_to_cpu(sge[0].buffer_length) + le32_to_cpu(sge[1].buffer_length); iwrite->payload_offset_length = cpu_to_le16(len); iwrite->total_transfer_length = cpu_to_le16(xfer_len); min = (xfer_len < first_burst) ? xfer_len : first_burst; iwrite->initial_transfer_length = cpu_to_le16(min); iwrite->xri_tag = cpu_to_le16(xri); iwrite->context_tag = cpu_to_le16(rpi); iwrite->timer = timeout; /* WQE word 4 contains read transfer length */ iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT; iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; iwrite->command = SLI4_WQE_FCP_IWRITE64; iwrite->dif_ct_bs_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT; iwrite->dif_ct_bs_byte |= dif; iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT; iwrite->abort_tag = cpu_to_le32(xri); iwrite->request_tag = cpu_to_le16(tag); iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1; iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2; iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE; iwrite->cq_id = cpu_to_le16(cq_id); if (sli->params.perf_hint) { bptr = &iwrite->first_data_bde; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[first_data_sge].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[first_data_sge].buffer_address_low; bptr->u.data.high = sge[first_data_sge].buffer_address_high; } return 0; } int sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, struct sli_fcp_tgt_params *params) { struct sli4_fcp_treceive64_wqe *trecv = buf; struct sli4_fcp_128byte_wqe *trecv_128 = buf; struct sli4_sge *sge = NULL; struct sli4_bde *bptr; memset(buf, 0, sli->wqe_size); if (!sgl || !sgl->virt) { efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? 
sgl->virt : NULL); return -EIO; } sge = sgl->virt; bptr = &trecv->bde; if (sli->params.sgl_pre_registered) { trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL; trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[0].buffer_address_low; bptr->u.data.high = sge[0].buffer_address_high; trecv->payload_offset_length = sge[0].buffer_length; } else { trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL; /* if data is a single physical address, use a BDE */ if (!dif && params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) { trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[2].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[2].buffer_address_low; bptr->u.data.high = sge[2].buffer_address_high; } else { bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | (sgl->size & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); } } trecv->relative_offset = cpu_to_le32(params->offset); if (params->flags & SLI4_IO_CONTINUATION) trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC; trecv->xri_tag = cpu_to_le16(params->xri); trecv->context_tag = cpu_to_le16(params->rpi); /* WQE uses relative offset */ trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT; if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR; trecv->command = SLI4_WQE_FCP_TRECEIVE64; trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; trecv->dif_ct_bs_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT; trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT; trecv->remote_xid = cpu_to_le16(params->ox_id); trecv->request_tag = cpu_to_le16(params->tag); trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD; trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2; trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE; trecv->cq_id = cpu_to_le16(cq_id); trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len); if (sli->params.perf_hint) { bptr = &trecv->first_data_bde; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[first_data_sge].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[first_data_sge].buffer_address_low; bptr->u.data.high = sge[first_data_sge].buffer_address_high; } /* The upper 7 bits of csctl is the priority */ if (params->cs_ctl & SLI4_MASK_CCP) { trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE; trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP); } if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES && !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) { trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID; trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES; trecv_128->dw[31] = params->app_id; } return 0; } int sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u32 first_data_sge, u16 sec_xri, u16 cq_id, u8 dif, u8 bs, struct sli_fcp_tgt_params *params) { int rc; rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge, cq_id, dif, bs, params); if (!rc) { struct sli4_fcp_treceive64_wqe *trecv = buf; trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64; trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri); } return rc; } int sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params) { struct sli4_fcp_trsp64_wqe *trsp = buf; struct 
sli4_fcp_128byte_wqe *trsp_128 = buf; memset(buf, 0, sli4->wqe_size); if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) { trsp->class_ag_byte |= SLI4_TRSP_WQE_AG; } else { struct sli4_sge *sge = sgl->virt; struct sli4_bde *bptr; if (sli4->params.sgl_pre_registered || port_owned) trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE; else trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL; bptr = &trsp->bde; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[0].buffer_address_low; bptr->u.data.high = sge[0].buffer_address_high; trsp->fcp_response_length = cpu_to_le32(params->xmit_len); } if (params->flags & SLI4_IO_CONTINUATION) trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC; trsp->xri_tag = cpu_to_le16(params->xri); trsp->rpi = cpu_to_le16(params->rpi); trsp->command = SLI4_WQE_FCP_TRSP64; trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3; trsp->remote_xid = cpu_to_le16(params->ox_id); trsp->request_tag = cpu_to_le16(params->tag); if (params->flags & SLI4_IO_DNRX) trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX; else trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX; trsp->lloc1_appid |= 0x1; trsp->cq_id = cpu_to_le16(cq_id); trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE; /* The upper 7 bits of csctl is the priority */ if (params->cs_ctl & SLI4_MASK_CCP) { trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE; trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP); } if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES && !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) { trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID; trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES; trsp_128->dw[31] = params->app_id; } return 0; } int sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, struct sli_fcp_tgt_params *params) { struct sli4_fcp_tsend64_wqe *tsend = buf; struct sli4_fcp_128byte_wqe *tsend_128 = buf; struct sli4_sge *sge = NULL; struct sli4_bde *bptr; memset(buf, 0, sli4->wqe_size); if (!sgl || !sgl->virt) { efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? sgl->virt : NULL); return -EIO; } sge = sgl->virt; bptr = &tsend->bde; if (sli4->params.sgl_pre_registered) { tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL; tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[2].buffer_length) & SLI4_BDE_LEN_MASK)); /* TSEND64_WQE specifies first two SGE are skipped (3rd is * valid) */ bptr->u.data.low = sge[2].buffer_address_low; bptr->u.data.high = sge[2].buffer_address_high; } else { tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL; /* if data is a single physical address, use a BDE */ if (!dif && params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) { tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[2].buffer_length) & SLI4_BDE_LEN_MASK)); /* * TSEND64_WQE specifies first two SGE are skipped * (i.e. 
3rd is valid) */ bptr->u.data.low = sge[2].buffer_address_low; bptr->u.data.high = sge[2].buffer_address_high; } else { bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | (sgl->size & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); } } tsend->relative_offset = cpu_to_le32(params->offset); if (params->flags & SLI4_IO_CONTINUATION) tsend->dw10byte2 |= SLI4_TSEND_XC; tsend->xri_tag = cpu_to_le16(params->xri); tsend->rpi = cpu_to_le16(params->rpi); /* WQE uses relative offset */ tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT; if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR; tsend->command = SLI4_WQE_FCP_TSEND64; tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3; tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT; tsend->ct_byte |= dif; tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT; tsend->remote_xid = cpu_to_le16(params->ox_id); tsend->request_tag = cpu_to_le16(params->tag); tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2; tsend->cq_id = cpu_to_le16(cq_id); tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE; tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len); if (sli4->params.perf_hint) { bptr = &tsend->first_data_bde; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (le32_to_cpu(sge[first_data_sge].buffer_length) & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[first_data_sge].buffer_address_low; bptr->u.data.high = sge[first_data_sge].buffer_address_high; } /* The upper 7 bits of csctl is the priority */ if (params->cs_ctl & SLI4_MASK_CCP) { tsend->dw10byte2 |= SLI4_TSEND_CCPE; tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP); } if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES && !(tsend->dw10byte2 & SLI4_TSEND_EAT)) { tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID; tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES; tsend_128->dw[31] = params->app_id; } return 0; } int sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, struct sli_ct_params *params) { struct sli4_gen_request64_wqe *gen = buf; struct sli4_sge *sge = NULL; struct sli4_bde *bptr; memset(buf, 0, sli4->wqe_size); if (!sgl || !sgl->virt) { efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", sgl, sgl ? 
sgl->virt : NULL); return -EIO; } sge = sgl->virt; bptr = &gen->bde; if (sli4->params.sgl_pre_registered) { gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL; gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (params->xmit_len & SLI4_BDE_LEN_MASK)); bptr->u.data.low = sge[0].buffer_address_low; bptr->u.data.high = sge[0].buffer_address_high; } else { gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL; bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | ((2 * sizeof(struct sli4_sge)) & SLI4_BDE_LEN_MASK)); bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); } gen->request_payload_length = cpu_to_le32(params->xmit_len); gen->max_response_payload_length = cpu_to_le32(params->rsp_len); gen->df_ctl = params->df_ctl; gen->type = params->type; gen->r_ctl = params->r_ctl; gen->xri_tag = cpu_to_le16(params->xri); gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT; gen->context_tag = cpu_to_le16(params->rpi); gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3; gen->command = SLI4_WQE_GEN_REQUEST64; gen->timer = params->timeout; gen->request_tag = cpu_to_le16(params->tag); gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD; gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD; gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE; gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); return 0; } int sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr, struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri, u16 req_tag) { struct sli4_send_frame_wqe *sf = buf; memset(buf, 0, sli->wqe_size); sf->dw10flags1 |= SLI4_SF_WQE_DBDE; sf->bde.bde_type_buflen = cpu_to_le32(req_len & SLI4_BDE_LEN_MASK); sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys)); sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys)); /* Copy FC header */ sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]); sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]); sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]); sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]); sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]); sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]); sf->frame_length = cpu_to_le32(req_len); sf->xri_tag = cpu_to_le16(xri); sf->dw7flags0 &= ~SLI4_SF_PU; sf->context_tag = 0; sf->ct_byte &= ~SLI4_SF_CT; sf->command = SLI4_WQE_SEND_FRAME; sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3; sf->timer = timeout; sf->request_tag = cpu_to_le16(req_tag); sf->eof = eof; sf->sof = sof; sf->dw10flags1 &= ~SLI4_SF_QOSD; sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1; sf->dw10flags2 &= ~SLI4_SF_XC; sf->dw10flags1 |= SLI4_SF_XBL; sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE; sf->cq_id = cpu_to_le16(0xffff); return 0; } int sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf, struct sli_bls_payload *payload, struct sli_bls_params *params) { struct sli4_xmit_bls_rsp_wqe *bls = buf; u32 dw_ridflags = 0; /* * Callers can either specify RPI or S_ID, but not both */ if (params->rpi_registered && params->s_id != U32_MAX) { efc_log_info(sli, "S_ID specified for attached remote node %d\n", params->rpi); return -EIO; } memset(buf, 0, sli->wqe_size); if (payload->type == SLI4_SLI_BLS_ACC) { bls->payload_word0 = cpu_to_le32((payload->u.acc.seq_id_last << 16) | (payload->u.acc.seq_id_validity << 24)); bls->high_seq_cnt = payload->u.acc.high_seq_cnt; bls->low_seq_cnt = payload->u.acc.low_seq_cnt; } else if (payload->type == SLI4_SLI_BLS_RJT) { bls->payload_word0 = cpu_to_le32(*((u32 *)&payload->u.rjt)); dw_ridflags |= SLI4_BLS_RSP_WQE_AR; } else { efc_log_info(sli, "bad BLS type %#x\n", 
payload->type); return -EIO; } bls->ox_id = payload->ox_id; bls->rx_id = payload->rx_id; if (params->rpi_registered) { bls->dw8flags0 |= SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT; bls->context_tag = cpu_to_le16(params->rpi); } else { bls->dw8flags0 |= SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT; bls->context_tag = cpu_to_le16(params->vpi); bls->local_n_port_id_dword |= cpu_to_le32(params->s_id & 0x00ffffff); dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) | (params->d_id & SLI4_BLS_RSP_RID); bls->temporary_rpi = cpu_to_le16(params->rpi); } bls->xri_tag = cpu_to_le16(params->xri); bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3; bls->command = SLI4_WQE_XMIT_BLS_RSP; bls->request_tag = cpu_to_le16(params->tag); bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD; bls->remote_id_dword = cpu_to_le32(dw_ridflags); bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE; return 0; } int sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp, struct sli_els_params *params) { struct sli4_xmit_els_rsp64_wqe *els = buf; memset(buf, 0, sli->wqe_size); if (sli->params.sgl_pre_registered) els->flags2 |= SLI4_ELS_DBDE; else els->flags2 |= SLI4_ELS_XBL; els->els_response_payload.bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (params->rsp_len & SLI4_BDE_LEN_MASK)); els->els_response_payload.u.data.low = cpu_to_le32(lower_32_bits(rsp->phys)); els->els_response_payload.u.data.high = cpu_to_le32(upper_32_bits(rsp->phys)); els->els_response_payload_length = cpu_to_le32(params->rsp_len); els->xri_tag = cpu_to_le16(params->xri); els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3; els->command = SLI4_WQE_ELS_RSP64; els->request_tag = cpu_to_le16(params->tag); els->ox_id = cpu_to_le16(params->ox_id); els->flags2 |= SLI4_ELS_QOSD; els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN; els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); if (params->rpi_registered) { els->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET; els->context_tag = cpu_to_le16(params->rpi); return 0; } els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET; els->context_tag = cpu_to_le16(params->vpi); els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID); els->temporary_rpi = cpu_to_le16(params->rpi); if (params->s_id != U32_MAX) { els->sid_dw |= cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID)); } return 0; } int sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload, struct sli_ct_params *params) { struct sli4_xmit_sequence64_wqe *xmit = buf; memset(buf, 0, sli4->wqe_size); if (!payload || !payload->virt) { efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", payload, payload ? 
payload->virt : NULL); return -EIO; } if (sli4->params.sgl_pre_registered) xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE); else xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL); xmit->bde.bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (params->rsp_len & SLI4_BDE_LEN_MASK)); xmit->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys)); xmit->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys)); xmit->sequence_payload_len = cpu_to_le32(params->rsp_len); xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff); xmit->relative_offset = 0; /* sequence initiative - this matches what is seen from * FC switches in response to FCGS commands */ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI); xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT);/* force transmit */ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO);/* exchange responder */ xmit->dw5flags0 |= SLI4_SEQ_WQE_LS;/* last in seqence */ xmit->df_ctl = params->df_ctl; xmit->type = params->type; xmit->r_ctl = params->r_ctl; xmit->xri_tag = cpu_to_le16(params->xri); xmit->context_tag = cpu_to_le16(params->rpi); xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF; xmit->dw7flags0 |= SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT; xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS; xmit->command = SLI4_WQE_XMIT_SEQUENCE64; xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3; xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU; xmit->timer = params->timeout; xmit->abort_tag = 0; xmit->request_tag = cpu_to_le16(params->tag); xmit->remote_xid = cpu_to_le16(params->ox_id); xmit->dw10w0 |= cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT); xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE; xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT); xmit->cq_id = cpu_to_le16(0xFFFF); return 0; } int sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id) { struct sli4_requeue_xri_wqe *requeue = buf; memset(buf, 0, sli4->wqe_size); requeue->command = SLI4_WQE_REQUEUE_XRI; requeue->xri_tag = cpu_to_le16(xri); requeue->request_tag = cpu_to_le16(tag); requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC); requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD); requeue->cq_id = cpu_to_le16(cq_id); requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE; return 0; } int sli_fc_process_link_attention(struct sli4 *sli4, void *acqe) { struct sli4_link_attention *link_attn = acqe; struct sli4_link_event event = { 0 }; efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n", link_attn->link_number, link_attn->attn_type, link_attn->topology, link_attn->port_speed, link_attn->port_fault); efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n", link_attn->shared_link_status, le16_to_cpu(link_attn->logical_link_speed), le32_to_cpu(link_attn->event_tag)); if (!sli4->link) return -EIO; event.medium = SLI4_LINK_MEDIUM_FC; switch (link_attn->attn_type) { case SLI4_LNK_ATTN_TYPE_LINK_UP: event.status = SLI4_LINK_STATUS_UP; break; case SLI4_LNK_ATTN_TYPE_LINK_DOWN: event.status = SLI4_LINK_STATUS_DOWN; break; case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA: efc_log_info(sli4, "attn_type: no hard alpa\n"); event.status = SLI4_LINK_STATUS_NO_ALPA; break; default: efc_log_info(sli4, "attn_type: unknown\n"); break; } switch (link_attn->event_type) { case SLI4_EVENT_LINK_ATTENTION: break; case SLI4_EVENT_SHARED_LINK_ATTENTION: efc_log_info(sli4, "event_type: FC shared link event\n"); break; default: efc_log_info(sli4, "event_type: unknown\n"); break; } switch (link_attn->topology) { case SLI4_LNK_ATTN_P2P: event.topology = SLI4_LINK_TOPO_NON_FC_AL; break; 
case SLI4_LNK_ATTN_FC_AL: event.topology = SLI4_LINK_TOPO_FC_AL; break; case SLI4_LNK_ATTN_INTERNAL_LOOPBACK: efc_log_info(sli4, "topology Internal loopback\n"); event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL; break; case SLI4_LNK_ATTN_SERDES_LOOPBACK: efc_log_info(sli4, "topology serdes loopback\n"); event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL; break; default: efc_log_info(sli4, "topology: unknown\n"); break; } event.speed = link_attn->port_speed * 1000; sli4->link(sli4->link_arg, (void *)&event); return 0; } int sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe, enum sli4_qentry *etype, u16 *r_id) { u8 code = cqe[SLI4_CQE_CODE_OFFSET]; int rc; switch (code) { case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION: { struct sli4_fc_wcqe *wcqe = (void *)cqe; *etype = SLI4_QENTRY_WQ; *r_id = le16_to_cpu(wcqe->request_tag); rc = wcqe->status; /* Flag errors except for FCP_RSP_FAILURE */ if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) { efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n", wcqe->status, wcqe->hw_status, le16_to_cpu(wcqe->request_tag)); efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n", le32_to_cpu(wcqe->wqe_specific_1), le32_to_cpu(wcqe->wqe_specific_2), (wcqe->flags & SLI4_WCQE_XB)); efc_log_info(sli4, " %08X %08X %08X %08X\n", ((u32 *)cqe)[0], ((u32 *)cqe)[1], ((u32 *)cqe)[2], ((u32 *)cqe)[3]); } break; } case SLI4_CQE_CODE_RQ_ASYNC: { struct sli4_fc_async_rcqe *rcqe = (void *)cqe; *etype = SLI4_QENTRY_RQ; *r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID; rc = rcqe->status; break; } case SLI4_CQE_CODE_RQ_ASYNC_V1: { struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe; *etype = SLI4_QENTRY_RQ; *r_id = le16_to_cpu(rcqe->rq_id); rc = rcqe->status; break; } case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: { struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe; *etype = SLI4_QENTRY_OPT_WRITE_CMD; *r_id = le16_to_cpu(optcqe->rq_id); rc = optcqe->status; break; } case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA: { struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe; *etype = SLI4_QENTRY_OPT_WRITE_DATA; *r_id = le16_to_cpu(dcqe->xri); rc = dcqe->status; /* Flag errors */ if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) { efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n", dcqe->status); efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n", dcqe->hw_status, le16_to_cpu(dcqe->xri), le32_to_cpu(dcqe->total_data_placed), ((u32 *)cqe)[3], (dcqe->flags & SLI4_OCQE_XB)); } break; } case SLI4_CQE_CODE_RQ_COALESCING: { struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe; *etype = SLI4_QENTRY_RQ; *r_id = le16_to_cpu(rcqe->rq_id); rc = rcqe->status; break; } case SLI4_CQE_CODE_XRI_ABORTED: { struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe; *etype = SLI4_QENTRY_XABT; *r_id = le16_to_cpu(xa->xri); rc = 0; break; } case SLI4_CQE_CODE_RELEASE_WQE: { struct sli4_fc_wqec *wqec = (void *)cqe; *etype = SLI4_QENTRY_WQ_RELEASE; *r_id = le16_to_cpu(wqec->wq_id); rc = 0; break; } default: efc_log_info(sli4, "CQE completion code %d not handled\n", code); *etype = SLI4_QENTRY_MAX; *r_id = U16_MAX; rc = -EINVAL; } return rc; } u32 sli_fc_response_length(struct sli4 *sli4, u8 *cqe) { struct sli4_fc_wcqe *wcqe = (void *)cqe; return le32_to_cpu(wcqe->wqe_specific_1); } u32 sli_fc_io_length(struct sli4 *sli4, u8 *cqe) { struct sli4_fc_wcqe *wcqe = (void *)cqe; return le32_to_cpu(wcqe->wqe_specific_1); } int sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id) { struct sli4_fc_wcqe *wcqe = (void *)cqe; *d_id = 0; if (wcqe->status) return -EIO; *d_id = 
le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff; return 0; } u32 sli_fc_ext_status(struct sli4 *sli4, u8 *cqe) { struct sli4_fc_wcqe *wcqe = (void *)cqe; u32 mask; switch (wcqe->status) { case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE: mask = U32_MAX; break; case SLI4_FC_WCQE_STATUS_LOCAL_REJECT: case SLI4_FC_WCQE_STATUS_CMD_REJECT: mask = 0xff; break; case SLI4_FC_WCQE_STATUS_NPORT_RJT: case SLI4_FC_WCQE_STATUS_FABRIC_RJT: case SLI4_FC_WCQE_STATUS_NPORT_BSY: case SLI4_FC_WCQE_STATUS_FABRIC_BSY: case SLI4_FC_WCQE_STATUS_LS_RJT: mask = U32_MAX; break; case SLI4_FC_WCQE_STATUS_DI_ERROR: mask = U32_MAX; break; default: mask = 0; } return le32_to_cpu(wcqe->wqe_specific_2) & mask; } int sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index) { int rc = -EIO; u8 code = 0; u16 rq_element_index; *rq_id = 0; *index = U32_MAX; code = cqe[SLI4_CQE_CODE_OFFSET]; /* Retrieve the RQ index from the completion */ if (code == SLI4_CQE_CODE_RQ_ASYNC) { struct sli4_fc_async_rcqe *rcqe = (void *)cqe; *rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID; rq_element_index = le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX; *index = rq_element_index; if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) { rc = 0; } else { rc = rcqe->status; efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n", rcqe->status, sli_fc_get_status_string(rcqe->status), le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID); efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n", le16_to_cpu(rcqe->data_placement_length), rcqe->sof_byte, rcqe->eof_byte, rcqe->hdpl_byte & SLI4_RACQE_HDPL); } } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) { struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe; *rq_id = le16_to_cpu(rcqe_v1->rq_id); rq_element_index = (le16_to_cpu(rcqe_v1->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX); *index = rq_element_index; if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) { rc = 0; } else { rc = rcqe_v1->status; efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n", rcqe_v1->status, sli_fc_get_status_string(rcqe_v1->status), le16_to_cpu(rcqe_v1->rq_id), rq_element_index); efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n", le16_to_cpu(rcqe_v1->data_placement_length), rcqe_v1->sof_byte, rcqe_v1->eof_byte, rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL); } } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) { struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe; *rq_id = le16_to_cpu(optcqe->rq_id); *index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX; if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) { rc = 0; } else { rc = optcqe->status; efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n", optcqe->status, sli_fc_get_status_string(optcqe->status), le16_to_cpu(optcqe->rq_id), *index, le16_to_cpu(optcqe->data_placement_length)); efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n", (optcqe->hdpl_vld & SLI4_OCQE_HDPL), (optcqe->flags1 & SLI4_OCQE_OOX), (optcqe->flags1 & SLI4_OCQE_AGXR), optcqe->xri, le16_to_cpu(optcqe->rpi)); } } else if (code == SLI4_CQE_CODE_RQ_COALESCING) { struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe; rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RCQE_RQ_EL_INDX); *rq_id = le16_to_cpu(rcqe->rq_id); if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) { *index = rq_element_index; rc = 0; } else { *index = U32_MAX; rc = rcqe->status; efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n", rcqe->status, sli_fc_get_status_string(rcqe->status), le16_to_cpu(rcqe->rq_id), rq_element_index); efc_log_info(sli4, "rq_id=%#x sdpl=%x\n", 
le16_to_cpu(rcqe->rq_id), le16_to_cpu(rcqe->seq_placement_length)); } } else { struct sli4_fc_async_rcqe *rcqe = (void *)cqe; *index = U32_MAX; rc = rcqe->status; efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n", rcqe->status, le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID, (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX), le16_to_cpu(rcqe->data_placement_length)); efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n", rcqe->sof_byte, rcqe->eof_byte, rcqe->hdpl_byte & SLI4_RACQE_HDPL); } return rc; } static int sli_bmbx_wait(struct sli4 *sli4, u32 msec) { u32 val; unsigned long end; /* Wait for the bootstrap mailbox to report "ready" */ end = jiffies + msecs_to_jiffies(msec); do { val = readl(sli4->reg[0] + SLI4_BMBX_REG); if (val & SLI4_BMBX_RDY) return 0; usleep_range(1000, 2000); } while (time_before(jiffies, end)); return -EIO; } static int sli_bmbx_write(struct sli4 *sli4) { u32 val; /* write buffer location to bootstrap mailbox register */ val = sli_bmbx_write_hi(sli4->bmbx.phys); writel(val, (sli4->reg[0] + SLI4_BMBX_REG)); if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) { efc_log_crit(sli4, "BMBX WRITE_HI failed\n"); return -EIO; } val = sli_bmbx_write_lo(sli4->bmbx.phys); writel(val, (sli4->reg[0] + SLI4_BMBX_REG)); /* wait for SLI Port to set ready bit */ return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC); } int sli_bmbx_command(struct sli4 *sli4) { void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE; if (sli_fw_error_status(sli4) > 0) { efc_log_crit(sli4, "Chip is in an error state -Mailbox command rejected"); efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n", sli_reg_read_status(sli4), sli_reg_read_err1(sli4), sli_reg_read_err2(sli4)); return -EIO; } /* Submit a command to the bootstrap mailbox and check the status */ if (sli_bmbx_write(sli4)) { efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n", &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG)); return -EIO; } /* check completion queue entry status */ if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) & SLI4_MCQE_VALID) { return sli_cqe_mq(sli4, cqe); } efc_log_crit(sli4, "invalid or wrong type\n"); return -EIO; } int sli_cmd_config_link(struct sli4 *sli4, void *buf) { struct sli4_cmd_config_link *config_link = buf; memset(buf, 0, SLI4_BMBX_SIZE); config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK; /* Port interprets zero in a field as "use default value" */ return 0; } int sli_cmd_down_link(struct sli4 *sli4, void *buf) { struct sli4_mbox_command_header *hdr = buf; memset(buf, 0, SLI4_BMBX_SIZE); hdr->command = SLI4_MBX_CMD_DOWN_LINK; /* Port interprets zero in a field as "use default value" */ return 0; } int sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki) { struct sli4_cmd_dump4 *cmd = buf; memset(buf, 0, SLI4_BMBX_SIZE); cmd->hdr.command = SLI4_MBX_CMD_DUMP; cmd->type_dword = cpu_to_le32(0x4); cmd->wki_selection = cpu_to_le16(wki); return 0; } int sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num, struct efc_dma *dma) { struct sli4_rqst_cmn_read_transceiver_data *req = NULL; u32 psize; if (!dma) psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data); else psize = dma->size; req = sli_config_cmd_init(sli4, buf, psize, dma); if (!req) return -EIO; sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data)); req->page_number = cpu_to_le32(page_num); req->port = cpu_to_le32(sli4->port_number); return 0; } int sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 
req_ext_counters, u8 clear_overflow_flags, u8 clear_all_counters) { struct sli4_cmd_read_link_stats *cmd = buf; u32 flags; memset(buf, 0, SLI4_BMBX_SIZE); cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT; flags = 0; if (req_ext_counters) flags |= SLI4_READ_LNKSTAT_REC; if (clear_all_counters) flags |= SLI4_READ_LNKSTAT_CLRC; if (clear_overflow_flags) flags |= SLI4_READ_LNKSTAT_CLOF; cmd->dw1_flags = cpu_to_le32(flags); return 0; } int sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters) { struct sli4_cmd_read_status *cmd = buf; u32 flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS; if (clear_counters) flags |= SLI4_READSTATUS_CLEAR_COUNTERS; else flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS; cmd->dw1_flags = cpu_to_le32(flags); return 0; } int sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa) { struct sli4_cmd_init_link *init_link = buf; u32 flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK; init_link->sel_reset_al_pa_dword = cpu_to_le32(reset_alpa); flags &= ~SLI4_INIT_LINK_F_LOOPBACK; init_link->link_speed_sel_code = cpu_to_le32(speed); switch (speed) { case SLI4_LINK_SPEED_1G: case SLI4_LINK_SPEED_2G: case SLI4_LINK_SPEED_4G: case SLI4_LINK_SPEED_8G: case SLI4_LINK_SPEED_16G: case SLI4_LINK_SPEED_32G: case SLI4_LINK_SPEED_64G: flags |= SLI4_INIT_LINK_F_FIXED_SPEED; break; case SLI4_LINK_SPEED_10G: efc_log_info(sli4, "unsupported FC speed %d\n", speed); init_link->flags0 = cpu_to_le32(flags); return -EIO; } switch (sli4->topology) { case SLI4_READ_CFG_TOPO_FC: /* Attempt P2P but failover to FC-AL */ flags |= SLI4_INIT_LINK_F_FAIL_OVER; flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER; break; case SLI4_READ_CFG_TOPO_FC_AL: flags |= SLI4_INIT_LINK_F_FCAL_ONLY; if (speed == SLI4_LINK_SPEED_16G || speed == SLI4_LINK_SPEED_32G) { efc_log_info(sli4, "unsupported FC-AL speed %d\n", speed); init_link->flags0 = cpu_to_le32(flags); return -EIO; } break; case SLI4_READ_CFG_TOPO_NON_FC_AL: flags |= SLI4_INIT_LINK_F_P2P_ONLY; break; default: efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology); init_link->flags0 = cpu_to_le32(flags); return -EIO; } flags &= ~SLI4_INIT_LINK_F_UNFAIR; flags &= ~SLI4_INIT_LINK_F_NO_LIRP; flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK; flags &= ~SLI4_INIT_LINK_F_NO_LISA; flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA; init_link->flags0 = cpu_to_le32(flags); return 0; } int sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi) { struct sli4_cmd_init_vfi *init_vfi = buf; u16 flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI; init_vfi->vfi = cpu_to_le16(vfi); init_vfi->fcfi = cpu_to_le16(fcfi); /* * If the VPI is valid, initialize it at the same time as * the VFI */ if (vpi != U16_MAX) { flags |= SLI4_INIT_VFI_FLAG_VP; init_vfi->flags0_word = cpu_to_le16(flags); init_vfi->vpi = cpu_to_le16(vpi); } return 0; } int sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi) { struct sli4_cmd_init_vpi *init_vpi = buf; memset(buf, 0, SLI4_BMBX_SIZE); init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI; init_vpi->vpi = cpu_to_le16(vpi); init_vpi->vfi = cpu_to_le16(vfi); return 0; } int sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count) { struct sli4_cmd_post_xri *post_xri = buf; u16 xri_count_flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI; post_xri->xri_base = cpu_to_le16(xri_base); xri_count_flags = xri_count & SLI4_POST_XRI_COUNT; xri_count_flags |= 
SLI4_POST_XRI_FLAG_ENX; xri_count_flags |= SLI4_POST_XRI_FLAG_VAL; post_xri->xri_count_flags = cpu_to_le16(xri_count_flags); return 0; } int sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri) { struct sli4_cmd_release_xri *release_xri = buf; memset(buf, 0, SLI4_BMBX_SIZE); release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI; release_xri->xri_count_word = cpu_to_le16(num_xri & SLI4_RELEASE_XRI_COUNT); return 0; } static int sli_cmd_read_config(struct sli4 *sli4, void *buf) { struct sli4_cmd_read_config *read_config = buf; memset(buf, 0, SLI4_BMBX_SIZE); read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG; return 0; } int sli_cmd_read_nvparms(struct sli4 *sli4, void *buf) { struct sli4_cmd_read_nvparms *read_nvparms = buf; memset(buf, 0, SLI4_BMBX_SIZE); read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS; return 0; } int sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn, u8 hard_alpa, u32 preferred_d_id) { struct sli4_cmd_write_nvparms *write_nvparms = buf; memset(buf, 0, SLI4_BMBX_SIZE); write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS; memcpy(write_nvparms->wwpn, wwpn, 8); memcpy(write_nvparms->wwnn, wwnn, 8); write_nvparms->hard_alpa_d_id = cpu_to_le32((preferred_d_id << 8) | hard_alpa); return 0; } static int sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd) { struct sli4_cmd_read_rev *read_rev = buf; memset(buf, 0, SLI4_BMBX_SIZE); read_rev->hdr.command = SLI4_MBX_CMD_READ_REV; if (vpd && vpd->size) { read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD); read_rev->available_length_dword = cpu_to_le32(vpd->size & SLI4_READ_REV_AVAILABLE_LENGTH); read_rev->hostbuf.low = cpu_to_le32(lower_32_bits(vpd->phys)); read_rev->hostbuf.high = cpu_to_le32(upper_32_bits(vpd->phys)); } return 0; } int sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi) { struct sli4_cmd_read_sparm64 *read_sparm64 = buf; if (vpi == U16_MAX) { efc_log_err(sli4, "special VPI not supported!!!\n"); return -EIO; } if (!dma || !dma->phys) { efc_log_err(sli4, "bad DMA buffer\n"); return -EIO; } memset(buf, 0, SLI4_BMBX_SIZE); read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64; read_sparm64->bde_64.bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (dma->size & SLI4_BDE_LEN_MASK)); read_sparm64->bde_64.u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); read_sparm64->bde_64.u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); read_sparm64->vpi = cpu_to_le16(vpi); return 0; } int sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma) { struct sli4_cmd_read_topology *read_topo = buf; if (!dma || !dma->size) return -EIO; if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) { efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size); return -EIO; } memset(buf, 0, SLI4_BMBX_SIZE); read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY; memset(dma->virt, 0, dma->size); read_topo->bde_loop_map.bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (dma->size & SLI4_BDE_LEN_MASK)); read_topo->bde_loop_map.u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); read_topo->bde_loop_map.u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); return 0; } int sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index, struct sli4_cmd_rq_cfg *rq_cfg) { struct sli4_cmd_reg_fcfi *reg_fcfi = buf; u32 i; memset(buf, 0, SLI4_BMBX_SIZE); reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI; reg_fcfi->fcf_index = cpu_to_le16(index); for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { switch (i) { case 0: reg_fcfi->rqid0 = rq_cfg[0].rq_id; break; 
case 1: reg_fcfi->rqid1 = rq_cfg[1].rq_id; break; case 2: reg_fcfi->rqid2 = rq_cfg[2].rq_id; break; case 3: reg_fcfi->rqid3 = rq_cfg[3].rq_id; break; } reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask; reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match; reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask; reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match; } return 0; } int sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index, u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs, struct sli4_cmd_rq_cfg *rq_cfg) { struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf; u32 i; u32 mrq_flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ; if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) { reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index); goto done; } reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE); for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) { reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask; reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match; reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask; reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match; switch (i) { case 3: reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id; break; case 2: reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id; break; case 1: reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id; break; case 0: reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id; break; } } mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS; mrq_flags |= (mrq_bit_mask << 8); mrq_flags |= (rq_selection_policy << 12); reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags); done: return 0; } int sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id, struct efc_dma *dma, u8 update, u8 enable_t10_pi) { struct sli4_cmd_reg_rpi *reg_rpi = buf; u32 rportid_flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI; reg_rpi->rpi = cpu_to_le16(rpi); rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID; if (update) rportid_flags |= SLI4_REGRPI_UPD; else rportid_flags &= ~SLI4_REGRPI_UPD; if (enable_t10_pi) rportid_flags |= SLI4_REGRPI_ETOW; else rportid_flags &= ~SLI4_REGRPI_ETOW; reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags); reg_rpi->bde_64.bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK)); reg_rpi->bde_64.u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); reg_rpi->bde_64.u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); reg_rpi->vpi = cpu_to_le16(vpi); return 0; } int sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size, u16 vfi, u16 fcfi, struct efc_dma dma, u16 vpi, __be64 sli_wwpn, u32 fc_id) { struct sli4_cmd_reg_vfi *reg_vfi = buf; memset(buf, 0, SLI4_BMBX_SIZE); reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI; reg_vfi->vfi = cpu_to_le16(vfi); reg_vfi->fcfi = cpu_to_le16(fcfi); reg_vfi->sparm.bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK)); reg_vfi->sparm.u.data.low = cpu_to_le32(lower_32_bits(dma.phys)); reg_vfi->sparm.u.data.high = cpu_to_le32(upper_32_bits(dma.phys)); reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov); reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov); reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP); reg_vfi->vpi = cpu_to_le16(vpi); memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn)); reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id); return 0; } int sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn, u16 vpi, u16 vfi, bool update) { struct sli4_cmd_reg_vpi *reg_vpi = buf; u32 flags = 0; 
memset(buf, 0, SLI4_BMBX_SIZE); reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI; flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID); if (update) flags |= SLI4_REGVPI_UPD; else flags &= ~SLI4_REGVPI_UPD; reg_vpi->dw2_lportid_flags = cpu_to_le32(flags); memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn)); reg_vpi->vpi = cpu_to_le16(vpi); reg_vpi->vfi = cpu_to_le16(vfi); return 0; } static int sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask, bool query) { struct sli4_cmd_request_features *req_features = buf; memset(buf, 0, SLI4_BMBX_SIZE); req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES; if (query) req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY); req_features->cmd = cpu_to_le32(features_mask); return 0; } int sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator) { struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf; memset(buf, 0, SLI4_BMBX_SIZE); unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI; unreg_fcfi->fcfi = cpu_to_le16(indicator); return 0; } int sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator, enum sli4_resource which, u32 fc_id) { struct sli4_cmd_unreg_rpi *unreg_rpi = buf; u32 flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI; switch (which) { case SLI4_RSRC_RPI: flags |= SLI4_UNREG_RPI_II_RPI; if (fc_id == U32_MAX) break; flags |= SLI4_UNREG_RPI_DP; unreg_rpi->dw2_dest_n_portid = cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK); break; case SLI4_RSRC_VPI: flags |= SLI4_UNREG_RPI_II_VPI; break; case SLI4_RSRC_VFI: flags |= SLI4_UNREG_RPI_II_VFI; break; case SLI4_RSRC_FCFI: flags |= SLI4_UNREG_RPI_II_FCFI; break; default: efc_log_info(sli4, "unknown type %#x\n", which); return -EIO; } unreg_rpi->dw1w1_flags = cpu_to_le16(flags); unreg_rpi->index = cpu_to_le16(indicator); return 0; } int sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which) { struct sli4_cmd_unreg_vfi *unreg_vfi = buf; memset(buf, 0, SLI4_BMBX_SIZE); unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI; switch (which) { case SLI4_UNREG_TYPE_DOMAIN: unreg_vfi->index = cpu_to_le16(index); break; case SLI4_UNREG_TYPE_FCF: unreg_vfi->index = cpu_to_le16(index); break; case SLI4_UNREG_TYPE_ALL: unreg_vfi->index = cpu_to_le16(U32_MAX); break; default: return -EIO; } if (which != SLI4_UNREG_TYPE_DOMAIN) unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI); return 0; } int sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which) { struct sli4_cmd_unreg_vpi *unreg_vpi = buf; u32 flags = 0; memset(buf, 0, SLI4_BMBX_SIZE); unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI; unreg_vpi->index = cpu_to_le16(indicator); switch (which) { case SLI4_UNREG_TYPE_PORT: flags |= SLI4_UNREG_VPI_II_VPI; break; case SLI4_UNREG_TYPE_DOMAIN: flags |= SLI4_UNREG_VPI_II_VFI; break; case SLI4_UNREG_TYPE_FCF: flags |= SLI4_UNREG_VPI_II_FCFI; break; case SLI4_UNREG_TYPE_ALL: /* override indicator */ unreg_vpi->index = cpu_to_le16(U32_MAX); flags |= SLI4_UNREG_VPI_II_FCFI; break; default: return -EIO; } unreg_vpi->dw2w0_flags = cpu_to_le16(flags); return 0; } static int sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf, struct sli4_queue *q, int num_q, u32 shift, u32 delay_mult) { struct sli4_rqst_cmn_modify_eq_delay *req = NULL; int i; req = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL); if (!req) return -EIO; sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay)); req->num_eq = cpu_to_le32(num_q); for (i = 0; 
i < num_q; i++) { req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id); req->eq_delay_record[i].phase = cpu_to_le32(shift); req->eq_delay_record[i].delay_multiplier = cpu_to_le32(delay_mult); } return 0; } void sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf, size_t size, u16 timeout) { struct sli4_rqst_lowlevel_set_watchdog *req = NULL; req = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL); if (!req) return; sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG, SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0, SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog)); req->watchdog_timeout = cpu_to_le16(timeout); } static int sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf, struct efc_dma *dma) { struct sli4_rqst_hdr *hdr = NULL; hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma); if (!hdr) return -EIO; hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES; hdr->subsystem = SLI4_SUBSYSTEM_COMMON; hdr->request_length = cpu_to_le32(dma->size); return 0; } static int sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf, struct efc_dma *dma) { struct sli4_rqst_hdr *hdr = NULL; hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma); if (!hdr) return -EIO; hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS; hdr->subsystem = SLI4_SUBSYSTEM_COMMON; hdr->request_length = cpu_to_le32(dma->size); return 0; } int sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context) { struct sli4_rqst_cmn_nop *nop = NULL; nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop), NULL); if (!nop) return -EIO; sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop)); memcpy(&nop->context, &context, sizeof(context)); return 0; } int sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype) { struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL; ext = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL); if (!ext) return -EIO; sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info)); ext->resource_type = cpu_to_le16(rtype); return 0; } int sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf) { struct sli4_rqst_hdr *hdr = NULL; hdr = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL); if (!hdr) return -EIO; hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS; hdr->subsystem = SLI4_SUBSYSTEM_COMMON; hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params); return 0; } static int sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf) { struct sli4_rqst_cmn_get_port_name *pname; pname = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL); if (!pname) return -EIO; sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME, SLI4_SUBSYSTEM_COMMON, CMD_V1, SLI4_RQST_PYLD_LEN(cmn_get_port_name)); /* Set the port type value (ethernet=0, FC=1) for V1 commands */ pname->port_type = SLI4_PORT_TYPE_FC; return 0; } int sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc, u16 eof, u32 desired_write_length, u32 offset, char *obj_name, struct efc_dma *dma) { struct sli4_rqst_cmn_write_object *wr_obj = NULL; struct sli4_bde *bde; u32 dwflags = 0; wr_obj = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL); if (!wr_obj) return -EIO; sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde))); if 
(noc) dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC; if (eof) dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF; dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN); wr_obj->desired_write_len_dword = cpu_to_le32(dwflags); wr_obj->write_offset = cpu_to_le32(offset); strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1); wr_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor; /* Setup to transfer xfer_size bytes to device */ bde->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (desired_write_length & SLI4_BDE_LEN_MASK)); bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); return 0; } int sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name) { struct sli4_rqst_cmn_delete_object *req = NULL; req = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(cmn_delete_object), NULL); if (!req) return -EIO; sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_delete_object)); strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1); return 0; } int sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len, u32 offset, char *obj_name, struct efc_dma *dma) { struct sli4_rqst_cmn_read_object *rd_obj = NULL; struct sli4_bde *bde; rd_obj = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL); if (!rd_obj) return -EIO; sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde))); rd_obj->desired_read_length_dword = cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN); rd_obj->read_offset = cpu_to_le32(offset); strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1); rd_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor; /* Setup to transfer xfer_size bytes to device */ bde->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | (desired_read_len & SLI4_BDE_LEN_MASK)); if (dma) { bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys)); bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys)); } else { bde->u.data.low = 0; bde->u.data.high = 0; } return 0; } int sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd, struct efc_dma *resp) { struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL; clp_cmd = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL); if (!clp_cmd) return -EIO; sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF, CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd)); clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size); clp_cmd->cmd_buf_addr_low = cpu_to_le32(lower_32_bits(cmd->phys)); clp_cmd->cmd_buf_addr_high = cpu_to_le32(upper_32_bits(cmd->phys)); clp_cmd->resp_buf_length = cpu_to_le32(resp->size); clp_cmd->resp_buf_addr_low = cpu_to_le32(lower_32_bits(resp->phys)); clp_cmd->resp_buf_addr_high = cpu_to_le32(upper_32_bits(resp->phys)); return 0; } int sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query, bool is_buffer_list, struct efc_dma *buffer, u8 fdb) { struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL; u32 buffer_length_flag = 0; set_dump_loc = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL); if (!set_dump_loc) return -EIO; sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_set_dump_location)); if 
(is_buffer_list) buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP; if (query) buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY; if (fdb) buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB; if (buffer) { set_dump_loc->buf_addr_low = cpu_to_le32(lower_32_bits(buffer->phys)); set_dump_loc->buf_addr_high = cpu_to_le32(upper_32_bits(buffer->phys)); buffer_length_flag |= buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN; } else { set_dump_loc->buf_addr_low = 0; set_dump_loc->buf_addr_high = 0; set_dump_loc->buffer_length_dword = 0; } set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag); return 0; } int sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature, u32 param_len, void *parameter) { struct sli4_rqst_cmn_set_features *cmd = NULL; cmd = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(cmn_set_features), NULL); if (!cmd) return -EIO; sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES, SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_set_features)); cmd->feature = cpu_to_le32(feature); cmd->param_len = cpu_to_le32(param_len); memcpy(cmd->params, parameter, param_len); return 0; } int sli_cqe_mq(struct sli4 *sli4, void *buf) { struct sli4_mcqe *mcqe = buf; u32 dwflags = le32_to_cpu(mcqe->dw3_flags); /* * Firmware can split mbx completions into two MCQEs: first with only * the "consumed" bit set and a second with the "complete" bit set. * Thus, ignore MCQE unless "complete" is set. */ if (!(dwflags & SLI4_MCQE_COMPLETED)) return SLI4_MCQE_STATUS_NOT_COMPLETED; if (le16_to_cpu(mcqe->completion_status)) { efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n", le16_to_cpu(mcqe->completion_status), le16_to_cpu(mcqe->extended_status), (dwflags & SLI4_MCQE_CONSUMED), (dwflags & SLI4_MCQE_COMPLETED), (dwflags & SLI4_MCQE_AE), (dwflags & SLI4_MCQE_VALID)); } return le16_to_cpu(mcqe->completion_status); } int sli_cqe_async(struct sli4 *sli4, void *buf) { struct sli4_acqe *acqe = buf; int rc = -EIO; if (!buf) { efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf); return -EIO; } switch (acqe->event_code) { case SLI4_ACQE_EVENT_CODE_LINK_STATE: efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n", acqe->event_code); break; case SLI4_ACQE_EVENT_CODE_GRP_5: efc_log_info(sli4, "ACQE GRP5\n"); break; case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT: efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n", acqe->event_type, le32_to_cpu(acqe->event_data[0]), le32_to_cpu(acqe->event_data[1])); break; case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT: rc = sli_fc_process_link_attention(sli4, buf); break; default: efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code); } return rc; } bool sli_fw_ready(struct sli4 *sli4) { u32 val; /* Determine if the chip FW is in a ready state */ val = sli_reg_read_status(sli4); return (val & SLI4_PORT_STATUS_RDY) ? 1 : 0; } static bool sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms) { unsigned long end; end = jiffies + msecs_to_jiffies(timeout_ms); do { if (sli_fw_ready(sli4)) return true; usleep_range(1000, 2000); } while (time_before(jiffies, end)); return false; } static bool sli_sliport_reset(struct sli4 *sli4) { bool rc; u32 val; val = SLI4_PORT_CTRL_IP; /* Initialize port, endian */ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG)); rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC); if (!rc) efc_log_crit(sli4, "port failed to become ready after initialization\n"); return rc; } static bool sli_fw_init(struct sli4 *sli4) { /* * Is firmware ready for operation? 
*/ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) { efc_log_crit(sli4, "FW status is NOT ready\n"); return false; } /* * Reset port to a known state */ return sli_sliport_reset(sli4); } static int sli_request_features(struct sli4 *sli4, u32 *features, bool query) { struct sli4_cmd_request_features *req_features = sli4->bmbx.virt; if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) { efc_log_err(sli4, "bad REQUEST_FEATURES write\n"); return -EIO; } if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox write fail\n"); return -EIO; } if (le16_to_cpu(req_features->hdr.status)) { efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n", le16_to_cpu(req_features->hdr.status)); return -EIO; } *features = le32_to_cpu(req_features->resp); return 0; } void sli_calc_max_qentries(struct sli4 *sli4) { enum sli4_qtype q; u32 qentries; for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) { sli4->qinfo.max_qentries[q] = sli_convert_mask_to_count(sli4->qinfo.count_method[q], sli4->qinfo.count_mask[q]); } /* single, contiguous DMA allocations will be called for each queue * of size (max_qentries * queue entry size); since these can be large, * check against the OS max DMA allocation size */ for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) { qentries = sli4->qinfo.max_qentries[q]; efc_log_info(sli4, "[%s]: max_qentries from %d to %d\n", SLI4_QNAME[q], sli4->qinfo.max_qentries[q], qentries); sli4->qinfo.max_qentries[q] = qentries; } } static int sli_get_read_config(struct sli4 *sli4) { struct sli4_rsp_read_config *conf = sli4->bmbx.virt; u32 i, total; u32 *base; if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) { efc_log_err(sli4, "bad READ_CONFIG write\n"); return -EIO; } if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n"); return -EIO; } if (le16_to_cpu(conf->hdr.status)) { efc_log_err(sli4, "READ_CONFIG bad status %#x\n", le16_to_cpu(conf->hdr.status)); return -EIO; } sli4->params.has_extents = le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT; if (sli4->params.has_extents) { efc_log_err(sli4, "extents not supported\n"); return -EIO; } base = sli4->ext[0].base; if (!base) { int size = SLI4_RSRC_MAX * sizeof(u32); base = kzalloc(size, GFP_KERNEL); if (!base) return -EIO; } for (i = 0; i < SLI4_RSRC_MAX; i++) { sli4->ext[i].number = 1; sli4->ext[i].n_alloc = 0; sli4->ext[i].base = &base[i]; } sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base); sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count); sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base); sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count); sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base); sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count); sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base); sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count); sli4->ext[SLI4_RSRC_FCFI].base[0] = 0; sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count); for (i = 0; i < SLI4_RSRC_MAX; i++) { total = sli4->ext[i].number * sli4->ext[i].size; sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL); if (!sli4->ext[i].use_map) { efc_log_err(sli4, "bitmap memory allocation failed %d\n", i); return -EIO; } sli4->ext[i].map_size = total; } sli4->topology = (le32_to_cpu(conf->topology_dword) & SLI4_READ_CFG_RESP_TOPOLOGY) >> 24; switch (sli4->topology) { case SLI4_READ_CFG_TOPO_FC: efc_log_info(sli4, "FC (unknown)\n"); break; case SLI4_READ_CFG_TOPO_NON_FC_AL: efc_log_info(sli4, "FC (direct 
attach)\n"); break; case SLI4_READ_CFG_TOPO_FC_AL: efc_log_info(sli4, "FC (arbitrated loop)\n"); break; default: efc_log_info(sli4, "bad topology %#x\n", sli4->topology); } sli4->e_d_tov = le16_to_cpu(conf->e_d_tov); sli4->r_a_tov = le16_to_cpu(conf->r_a_tov); sli4->link_module_type = le16_to_cpu(conf->lmt); sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] = le16_to_cpu(conf->eq_count); sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] = le16_to_cpu(conf->cq_count); sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] = le16_to_cpu(conf->wq_count); sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] = le16_to_cpu(conf->rq_count); /* * READ_CONFIG doesn't give the max number of MQ. Applications * will typically want 1, but we may need another at some future * date. Dummy up a "max" MQ count here. */ sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT; return 0; } static int sli_get_sli4_parameters(struct sli4 *sli4) { struct sli4_rsp_cmn_get_sli4_params *parms; u32 dw_loopback; u32 dw_eq_pg_cnt; u32 dw_cq_pg_cnt; u32 dw_mq_pg_cnt; u32 dw_wq_pg_cnt; u32 dw_rq_pg_cnt; u32 dw_sgl_pg_cnt; if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt)) return -EIO; parms = (struct sli4_rsp_cmn_get_sli4_params *) (((u8 *)sli4->bmbx.virt) + offsetof(struct sli4_cmd_sli_config, payload.embed)); if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox write fail\n"); return -EIO; } if (parms->hdr.status) { efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x", parms->hdr.status); efc_log_err(sli4, "additional status %#x\n", parms->hdr.additional_status); return -EIO; } dw_loopback = le32_to_cpu(parms->dw16_loopback_scope); dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt); dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt); dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt); dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt); dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt); sli4->params.auto_reg = (dw_loopback & SLI4_PARAM_AREG); sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF); sli4->params.hdr_template_req = (dw_loopback & SLI4_PARAM_HDRR); sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM); sli4->params.t10_dif_separate_capable = (dw_loopback & SLI4_PARAM_TSMM); sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt); sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt); sli4->rq_min_buf_size = le16_to_cpu(parms->min_rq_buffer_size); sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size); sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] = (dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK); sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] = (dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK); sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] = (dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK); sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] = (dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK); sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] = (dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK); /* save count methods and masks for each queue type */ sli4->qinfo.count_mask[SLI4_QTYPE_EQ] = le16_to_cpu(parms->eqe_count_mask); sli4->qinfo.count_method[SLI4_QTYPE_EQ] = GET_Q_CNT_METHOD(dw_eq_pg_cnt); sli4->qinfo.count_mask[SLI4_QTYPE_CQ] = le16_to_cpu(parms->cqe_count_mask); sli4->qinfo.count_method[SLI4_QTYPE_CQ] = GET_Q_CNT_METHOD(dw_cq_pg_cnt); sli4->qinfo.count_mask[SLI4_QTYPE_MQ] = le16_to_cpu(parms->mqe_count_mask); sli4->qinfo.count_method[SLI4_QTYPE_MQ] = GET_Q_CNT_METHOD(dw_mq_pg_cnt); sli4->qinfo.count_mask[SLI4_QTYPE_WQ] = le16_to_cpu(parms->wqe_count_mask); sli4->qinfo.count_method[SLI4_QTYPE_WQ] = 
GET_Q_CNT_METHOD(dw_wq_pg_cnt); sli4->qinfo.count_mask[SLI4_QTYPE_RQ] = le16_to_cpu(parms->rqe_count_mask); sli4->qinfo.count_method[SLI4_QTYPE_RQ] = GET_Q_CNT_METHOD(dw_rq_pg_cnt); /* now calculate max queue entries */ sli_calc_max_qentries(sli4); dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt); /* max # of pages */ sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK); /* bit map of available sizes */ sli4->sgl_page_sizes = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8; /* ignore HLM here. Use value from REQUEST_FEATURES */ sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length); sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR); /* default to using pre-registered SGL's */ sli4->params.sgl_pre_registered = true; sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON; sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ); sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) & SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12; /* Use the highest available WQE size. */ if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) & SLI4_128BYTE_WQE_SUPPORT) sli4->wqe_size = SLI4_WQE_EXT_BYTES; else sli4->wqe_size = SLI4_WQE_BYTES; return 0; } static int sli_get_ctrl_attributes(struct sli4 *sli4) { struct sli4_rsp_cmn_get_cntl_attributes *attr; struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr; struct efc_dma data; u32 psize; /* * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily * uses VPD DMA buffer as the response won't fit in the embedded * buffer. */ memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size); if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt, &sli4->vpd_data)) { efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n"); return -EIO; } attr = sli4->vpd_data.virt; if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox write fail\n"); return -EIO; } if (attr->hdr.status) { efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x", attr->hdr.status); efc_log_err(sli4, "additional status %#x\n", attr->hdr.additional_status); return -EIO; } sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM; memcpy(sli4->bios_version_string, attr->bios_version_str, sizeof(sli4->bios_version_string)); /* get additional attributes */ psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes); data.size = psize; data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size, &data.phys, GFP_KERNEL); if (!data.virt) { memset(&data, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n"); return -EIO; } if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt, &data)) { efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n"); dma_free_coherent(&sli4->pci->dev, data.size, data.virt, data.phys); return -EIO; } if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n"); dma_free_coherent(&sli4->pci->dev, data.size, data.virt, data.phys); return -EIO; } add_attr = data.virt; if (add_attr->hdr.status) { efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n", add_attr->hdr.status); dma_free_coherent(&sli4->pci->dev, data.size, data.virt, data.phys); return -EIO; } memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name)); efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name); dma_free_coherent(&sli4->pci->dev, data.size, data.virt, data.phys); memset(&data, 0, sizeof(struct efc_dma)); return 0; } static int sli_get_fw_rev(struct sli4 *sli4) { struct sli4_cmd_read_rev *read_rev = sli4->bmbx.virt; 
if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data)) return -EIO; if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n"); return -EIO; } if (le16_to_cpu(read_rev->hdr.status)) { efc_log_err(sli4, "READ_REV bad status %#x\n", le16_to_cpu(read_rev->hdr.status)); return -EIO; } sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id); memcpy(sli4->fw_name[0], read_rev->first_fw_name, sizeof(sli4->fw_name[0])); sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id); memcpy(sli4->fw_name[1], read_rev->second_fw_name, sizeof(sli4->fw_name[1])); sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev); sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev); sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev); efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n", read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id), read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id)); efc_log_info(sli4, "HW1: %08x / HW2: %08x\n", le32_to_cpu(read_rev->first_hw_rev), le32_to_cpu(read_rev->second_hw_rev)); /* Check that all VPD data was returned */ if (le32_to_cpu(read_rev->returned_vpd_length) != le32_to_cpu(read_rev->actual_vpd_length)) { efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n", le32_to_cpu(read_rev->available_length_dword) & SLI4_READ_REV_AVAILABLE_LENGTH, le32_to_cpu(read_rev->returned_vpd_length), le32_to_cpu(read_rev->actual_vpd_length)); } sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length); return 0; } static int sli_get_config(struct sli4 *sli4) { struct sli4_rsp_cmn_get_port_name *port_name; struct sli4_cmd_read_nvparms *read_nvparms; /* * Read the device configuration */ if (sli_get_read_config(sli4)) return -EIO; if (sli_get_sli4_parameters(sli4)) return -EIO; if (sli_get_ctrl_attributes(sli4)) return -EIO; if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt)) return -EIO; port_name = (struct sli4_rsp_cmn_get_port_name *) (((u8 *)sli4->bmbx.virt) + offsetof(struct sli4_cmd_sli_config, payload.embed)); if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n"); return -EIO; } sli4->port_name[0] = port_name->port_name[sli4->port_number]; sli4->port_name[1] = '\0'; if (sli_get_fw_rev(sli4)) return -EIO; if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) { efc_log_err(sli4, "bad READ_NVPARMS write\n"); return -EIO; } if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n"); return -EIO; } read_nvparms = sli4->bmbx.virt; if (le16_to_cpu(read_nvparms->hdr.status)) { efc_log_err(sli4, "READ_NVPARMS bad status %#x\n", le16_to_cpu(read_nvparms->hdr.status)); return -EIO; } memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn)); memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn)); efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3], sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]); efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3], sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]); return 0; } int sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, void __iomem *reg[]) { u32 intf = U32_MAX; u32 pci_class_rev = 0; u32 rev_id = 0; u32 family = 0; u32 asic_id = 0; u32 i; struct sli4_asic_entry_t *asic; memset(sli4, 0, sizeof(struct sli4)); sli4->os = os; sli4->pci = pdev; for (i = 0; i < 6; i++) sli4->reg[i] = reg[i]; /* * Read the SLI_INTF register to discover the 
register layout * and other capability information */ if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf)) return -EIO; if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) { efc_log_err(sli4, "SLI_INTF is not valid\n"); return -EIO; } /* driver only support SLI-4 */ if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) { efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf); return -EIO; } sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK; sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK; efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n", sli_reg_read_status(sli4), sli_reg_read_err1(sli4), sli_reg_read_err2(sli4)); /* * set the ASIC type and revision */ if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev)) return -EIO; rev_id = pci_class_rev & 0xff; family = sli4->sli_family; if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) { if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id)) family = asic_id & SLI4_ASIC_GEN_MASK; } for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table); i++, asic++) { if (rev_id == asic->rev_id && family == asic->family) { sli4->asic_type = family; sli4->asic_rev = rev_id; break; } } /* Fail if no matching asic type/rev was found */ if (!sli4->asic_type) { efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n", family, rev_id); return -EIO; } /* * The bootstrap mailbox is equivalent to a MQ with a single 256 byte * entry, a CQ with a single 16 byte entry, and no event queue. * Alignment must be 16 bytes as the low order address bits in the * address register are also control / status. */ sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe); sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size, &sli4->bmbx.phys, GFP_KERNEL); if (!sli4->bmbx.virt) { memset(&sli4->bmbx, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "bootstrap mailbox allocation failed\n"); return -EIO; } if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) { efc_log_err(sli4, "bad alignment for bootstrap mailbox\n"); return -EIO; } efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt, upper_32_bits(sli4->bmbx.phys), lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size); /* 4096 is arbitrary. What should this value actually be? 
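 * A note on the choice: 4 KiB is only a generous upper bound for the buffer
 * handed to READ_REV. The READ_REV handling earlier in this file compares
 * returned_vpd_length against actual_vpd_length and stores the returned
 * length in sli4->vpd_length, so an undersized buffer shows up as a logged
 * length mismatch rather than an overrun.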
*/ sli4->vpd_data.size = 4096; sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev, sli4->vpd_data.size, &sli4->vpd_data.phys, GFP_KERNEL); if (!sli4->vpd_data.virt) { memset(&sli4->vpd_data, 0, sizeof(struct efc_dma)); /* Note that failure isn't fatal in this specific case */ efc_log_info(sli4, "VPD buffer allocation failed\n"); } if (!sli_fw_init(sli4)) { efc_log_err(sli4, "FW initialization failed\n"); return -EIO; } /* * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true * in addition to any other desired features */ sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV | SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF | SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR | SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH | SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI | SLI4_REQFEAT_MRQP); /* use performance hints if available */ if (sli4->params.perf_hint) sli4->features |= SLI4_REQFEAT_PERFH; if (sli_request_features(sli4, &sli4->features, true)) return -EIO; if (sli_get_config(sli4)) return -EIO; return 0; } int sli_init(struct sli4 *sli4) { if (sli4->params.has_extents) { efc_log_info(sli4, "extend allocation not supported\n"); return -EIO; } sli4->features &= (~SLI4_REQFEAT_HLM); sli4->features &= (~SLI4_REQFEAT_RXSEQ); sli4->features &= (~SLI4_REQFEAT_RXRI); if (sli_request_features(sli4, &sli4->features, false)) return -EIO; return 0; } int sli_reset(struct sli4 *sli4) { u32 i; if (!sli_fw_init(sli4)) { efc_log_crit(sli4, "FW initialization failed\n"); return -EIO; } kfree(sli4->ext[0].base); sli4->ext[0].base = NULL; for (i = 0; i < SLI4_RSRC_MAX; i++) { bitmap_free(sli4->ext[i].use_map); sli4->ext[i].use_map = NULL; sli4->ext[i].base = NULL; } return sli_get_config(sli4); } int sli_fw_reset(struct sli4 *sli4) { /* * Firmware must be ready before issuing the reset. 
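 * The sequence below is: wait for the firmware to report ready (up to
 * SLI4_FW_READY_TIMEOUT_MSEC), write SLI4_PHYDEV_CTRL_FRST to the
 * PHYDEV_CONTROL register at reg[0] + SLI4_PHYDEV_CTRL_REG, then wait for
 * the firmware to report ready again before returning success.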
*/ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) { efc_log_crit(sli4, "FW status is NOT ready\n"); return -EIO; } /* Lancer uses PHYDEV_CONTROL */ writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG)); /* wait for the FW to become ready after the reset */ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) { efc_log_crit(sli4, "Failed to be ready after firmware reset\n"); return -EIO; } return 0; } void sli_teardown(struct sli4 *sli4) { u32 i; kfree(sli4->ext[0].base); sli4->ext[0].base = NULL; for (i = 0; i < SLI4_RSRC_MAX; i++) { sli4->ext[i].base = NULL; bitmap_free(sli4->ext[i].use_map); sli4->ext[i].use_map = NULL; } if (!sli_sliport_reset(sli4)) efc_log_err(sli4, "FW deinitialization failed\n"); dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size, sli4->vpd_data.virt, sli4->vpd_data.phys); memset(&sli4->vpd_data, 0, sizeof(struct efc_dma)); dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size, sli4->bmbx.virt, sli4->bmbx.phys); memset(&sli4->bmbx, 0, sizeof(struct efc_dma)); } int sli_callback(struct sli4 *sli4, enum sli4_callback which, void *func, void *arg) { if (!func) { efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n", sli4, which, func); return -EIO; } switch (which) { case SLI4_CB_LINK: sli4->link = func; sli4->link_arg = arg; break; default: efc_log_info(sli4, "unknown callback %#x\n", which); return -EIO; } return 0; } int sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq, u32 num_eq, u32 shift, u32 delay_mult) { sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq, shift, delay_mult); if (sli_bmbx_command(sli4)) { efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n"); return -EIO; } if (sli_res_sli_config(sli4, sli4->bmbx.virt)) { efc_log_err(sli4, "bad status MODIFY EQ DELAY\n"); return -EIO; } return 0; } int sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype, u32 *rid, u32 *index) { int rc = 0; u32 size; u32 ext_idx; u32 item_idx; u32 position; *rid = U32_MAX; *index = U32_MAX; switch (rtype) { case SLI4_RSRC_VFI: case SLI4_RSRC_VPI: case SLI4_RSRC_RPI: case SLI4_RSRC_XRI: position = find_first_zero_bit(sli4->ext[rtype].use_map, sli4->ext[rtype].map_size); if (position >= sli4->ext[rtype].map_size) { efc_log_err(sli4, "out of resource %d (alloc=%d)\n", rtype, sli4->ext[rtype].n_alloc); rc = -EIO; break; } set_bit(position, sli4->ext[rtype].use_map); *index = position; size = sli4->ext[rtype].size; ext_idx = *index / size; item_idx = *index % size; *rid = sli4->ext[rtype].base[ext_idx] + item_idx; sli4->ext[rtype].n_alloc++; break; default: rc = -EIO; } return rc; } int sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid) { int rc = -EIO; u32 x; u32 size, *base; switch (rtype) { case SLI4_RSRC_VFI: case SLI4_RSRC_VPI: case SLI4_RSRC_RPI: case SLI4_RSRC_XRI: /* * Figure out which extent contains the resource ID. I.e. find * the extent such that * extent->base <= resource ID < extent->base + extent->size */ base = sli4->ext[rtype].base; size = sli4->ext[rtype].size; /* * In the case of FW reset, this may be cleared * but the force_free path will still attempt to * free the resource. Prevent a NULL pointer access. 
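 * For example, with hypothetical values base[] = {100, 200} and size = 64,
 * rid 210 falls in extent 1, so the bit cleared in use_map is
 * (1 * 64) + (210 - 200) = 74.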
*/ if (!base) break; for (x = 0; x < sli4->ext[rtype].number; x++) { if ((rid < base[x] || (rid >= (base[x] + size)))) continue; rid -= base[x]; clear_bit((x * size) + rid, sli4->ext[rtype].use_map); rc = 0; break; } break; default: break; } return rc; } int sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype) { int rc = -EIO; u32 i; switch (rtype) { case SLI4_RSRC_VFI: case SLI4_RSRC_VPI: case SLI4_RSRC_RPI: case SLI4_RSRC_XRI: for (i = 0; i < sli4->ext[rtype].map_size; i++) clear_bit(i, sli4->ext[rtype].use_map); rc = 0; break; default: break; } return rc; } int sli_raise_ue(struct sli4 *sli4, u8 dump) { u32 val = 0; if (dump == SLI4_FUNC_DESC_DUMP) { val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP; writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG)); } else { val = SLI4_PHYDEV_CTRL_FRST; if (dump == SLI4_CHIP_LEVEL_DUMP) val |= SLI4_PHYDEV_CTRL_DD; writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG)); } return 0; } int sli_dump_is_ready(struct sli4 *sli4) { int rc = SLI4_DUMP_READY_STATUS_NOT_READY; u32 port_val; u32 bmbx_val; /* * Ensure that the port is ready AND the mailbox is * ready before signaling that the dump is ready to go. */ port_val = sli_reg_read_status(sli4); bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG); if ((bmbx_val & SLI4_BMBX_RDY) && (port_val & SLI4_PORT_STATUS_RDY)) { if (port_val & SLI4_PORT_STATUS_DIP) rc = SLI4_DUMP_READY_STATUS_DD_PRESENT; else if (port_val & SLI4_PORT_STATUS_FDP) rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT; } return rc; } bool sli_reset_required(struct sli4 *sli4) { u32 val; val = sli_reg_read_status(sli4); return (val & SLI4_PORT_STATUS_RN); } int sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri, u32 xri_count, struct efc_dma *page0[], struct efc_dma *page1[], struct efc_dma *dma) { struct sli4_rqst_post_sgl_pages *post = NULL; u32 i; __le32 req_len; post = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma); if (!post) return -EIO; /* payload size calculation */ /* 4 = xri_start + xri_count */ /* xri_count = # of XRI's registered */ /* sizeof(uint64_t) = physical address size */ /* 2 = # of physical addresses per page set */ req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2))); sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC, CMD_V0, req_len); post->xri_start = cpu_to_le16(xri); post->xri_count = cpu_to_le16(xri_count); for (i = 0; i < xri_count; i++) { post->page_set[i].page0_low = cpu_to_le32(lower_32_bits(page0[i]->phys)); post->page_set[i].page0_high = cpu_to_le32(upper_32_bits(page0[i]->phys)); } if (page1) { for (i = 0; i < xri_count; i++) { post->page_set[i].page1_low = cpu_to_le32(lower_32_bits(page1[i]->phys)); post->page_set[i].page1_high = cpu_to_le32(upper_32_bits(page1[i]->phys)); } } return 0; } int sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 rpi, struct efc_dma *payload_dma) { struct sli4_rqst_post_hdr_templates *req = NULL; uintptr_t phys = 0; u32 i = 0; u32 page_count, payload_size; page_count = sli_page_count(dma->size, SLI_PAGE_SIZE); payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) + (page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr)); if (page_count > 16) { /* * We can't fit more than 16 descriptors into an embedded mbox * command, it has to be non-embedded */ payload_dma->size = payload_size; payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev, payload_dma->size, &payload_dma->phys, GFP_KERNEL); if (!payload_dma->virt) { memset(payload_dma, 0, sizeof(struct efc_dma)); efc_log_err(sli4, "mbox 
payload memory allocation fail\n"); return -EIO; } req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma); } else { req = sli_config_cmd_init(sli4, buf, payload_size, NULL); } if (!req) return -EIO; if (rpi == U16_MAX) rpi = sli4->ext[SLI4_RSRC_RPI].base[0]; sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES, SLI4_SUBSYSTEM_FC, CMD_V0, SLI4_RQST_PYLD_LEN(post_hdr_templates)); req->rpi_offset = cpu_to_le16(rpi); req->page_count = cpu_to_le16(page_count); phys = dma->phys; for (i = 0; i < page_count; i++) { req->page_descriptor[i].low = cpu_to_le32(lower_32_bits(phys)); req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys)); phys += SLI_PAGE_SIZE; } return 0; } u32 sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi) { u32 bytes = 0; /* Check if header templates needed */ if (sli4->params.hdr_template_req) /* round up to a page */ bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE); return bytes; } const char * sli_fc_get_status_string(u32 status) { static struct { u32 code; const char *label; } lookup[] = { {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"}, {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"}, {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"}, {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"}, {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"}, {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"}, {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"}, {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"}, {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"}, {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"}, {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"}, {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED, "RQ_INSUFF_BUF_NEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"}, {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"}, {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"}, {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"}, {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED, "RQ_INSUFF_XRI_NEEDED"}, {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"}, {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"}, {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"}, }; u32 i; for (i = 0; i < ARRAY_SIZE(lookup); i++) { if (status == lookup[i].code) return lookup[i].label; } return "unknown"; }
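/*
 * Worked example (hypothetical count) for the request-length computation in
 * sli_cmd_post_sgl_pages() above: with xri_count = 8 the payload length is
 * 4 + 8 * sizeof(u64) * 2 = 132 bytes, i.e. the xri_start/xri_count words
 * plus two 64-bit page addresses per XRI.
 */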
linux-master
drivers/scsi/elx/libefc_sli/sli4.c
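Below is a small user-space sketch, not part of either driver, illustrating the page-descriptor fill performed by sli_cmd_post_hdr_templates() above: the DMA region is walked in SLI_PAGE_SIZE steps and each page's physical address is stored as lower/upper 32-bit halves. PAGE_SZ, the addresses and the helper name page_count() here are hypothetical stand-ins (page_count() mirrors what sli_page_count() is expected to compute).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* stand-in for SLI_PAGE_SIZE */

/* Round a byte count up to whole pages (assumed behaviour of sli_page_count()). */
static uint32_t page_count(uint32_t bytes)
{
	return (bytes + PAGE_SZ - 1) / PAGE_SZ;
}

int main(void)
{
	uint64_t phys = 0x12345678000ULL;	/* hypothetical DMA base address */
	uint32_t bytes = 10000;			/* hypothetical region size */
	uint32_t n = page_count(bytes);		/* 3 pages for 10000 bytes */
	uint32_t i;

	/* Emit one low/high descriptor pair per page, advancing by one page each time. */
	for (i = 0; i < n; i++, phys += PAGE_SZ)
		printf("page %u: low=0x%08x high=0x%08x\n",
		       i, (uint32_t)(phys & 0xffffffffu), (uint32_t)(phys >> 32));
	return 0;
}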
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include <linux/vmalloc.h> #include <linux/delay.h> #include "qla_def.h" #include "qla_gbl.h" #define TIMEOUT_100_MS 100 static const uint32_t qla8044_reg_tbl[] = { QLA8044_PEG_HALT_STATUS1, QLA8044_PEG_HALT_STATUS2, QLA8044_PEG_ALIVE_COUNTER, QLA8044_CRB_DRV_ACTIVE, QLA8044_CRB_DEV_STATE, QLA8044_CRB_DRV_STATE, QLA8044_CRB_DRV_SCRATCH, QLA8044_CRB_DEV_PART_INFO1, QLA8044_CRB_IDC_VER_MAJOR, QLA8044_FW_VER_MAJOR, QLA8044_FW_VER_MINOR, QLA8044_FW_VER_SUB, QLA8044_CMDPEG_STATE, QLA8044_ASIC_TEMP, }; /* 8044 Flash Read/Write functions */ uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) { return readl((void __iomem *) (ha->nx_pcibase + addr)); } void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val) { writel(val, (void __iomem *)((ha)->nx_pcibase + addr)); } int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg) { struct qla_hw_data *ha = vha->hw; if (crb_reg < CRB_REG_INDEX_MAX) return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]); else return QLA_FUNCTION_FAILED; } void qla8044_wr_direct(struct scsi_qla_host *vha, const uint32_t crb_reg, const uint32_t value) { struct qla_hw_data *ha = vha->hw; if (crb_reg < CRB_REG_INDEX_MAX) qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value); } static int qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr) { uint32_t val; int ret_val = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr); val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum)); if (val != addr) { ql_log(ql_log_warn, vha, 0xb087, "%s: Failed to set register window : " "addr written 0x%x, read 0x%x!\n", __func__, addr, val); ret_val = QLA_FUNCTION_FAILED; } return ret_val; } static int qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) { int ret_val = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; ret_val = qla8044_set_win_base(vha, addr); if (!ret_val) *data = qla8044_rd_reg(ha, QLA8044_WILDCARD); else ql_log(ql_log_warn, vha, 0xb088, "%s: failed read of addr 0x%x!\n", __func__, addr); return ret_val; } static int qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) { int ret_val = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; ret_val = qla8044_set_win_base(vha, addr); if (!ret_val) qla8044_wr_reg(ha, QLA8044_WILDCARD, data); else ql_log(ql_log_warn, vha, 0xb089, "%s: failed wrt to addr 0x%x, data 0x%x\n", __func__, addr, data); return ret_val; } /* * qla8044_read_write_crb_reg - Read from raddr and write value to waddr. 
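 * Both halves use the indirect CRB window: qla8044_set_win_base() selects the
 * window for the address and the data access itself goes through
 * QLA8044_WILDCARD, as in qla8044_rd_reg_indirect()/qla8044_wr_reg_indirect().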
* * @ha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to * */ static void qla8044_read_write_crb_reg(struct scsi_qla_host *vha, uint32_t raddr, uint32_t waddr) { uint32_t value; qla8044_rd_reg_indirect(vha, raddr, &value); qla8044_wr_reg_indirect(vha, waddr, value); } static int qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1, uint32_t mask) { unsigned long timeout; uint32_t temp = 0; /* jiffies after 100ms */ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); do { qla8044_rd_reg_indirect(vha, addr1, &temp); if ((temp & mask) != 0) break; if (time_after_eq(jiffies, timeout)) { ql_log(ql_log_warn, vha, 0xb151, "Error in processing rdmdio entry\n"); return -1; } } while (1); return 0; } static uint32_t qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha, uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr) { uint32_t temp; int ret = 0; ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) return -1; temp = (0x40000000 | addr); qla8044_wr_reg_indirect(vha, addr1, temp); ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) return 0; qla8044_rd_reg_indirect(vha, addr3, &ret); return ret; } static int qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha, uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask) { unsigned long timeout; uint32_t temp; /* jiffies after 100 msecs */ timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); do { temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2); if ((temp & 0x1) != 1) break; if (time_after_eq(jiffies, timeout)) { ql_log(ql_log_warn, vha, 0xb152, "Error in processing mdiobus idle\n"); return -1; } } while (1); return 0; } static int qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value) { int ret = 0; ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) return -1; qla8044_wr_reg_indirect(vha, addr3, value); qla8044_wr_reg_indirect(vha, addr1, addr); ret = qla8044_poll_wait_for_ready(vha, addr1, mask); if (ret == -1) return -1; return 0; } /* * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. * * @vha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to * @p_rmw_hdr : header with shift/or/xor values. 
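 * A worked example with hypothetical header values test_mask = 0x0000FF00,
 * shl = 0, shr = 8, or_value = 0x1, xor_value = 0: reading 0x12345678 from
 * raddr yields ((0x12345678 & 0x0000FF00) >> 8) | 0x1 = 0x57, which is then
 * written to waddr. If index_a is non-zero, the starting value is taken from
 * reset_tmplt.array[index_a] instead of being read from raddr.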
* */ static void qla8044_rmw_crb_reg(struct scsi_qla_host *vha, uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr) { uint32_t value; if (p_rmw_hdr->index_a) value = vha->reset_tmplt.array[p_rmw_hdr->index_a]; else qla8044_rd_reg_indirect(vha, raddr, &value); value &= p_rmw_hdr->test_mask; value <<= p_rmw_hdr->shl; value >>= p_rmw_hdr->shr; value |= p_rmw_hdr->or_value; value ^= p_rmw_hdr->xor_value; qla8044_wr_reg_indirect(vha, waddr, value); return; } static inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha) { uint32_t qsnt_state; struct qla_hw_data *ha = vha->hw; qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); qsnt_state |= (1 << ha->portnum); qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n", __func__, vha->host_no, qsnt_state); } void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha) { uint32_t qsnt_state; struct qla_hw_data *ha = vha->hw; qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); qsnt_state &= ~(1 << ha->portnum); qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n", __func__, vha->host_no, qsnt_state); } /** * qla8044_lock_recovery - Recovers the idc_lock. * @vha : Pointer to adapter structure * * Lock Recovery Register * 5-2 Lock recovery owner: Function ID of driver doing lock recovery, * valid if bits 1..0 are set by driver doing lock recovery. * 1-0 1 - Driver intends to force unlock the IDC lock. * 2 - Driver is moving forward to unlock the IDC lock. Driver clears * this field after force unlocking the IDC lock. * * Lock Recovery process * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is * greater than 0, then wait for the other driver to unlock otherwise * move to the next step. * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY * register bits 1..0 and also set the function# in bits 5..2. * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms. * Wait for the other driver to perform lock recovery if the function * number in bits 5..2 has changed, otherwise move to the next step. * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0 * leaving your function# in bits 5..2. * e. Force unlock using the DRIVER_UNLOCK register and immediately clear * the IDC_LOCK_RECOVERY bits 5..0 by writing 0. 
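 * As a concrete illustration of the layout above, function 3 would write
 * (3 << 2) | 1 = 0x0D in step b, (3 << 2) | 2 = 0x0E in step d, and finally
 * write 0 in step e after forcing the unlock.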
**/ static int qla8044_lock_recovery(struct scsi_qla_host *vha) { uint32_t lock = 0, lockid; struct qla_hw_data *ha = vha->hw; lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); /* Check for other Recovery in progress, go wait */ if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0) return QLA_FUNCTION_FAILED; /* Intent to Recover */ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER); msleep(200); /* Check Intent to Recover is advertised */ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n" , __func__, ha->portnum); /* Proceed to Recover */ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | PROCEED_TO_RECOVER); /* Force Unlock() */ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF); qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK); /* Clear bits 0-5 in IDC_RECOVERY register*/ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0); /* Get lock() */ lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); if (lock) { lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum; qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid); return QLA_SUCCESS; } else return QLA_FUNCTION_FAILED; } int qla8044_idc_lock(struct qla_hw_data *ha) { uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0; uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (status == 0) { /* acquire semaphore5 from PCI HW block */ status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); if (status) { /* Increment Counter (8-31) and update func_num (0-7) on * getting a successful lock */ lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum; qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id); break; } if (timeout == 0) first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); if (++timeout >= (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) { tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); func_num = tmo_owner & 0xFF; lock_cnt = tmo_owner >> 8; ql_log(ql_log_warn, vha, 0xb114, "%s: Lock by func %d failed after 2s, lock held " "by func %d, lock count %d, first_owner %d\n", __func__, ha->portnum, func_num, lock_cnt, (first_owner & 0xFF)); if (first_owner != tmo_owner) { /* Some other driver got lock, * OR same driver got lock again (counter * value changed), when we were waiting for * lock. Retry for another 2 sec */ ql_dbg(ql_dbg_p3p, vha, 0xb115, "%s: %d: IDC lock failed\n", __func__, ha->portnum); timeout = 0; } else { /* Same driver holding lock > 2sec. 
* Force Recovery */ if (qla8044_lock_recovery(vha) == QLA_SUCCESS) { /* Recovered and got lock */ ret_val = QLA_SUCCESS; ql_dbg(ql_dbg_p3p, vha, 0xb116, "%s:IDC lock Recovery by %d" "successful...\n", __func__, ha->portnum); } /* Recovery Failed, some other function * has the lock, wait for 2secs * and retry */ ql_dbg(ql_dbg_p3p, vha, 0xb08a, "%s: IDC lock Recovery by %d " "failed, Retrying timeout\n", __func__, ha->portnum); timeout = 0; } } msleep(QLA8044_DRV_LOCK_MSLEEP); } return ret_val; } void qla8044_idc_unlock(struct qla_hw_data *ha) { int id; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); if ((id & 0xFF) != ha->portnum) { ql_log(ql_log_warn, vha, 0xb118, "%s: IDC Unlock by %d failed, lock owner is %d!\n", __func__, ha->portnum, (id & 0xFF)); return; } /* Keep lock counter value, update the ha->func_num to 0xFF */ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF)); qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK); } /* 8044 Flash Lock/Unlock functions */ static int qla8044_flash_lock(scsi_qla_host_t *vha) { int lock_owner; int timeout = 0; uint32_t lock_status = 0; int ret_val = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; while (lock_status == 0) { lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK); if (lock_status) break; if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) { lock_owner = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK_ID); ql_log(ql_log_warn, vha, 0xb113, "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", __func__, ha->portnum, lock_owner); ret_val = QLA_FUNCTION_FAILED; break; } msleep(20); } qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum); return ret_val; } static void qla8044_flash_unlock(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Reading FLASH_UNLOCK register unlocks the Flash */ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF); qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK); } static void qla8044_flash_lock_recovery(struct scsi_qla_host *vha) { if (qla8044_flash_lock(vha)) { /* Someone else is holding the lock. */ ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n"); } /* * Either we got the lock, or someone * else died while holding it. * In either case, unlock. */ qla8044_flash_unlock(vha); } /* * Address and length are byte address */ static int qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data, uint32_t flash_addr, int u32_word_count) { int i, ret_val = QLA_SUCCESS; uint32_t u32_word; if (qla8044_flash_lock(vha) != QLA_SUCCESS) { ret_val = QLA_FUNCTION_FAILED; goto exit_lock_error; } if (flash_addr & 0x03) { ql_log(ql_log_warn, vha, 0xb117, "%s: Illegal addr = 0x%x\n", __func__, flash_addr); ret_val = QLA_FUNCTION_FAILED; goto exit_flash_read; } for (i = 0; i < u32_word_count; i++) { if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW, (flash_addr & 0xFFFF0000))) { ql_log(ql_log_warn, vha, 0xb119, "%s: failed to write addr 0x%x to " "FLASH_DIRECT_WINDOW\n! 
", __func__, flash_addr); ret_val = QLA_FUNCTION_FAILED; goto exit_flash_read; } ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_DIRECT_DATA(flash_addr), &u32_word); if (ret_val != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb08c, "%s: failed to read addr 0x%x!\n", __func__, flash_addr); goto exit_flash_read; } *(uint32_t *)p_data = u32_word; p_data = p_data + 4; flash_addr = flash_addr + 4; } exit_flash_read: qla8044_flash_unlock(vha); exit_lock_error: return ret_val; } /* * Address and length are byte address */ void * qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { scsi_block_requests(vha->host); if (qla8044_read_flash_data(vha, buf, offset, length / 4) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb08d, "%s: Failed to read from flash\n", __func__); } scsi_unblock_requests(vha->host); return buf; } static inline int qla8044_need_reset(struct scsi_qla_host *vha) { uint32_t drv_state, drv_active; int rval; struct qla_hw_data *ha = vha->hw; drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); rval = drv_state & (1 << ha->portnum); if (ha->flags.eeh_busy && drv_active) rval = 1; return rval; } /* * qla8044_write_list - Write the value (p_entry->arg2) to address specified * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between * entries. * * @vha : Pointer to adapter structure * @p_hdr : reset_entry header for WRITE_LIST opcode. * */ static void qla8044_write_list(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { struct qla8044_entry *p_entry; uint32_t i; p_entry = (struct qla8044_entry *)((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } /* * qla8044_read_write_list - Read from address specified by p_entry->arg1, * write value read to address specified by p_entry->arg2, for all entries in * header with delay of p_hdr->delay between entries. * * @vha : Pointer to adapter structure * @p_hdr : reset_entry header for READ_WRITE_LIST opcode. * */ static void qla8044_read_write_list(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { struct qla8044_entry *p_entry; uint32_t i; p_entry = (struct qla8044_entry *)((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla8044_read_write_crb_reg(vha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } /* * qla8044_poll_reg - Poll the given CRB addr for duration msecs till * value read ANDed with test_mask is equal to test_result. * * @ha : Pointer to adapter structure * @addr : CRB register address * @duration : Poll for total of "duration" msecs * @test_mask : Mask value read with "test_mask" * @test_result : Compare (value&test_mask) with test_result. 
* * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED */ static int qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr, int duration, uint32_t test_mask, uint32_t test_result) { uint32_t value = 0; int timeout_error; uint8_t retries; int ret_val = QLA_SUCCESS; ret_val = qla8044_rd_reg_indirect(vha, addr, &value); if (ret_val == QLA_FUNCTION_FAILED) { timeout_error = 1; goto exit_poll_reg; } /* poll every 1/10 of the total duration */ retries = duration/10; do { if ((value & test_mask) != test_result) { timeout_error = 1; msleep(duration/10); ret_val = qla8044_rd_reg_indirect(vha, addr, &value); if (ret_val == QLA_FUNCTION_FAILED) { timeout_error = 1; goto exit_poll_reg; } } else { timeout_error = 0; break; } } while (retries--); exit_poll_reg: if (timeout_error) { vha->reset_tmplt.seq_error++; ql_log(ql_log_fatal, vha, 0xb090, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", __func__, value, test_mask, test_result); } return timeout_error; } /* * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB * register specified by p_entry->arg1 and compare (value AND test_mask) with * test_result to validate it. Wait for p_hdr->delay between processing entries. * * @ha : Pointer to adapter structure * @p_hdr : reset_entry header for POLL_LIST opcode. * */ static void qla8044_poll_list(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { long delay; struct qla8044_entry *p_entry; struct qla8044_poll *p_poll; uint32_t i; uint32_t value; p_poll = (struct qla8044_poll *) ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); /* Entries start after 8 byte qla8044_poll, poll header contains * the test_mask, test_value. */ p_entry = (struct qla8044_entry *)((char *)p_poll + sizeof(struct qla8044_poll)); delay = (long)p_hdr->delay; if (!delay) { for (i = 0; i < p_hdr->count; i++, p_entry++) qla8044_poll_reg(vha, p_entry->arg1, delay, p_poll->test_mask, p_poll->test_value); } else { for (i = 0; i < p_hdr->count; i++, p_entry++) { if (delay) { if (qla8044_poll_reg(vha, p_entry->arg1, delay, p_poll->test_mask, p_poll->test_value)) { /*If * (data_read&test_mask != test_value) * read TIMEOUT_ADDR (arg1) and * ADDR (arg2) registers */ qla8044_rd_reg_indirect(vha, p_entry->arg1, &value); qla8044_rd_reg_indirect(vha, p_entry->arg2, &value); } } } } } /* * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr, * read ar_addr, if (value& test_mask != test_mask) re-read till timeout * expires. * * @vha : Pointer to adapter structure * @p_hdr : reset entry header for POLL_WRITE_LIST opcode. * */ static void qla8044_poll_write_list(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { long delay; struct qla8044_quad_entry *p_entry; struct qla8044_poll *p_poll; uint32_t i; p_poll = (struct qla8044_poll *)((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); p_entry = (struct qla8044_quad_entry *)((char *)p_poll + sizeof(struct qla8044_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, p_entry++) { qla8044_wr_reg_indirect(vha, p_entry->dr_addr, p_entry->dr_value); qla8044_wr_reg_indirect(vha, p_entry->ar_addr, p_entry->ar_value); if (delay) { if (qla8044_poll_reg(vha, p_entry->ar_addr, delay, p_poll->test_mask, p_poll->test_value)) { ql_dbg(ql_dbg_p3p, vha, 0xb091, "%s: Timeout Error: poll list, ", __func__); ql_dbg(ql_dbg_p3p, vha, 0xb092, "item_num %d, entry_num %d\n", i, vha->reset_tmplt.seq_index); } } } } /* * qla8044_read_modify_write - Read value from p_entry->arg1, modify the * value, write value to p_entry->arg2. 
Process entries with p_hdr->delay * between entries. * * @vha : Pointer to adapter structure * @p_hdr : header with shift/or/xor values. * */ static void qla8044_read_modify_write(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { struct qla8044_entry *p_entry; struct qla8044_rmw *p_rmw_hdr; uint32_t i; p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr + sizeof(struct qla8044_rmw)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla8044_rmw_crb_reg(vha, p_entry->arg1, p_entry->arg2, p_rmw_hdr); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } /* * qla8044_pause - Wait for p_hdr->delay msecs, called between processing * two entries of a sequence. * * @vha : Pointer to adapter structure * @p_hdr : Common reset entry header. * */ static void qla8044_pause(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { if (p_hdr->delay) mdelay((uint32_t)((long)p_hdr->delay)); } /* * qla8044_template_end - Indicates end of reset sequence processing. * * @vha : Pointer to adapter structure * @p_hdr : Common reset entry header. * */ static void qla8044_template_end(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { vha->reset_tmplt.template_end = 1; if (vha->reset_tmplt.seq_error == 0) { ql_dbg(ql_dbg_p3p, vha, 0xb093, "%s: Reset sequence completed SUCCESSFULLY.\n", __func__); } else { ql_log(ql_log_fatal, vha, 0xb094, "%s: Reset sequence completed with some timeout " "errors.\n", __func__); } } /* * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr, * if (value & test_mask != test_value) re-read till timeout value expires, * read dr_addr register and assign to reset_tmplt.array. * * @vha : Pointer to adapter structure * @p_hdr : Common reset entry header. * */ static void qla8044_poll_read_list(struct scsi_qla_host *vha, struct qla8044_reset_entry_hdr *p_hdr) { long delay; int index; struct qla8044_quad_entry *p_entry; struct qla8044_poll *p_poll; uint32_t i; uint32_t value; p_poll = (struct qla8044_poll *) ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); p_entry = (struct qla8044_quad_entry *) ((char *)p_poll + sizeof(struct qla8044_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, p_entry++) { qla8044_wr_reg_indirect(vha, p_entry->ar_addr, p_entry->ar_value); if (delay) { if (qla8044_poll_reg(vha, p_entry->ar_addr, delay, p_poll->test_mask, p_poll->test_value)) { ql_dbg(ql_dbg_p3p, vha, 0xb095, "%s: Timeout Error: poll " "list, ", __func__); ql_dbg(ql_dbg_p3p, vha, 0xb096, "Item_num %d, " "entry_num %d\n", i, vha->reset_tmplt.seq_index); } else { index = vha->reset_tmplt.array_index; qla8044_rd_reg_indirect(vha, p_entry->dr_addr, &value); vha->reset_tmplt.array[index++] = value; if (index == QLA8044_MAX_RESET_SEQ_ENTRIES) vha->reset_tmplt.array_index = 1; } } } } /* * qla8031_process_reset_template - Process all entries in reset template * till entry with SEQ_END opcode, which indicates end of the reset template * processing. Each entry has a Reset Entry header, entry opcode/command, with * size of the entry, number of entries in sub-sequence and delay in microsecs * or timeout in millisecs. * * @ha : Pointer to adapter structure * @p_buff : Common reset entry header. 
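 * The dispatch below handles OPCODE_NOP, WRITE_LIST, READ_WRITE_LIST,
 * POLL_LIST, POLL_WRITE_LIST, READ_MODIFY_WRITE, SEQ_PAUSE, SEQ_END,
 * TMPL_END and POLL_READ_LIST; unknown opcodes are logged and skipped, and
 * p_entry advances by p_hdr->size after each entry.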
* */ static void qla8044_process_reset_template(struct scsi_qla_host *vha, char *p_buff) { int index, entries; struct qla8044_reset_entry_hdr *p_hdr; char *p_entry = p_buff; vha->reset_tmplt.seq_end = 0; vha->reset_tmplt.template_end = 0; entries = vha->reset_tmplt.hdr->entries; index = vha->reset_tmplt.seq_index; for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) { p_hdr = (struct qla8044_reset_entry_hdr *)p_entry; switch (p_hdr->cmd) { case OPCODE_NOP: break; case OPCODE_WRITE_LIST: qla8044_write_list(vha, p_hdr); break; case OPCODE_READ_WRITE_LIST: qla8044_read_write_list(vha, p_hdr); break; case OPCODE_POLL_LIST: qla8044_poll_list(vha, p_hdr); break; case OPCODE_POLL_WRITE_LIST: qla8044_poll_write_list(vha, p_hdr); break; case OPCODE_READ_MODIFY_WRITE: qla8044_read_modify_write(vha, p_hdr); break; case OPCODE_SEQ_PAUSE: qla8044_pause(vha, p_hdr); break; case OPCODE_SEQ_END: vha->reset_tmplt.seq_end = 1; break; case OPCODE_TMPL_END: qla8044_template_end(vha, p_hdr); break; case OPCODE_POLL_READ_LIST: qla8044_poll_read_list(vha, p_hdr); break; default: ql_log(ql_log_fatal, vha, 0xb097, "%s: Unknown command ==> 0x%04x on " "entry = %d\n", __func__, p_hdr->cmd, index); break; } /* *Set pointer to next entry in the sequence. */ p_entry += p_hdr->size; } vha->reset_tmplt.seq_index = index; } static void qla8044_process_init_seq(struct scsi_qla_host *vha) { qla8044_process_reset_template(vha, vha->reset_tmplt.init_offset); if (vha->reset_tmplt.seq_end != 1) ql_log(ql_log_fatal, vha, 0xb098, "%s: Abrupt INIT Sub-Sequence end.\n", __func__); } static void qla8044_process_stop_seq(struct scsi_qla_host *vha) { vha->reset_tmplt.seq_index = 0; qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset); if (vha->reset_tmplt.seq_end != 1) ql_log(ql_log_fatal, vha, 0xb099, "%s: Abrupt STOP Sub-Sequence end.\n", __func__); } static void qla8044_process_start_seq(struct scsi_qla_host *vha) { qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset); if (vha->reset_tmplt.template_end != 1) ql_log(ql_log_fatal, vha, 0xb09a, "%s: Abrupt START Sub-Sequence end.\n", __func__); } static int qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha, uint32_t flash_addr, uint8_t *p_data, int u32_word_count) { uint32_t i; uint32_t u32_word; uint32_t flash_offset; uint32_t addr = flash_addr; int ret_val = QLA_SUCCESS; flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1); if (addr & 0x3) { ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n", __func__, addr); ret_val = QLA_FUNCTION_FAILED; goto exit_lockless_read; } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW, (addr)); if (ret_val != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0xb09c, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", __func__, addr); goto exit_lockless_read; } /* Check if data is spread across multiple sectors */ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > (QLA8044_FLASH_SECTOR_SIZE - 1)) { /* Multi sector read */ for (i = 0; i < u32_word_count; i++) { ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0xb09d, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_lockless_read; } *(uint32_t *)p_data = u32_word; p_data = p_data + 4; addr = addr + 4; flash_offset = flash_offset + 4; if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) { /* This write is needed once for each sector */ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW, (addr)); if (ret_val 
!= QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0xb09f, "%s: failed to write addr " "0x%x to FLASH_DIRECT_WINDOW!\n", __func__, addr); goto exit_lockless_read; } flash_offset = 0; } } } else { /* Single sector read */ for (i = 0; i < u32_word_count; i++) { ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0xb0a0, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_lockless_read; } *(uint32_t *)p_data = u32_word; p_data = p_data + 4; addr = addr + 4; } } exit_lockless_read: return ret_val; } /* * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory * * @vha : Pointer to adapter structure * addr : Flash address to write to * data : Data to be written * count : word_count to be written * * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED */ static int qla8044_ms_mem_write_128b(struct scsi_qla_host *vha, uint64_t addr, uint32_t *data, uint32_t count) { int i, j, ret_val = QLA_SUCCESS; uint32_t agt_ctrl; unsigned long flags; struct qla_hw_data *ha = vha->hw; /* Only 128-bit aligned access */ if (addr & 0xF) { ret_val = QLA_FUNCTION_FAILED; goto exit_ms_mem_write; } write_lock_irqsave(&ha->hw_lock, flags); /* Write address */ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0); if (ret_val == QLA_FUNCTION_FAILED) { ql_log(ql_log_fatal, vha, 0xb0a1, "%s: write to AGT_ADDR_HI failed!\n", __func__); goto exit_ms_mem_write_unlock; } for (i = 0; i < count; i++, addr += 16) { if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET, QLA8044_ADDR_QDR_NET_MAX)) || (addr_in_range(addr, QLA8044_ADDR_DDR_NET, QLA8044_ADDR_DDR_NET_MAX)))) { ret_val = QLA_FUNCTION_FAILED; goto exit_ms_mem_write_unlock; } ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, addr); /* Write data */ ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_WRDATA_LO, *data++); ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_WRDATA_HI, *data++); ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_WRDATA_ULO, *data++); ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_WRDATA_UHI, *data++); if (ret_val == QLA_FUNCTION_FAILED) { ql_log(ql_log_fatal, vha, 0xb0a2, "%s: write to AGT_WRDATA failed!\n", __func__); goto exit_ms_mem_write_unlock; } /* Check write status */ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_WRITE_ENABLE); ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_WRITE_START); if (ret_val == QLA_FUNCTION_FAILED) { ql_log(ql_log_fatal, vha, 0xb0a3, "%s: write to AGT_CTRL failed!\n", __func__); goto exit_ms_mem_write_unlock; } for (j = 0; j < MAX_CTL_CHECK; j++) { ret_val = qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, &agt_ctrl); if (ret_val == QLA_FUNCTION_FAILED) { ql_log(ql_log_fatal, vha, 0xb0a4, "%s: failed to read " "MD_MIU_TEST_AGT_CTRL!\n", __func__); goto exit_ms_mem_write_unlock; } if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) break; } /* Status check failed */ if (j >= MAX_CTL_CHECK) { ql_log(ql_log_fatal, vha, 0xb0a5, "%s: MS memory write failed!\n", __func__); ret_val = QLA_FUNCTION_FAILED; goto exit_ms_mem_write_unlock; } } exit_ms_mem_write_unlock: write_unlock_irqrestore(&ha->hw_lock, flags); exit_ms_mem_write: return ret_val; } static int qla8044_copy_bootloader(struct scsi_qla_host *vha) { uint8_t *p_cache; uint32_t src, count, size; uint64_t dest; int ret_val = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; src = QLA8044_BOOTLOADER_FLASH_ADDR; dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR); size = qla8044_rd_reg(ha, 
QLA8044_BOOTLOADER_SIZE); /* 128 bit alignment check */ if (size & 0xF) size = (size + 16) & ~0xF; /* 16 byte count */ count = size/16; p_cache = vmalloc(size); if (p_cache == NULL) { ql_log(ql_log_fatal, vha, 0xb0a6, "%s: Failed to allocate memory for " "boot loader cache\n", __func__); ret_val = QLA_FUNCTION_FAILED; goto exit_copy_bootloader; } ret_val = qla8044_lockless_flash_read_u32(vha, src, p_cache, size/sizeof(uint32_t)); if (ret_val == QLA_FUNCTION_FAILED) { ql_log(ql_log_fatal, vha, 0xb0a7, "%s: Error reading F/W from flash!!!\n", __func__); goto exit_copy_error; } ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n", __func__); /* 128 bit/16 byte write to MS memory */ ret_val = qla8044_ms_mem_write_128b(vha, dest, (uint32_t *)p_cache, count); if (ret_val == QLA_FUNCTION_FAILED) { ql_log(ql_log_fatal, vha, 0xb0a9, "%s: Error writing F/W to MS !!!\n", __func__); goto exit_copy_error; } ql_dbg(ql_dbg_p3p, vha, 0xb0aa, "%s: Wrote F/W (size %d) to MS !!!\n", __func__, size); exit_copy_error: vfree(p_cache); exit_copy_bootloader: return ret_val; } static int qla8044_restart(struct scsi_qla_host *vha) { int ret_val = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; qla8044_process_stop_seq(vha); /* Collect minidump */ if (ql2xmdenable) qla8044_get_minidump(vha); else ql_log(ql_log_fatal, vha, 0xb14c, "Minidump disabled.\n"); qla8044_process_init_seq(vha); if (qla8044_copy_bootloader(vha)) { ql_log(ql_log_fatal, vha, 0xb0ab, "%s: Copy bootloader, firmware restart failed!\n", __func__); ret_val = QLA_FUNCTION_FAILED; goto exit_restart; } /* * Loads F/W from flash */ qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH); qla8044_process_start_seq(vha); exit_restart: return ret_val; } /* * qla8044_check_cmd_peg_status - Check peg status to see if Peg is * initialized. * * @ha : Pointer to adapter structure * * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED */ static int qla8044_check_cmd_peg_status(struct scsi_qla_host *vha) { uint32_t val, ret_val = QLA_FUNCTION_FAILED; int retries = CRB_CMDPEG_CHECK_RETRY_COUNT; struct qla_hw_data *ha = vha->hw; do { val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE); if (val == PHAN_INITIALIZE_COMPLETE) { ql_dbg(ql_dbg_p3p, vha, 0xb0ac, "%s: Command Peg initialization " "complete! 
state=0x%x\n", __func__, val); ret_val = QLA_SUCCESS; break; } msleep(CRB_CMDPEG_CHECK_DELAY); } while (--retries); return ret_val; } static int qla8044_start_firmware(struct scsi_qla_host *vha) { int ret_val = QLA_SUCCESS; if (qla8044_restart(vha)) { ql_log(ql_log_fatal, vha, 0xb0ad, "%s: Restart Error!!!, Need Reset!!!\n", __func__); ret_val = QLA_FUNCTION_FAILED; goto exit_start_fw; } else ql_dbg(ql_dbg_p3p, vha, 0xb0af, "%s: Restart done!\n", __func__); ret_val = qla8044_check_cmd_peg_status(vha); if (ret_val) { ql_log(ql_log_fatal, vha, 0xb0b0, "%s: Peg not initialized!\n", __func__); ret_val = QLA_FUNCTION_FAILED; } exit_start_fw: return ret_val; } void qla8044_clear_drv_active(struct qla_hw_data *ha) { uint32_t drv_active; struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); drv_active &= ~(1 << (ha->portnum)); ql_log(ql_log_info, vha, 0xb0b1, "%s(%ld): drv_active: 0x%08x\n", __func__, vha->host_no, drv_active); qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); } /* * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw * @ha: pointer to adapter structure * * Note: IDC lock must be held upon entry **/ static int qla8044_device_bootstrap(struct scsi_qla_host *vha) { int rval = QLA_FUNCTION_FAILED; int i; uint32_t old_count = 0, count = 0; int need_reset = 0; uint32_t idc_ctrl; struct qla_hw_data *ha = vha->hw; need_reset = qla8044_need_reset(vha); if (!need_reset) { old_count = qla8044_rd_direct(vha, QLA8044_PEG_ALIVE_COUNTER_INDEX); for (i = 0; i < 10; i++) { msleep(200); count = qla8044_rd_direct(vha, QLA8044_PEG_ALIVE_COUNTER_INDEX); if (count != old_count) { rval = QLA_SUCCESS; goto dev_ready; } } qla8044_flash_lock_recovery(vha); } else { /* We are trying to perform a recovery here. */ if (ha->flags.isp82xx_fw_hung) qla8044_flash_lock_recovery(vha); } /* set to DEV_INITIALIZING */ ql_log(ql_log_info, vha, 0xb0b2, "%s: HW State: INITIALIZING\n", __func__); qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_INITIALIZING); qla8044_idc_unlock(ha); rval = qla8044_start_firmware(vha); qla8044_idc_lock(ha); if (rval != QLA_SUCCESS) { ql_log(ql_log_info, vha, 0xb0b3, "%s: HW State: FAILED\n", __func__); qla8044_clear_drv_active(ha); qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_FAILED); return rval; } /* For ISP8044, If IDC_CTRL GRACEFUL_RESET_BIT1 is set , reset it after * device goes to INIT state. 
*/ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); if (idc_ctrl & GRACEFUL_RESET_BIT1) { qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, (idc_ctrl & ~GRACEFUL_RESET_BIT1)); ha->fw_dumped = false; } dev_ready: ql_log(ql_log_info, vha, 0xb0b4, "%s: HW State: READY\n", __func__); qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY); return rval; } /*-------------------------Reset Sequence Functions-----------------------*/ static void qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha) { u8 *phdr; if (!vha->reset_tmplt.buff) { ql_log(ql_log_fatal, vha, 0xb0b5, "%s: Error Invalid reset_seq_template\n", __func__); return; } phdr = vha->reset_tmplt.buff; ql_dbg(ql_dbg_p3p, vha, 0xb0b6, "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X" "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n" "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n", *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4), *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8), *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12), *(phdr+13), *(phdr+14), *(phdr+15)); } /* * qla8044_reset_seq_checksum_test - Validate Reset Sequence template. * * @ha : Pointer to adapter structure * * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED */ static int qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha) { uint32_t sum = 0; uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff; int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t); while (u16_count-- > 0) sum += *buff++; while (sum >> 16) sum = (sum & 0xFFFF) + (sum >> 16); /* checksum of 0 indicates a valid template */ if (~sum) { return QLA_SUCCESS; } else { ql_log(ql_log_fatal, vha, 0xb0b7, "%s: Reset seq checksum failed\n", __func__); return QLA_FUNCTION_FAILED; } } /* * qla8044_read_reset_template - Read Reset Template from Flash, validate * the template and store offsets of stop/start/init offsets in ha->reset_tmplt. 
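 * The header is read first from QLA8044_RESET_TEMPLATE_ADDR so its size and
 * signature can be validated, the remainder of the template is then read,
 * and the whole buffer must pass qla8044_reset_seq_checksum_test() before
 * the stop/start/init sequence offsets are taken from the header fields.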
* * @ha : Pointer to adapter structure */ void qla8044_read_reset_template(struct scsi_qla_host *vha) { uint8_t *p_buff; uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size; vha->reset_tmplt.seq_error = 0; vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE); if (vha->reset_tmplt.buff == NULL) { ql_log(ql_log_fatal, vha, 0xb0b8, "%s: Failed to allocate reset template resources\n", __func__); goto exit_read_reset_template; } p_buff = vha->reset_tmplt.buff; addr = QLA8044_RESET_TEMPLATE_ADDR; tmplt_hdr_def_size = sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t); ql_dbg(ql_dbg_p3p, vha, 0xb0b9, "%s: Read template hdr size %d from Flash\n", __func__, tmplt_hdr_def_size); /* Copy template header from flash */ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) { ql_log(ql_log_fatal, vha, 0xb0ba, "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } vha->reset_tmplt.hdr = (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff; /* Validate the template header size and signature */ tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t); if ((tmplt_hdr_size != tmplt_hdr_def_size) || (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) { ql_log(ql_log_fatal, vha, 0xb0bb, "%s: Template Header size invalid %d " "tmplt_hdr_def_size %d!!!\n", __func__, tmplt_hdr_size, tmplt_hdr_def_size); goto exit_read_template_error; } addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size; p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size; tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size - vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t); ql_dbg(ql_dbg_p3p, vha, 0xb0bc, "%s: Read rest of the template size %d\n", __func__, vha->reset_tmplt.hdr->size); /* Copy rest of the template */ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) { ql_log(ql_log_fatal, vha, 0xb0bd, "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } /* Integrity check */ if (qla8044_reset_seq_checksum_test(vha)) { ql_log(ql_log_fatal, vha, 0xb0be, "%s: Reset Seq checksum failed!\n", __func__); goto exit_read_template_error; } ql_dbg(ql_dbg_p3p, vha, 0xb0bf, "%s: Reset Seq checksum passed! 
Get stop, " "start and init seq offsets\n", __func__); /* Get STOP, START, INIT sequence offsets */ vha->reset_tmplt.init_offset = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->init_seq_offset; vha->reset_tmplt.start_offset = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->start_seq_offset; vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size; qla8044_dump_reset_seq_hdr(vha); goto exit_read_reset_template; exit_read_template_error: vfree(vha->reset_tmplt.buff); exit_read_reset_template: return; } void qla8044_set_idc_dontreset(struct scsi_qla_host *vha) { uint32_t idc_ctrl; struct qla_hw_data *ha = vha->hw; idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); idc_ctrl |= DONTRESET_BIT0; ql_dbg(ql_dbg_p3p, vha, 0xb0c0, "%s: idc_ctrl = %d\n", __func__, idc_ctrl); qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); } static inline void qla8044_set_rst_ready(struct scsi_qla_host *vha) { uint32_t drv_state; struct qla_hw_data *ha = vha->hw; drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); /* For ISP8044, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function.*/ drv_state |= (1 << ha->portnum); ql_log(ql_log_info, vha, 0xb0c1, "%s(%ld): drv_state: 0x%08x\n", __func__, vha->host_no, drv_state); qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); } /** * qla8044_need_reset_handler - Code to start reset sequence * @vha: pointer to adapter structure * * Note: IDC lock must be held upon entry */ static void qla8044_need_reset_handler(struct scsi_qla_host *vha) { uint32_t dev_state = 0, drv_state, drv_active; unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; ql_log(ql_log_fatal, vha, 0xb0c2, "%s: Performing ISP error recovery\n", __func__); if (vha->flags.online) { qla8044_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); ha->isp_ops->get_flash_version(vha, vha->req->ring); ha->isp_ops->nvram_config(vha); qla8044_idc_lock(ha); } dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); ql_log(ql_log_info, vha, 0xb0c5, "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", __func__, vha->host_no, drv_state, drv_active, dev_state); qla8044_set_rst_ready(vha); /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); do { if (time_after_eq(jiffies, reset_timeout)) { ql_log(ql_log_info, vha, 0xb0c4, "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", __func__, ha->portnum, drv_state, drv_active); break; } qla8044_idc_unlock(ha); msleep(1000); qla8044_idc_lock(ha); dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); } while (((drv_state & drv_active) != drv_active) && (dev_state == QLA8XXX_DEV_NEED_RESET)); /* Remove IDC participation of functions not acknowledging */ if (drv_state != drv_active) { ql_log(ql_log_info, vha, 0xb0c7, "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", __func__, vha->host_no, ha->portnum, (drv_active ^ drv_state)); drv_active = drv_active & drv_state; qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); } else { /* * Reset owner should execute reset recovery, * if all functions acknowledged */ if ((ha->flags.nic_core_reset_owner) && (dev_state == 
QLA8XXX_DEV_NEED_RESET)) { ha->flags.nic_core_reset_owner = 0; qla8044_device_bootstrap(vha); return; } } /* Exit if non active function */ if (!(drv_active & (1 << ha->portnum))) { ha->flags.nic_core_reset_owner = 0; return; } /* * Execute Reset Recovery if Reset Owner or Function 7 * is the only active function */ if (ha->flags.nic_core_reset_owner || ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) { ha->flags.nic_core_reset_owner = 0; qla8044_device_bootstrap(vha); } } static void qla8044_set_drv_active(struct scsi_qla_host *vha) { uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); /* For ISP8044, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function.*/ drv_active |= (1 << ha->portnum); ql_log(ql_log_info, vha, 0xb0c8, "%s(%ld): drv_active: 0x%08x\n", __func__, vha->host_no, drv_active); qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); } static int qla8044_check_drv_active(struct scsi_qla_host *vha) { uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); if (drv_active & (1 << ha->portnum)) return QLA_SUCCESS; else return QLA_TEST_FAILED; } static void qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) { uint32_t idc_ctrl; struct qla_hw_data *ha = vha->hw; idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); idc_ctrl &= ~DONTRESET_BIT0; ql_log(ql_log_info, vha, 0xb0c9, "%s: idc_ctrl = %d\n", __func__, idc_ctrl); qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); } static int qla8044_set_idc_ver(struct scsi_qla_host *vha) { int idc_ver; uint32_t drv_active; int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); if (drv_active == (1 << ha->portnum)) { idc_ver = qla8044_rd_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX); idc_ver &= (~0xFF); idc_ver |= QLA8044_IDC_VER_MAJ_VALUE; qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX, idc_ver); ql_log(ql_log_info, vha, 0xb0ca, "%s: IDC version updated to %d\n", __func__, idc_ver); } else { idc_ver = qla8044_rd_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX); idc_ver &= 0xFF; if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) { ql_log(ql_log_info, vha, 0xb0cb, "%s: qla4xxx driver IDC version %d " "is not compatible with IDC version %d " "of other drivers!\n", __func__, QLA8044_IDC_VER_MAJ_VALUE, idc_ver); rval = QLA_FUNCTION_FAILED; goto exit_set_idc_ver; } } /* Update IDC_MINOR_VERSION */ idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR); idc_ver &= ~(0x03 << (ha->portnum * 2)); idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2)); qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver); exit_set_idc_ver: return rval; } static int qla8044_update_idc_reg(struct scsi_qla_host *vha) { uint32_t drv_active; int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; if (vha->flags.init_done) goto exit_update_idc_reg; qla8044_idc_lock(ha); qla8044_set_drv_active(vha); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); /* If we are the first driver to load and * ql2xdontresethba is not set, clear IDC_CTRL BIT0. 
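 * Here drv_active equal to exactly (1 << ha->portnum) is how "first driver
 * to load" is detected: only this function's bit is set in CRB_DRV_ACTIVE.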
*/ if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba) qla8044_clear_idc_dontreset(vha); rval = qla8044_set_idc_ver(vha); if (rval == QLA_FUNCTION_FAILED) qla8044_clear_drv_active(ha); qla8044_idc_unlock(ha); exit_update_idc_reg: return rval; } /** * qla8044_need_qsnt_handler - Code to start qsnt * @vha: pointer to adapter structure */ static void qla8044_need_qsnt_handler(struct scsi_qla_host *vha) { unsigned long qsnt_timeout; uint32_t drv_state, drv_active, dev_state; struct qla_hw_data *ha = vha->hw; if (vha->flags.online) qla2x00_quiesce_io(vha); else return; qla8044_set_qsnt_ready(vha); /* Wait for 30 secs for all functions to ack qsnt mode */ qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ); drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); /* Shift drv_active by 1 to match drv_state. As quiescent ready bit position is at bit 1 and drv active is at bit 0 */ drv_active = drv_active << 1; while (drv_state != drv_active) { if (time_after_eq(jiffies, qsnt_timeout)) { /* Other functions did not ack, changing state to * DEV_READY */ clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY); qla8044_clear_qsnt_ready(vha); ql_log(ql_log_info, vha, 0xb0cc, "Timeout waiting for quiescent ack!!!\n"); return; } qla8044_idc_unlock(ha); msleep(1000); qla8044_idc_lock(ha); drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); drv_active = drv_active << 1; } /* All functions have Acked. Set quiescent state */ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_QUIESCENT); ql_log(ql_log_info, vha, 0xb0cd, "%s: HW State: QUIESCENT\n", __func__); } } /* * qla8044_device_state_handler - Adapter state machine * @ha: pointer to host adapter structure. 
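* Drives the IDC device state machine until the device reaches READY or FAILED, or the device-init timeout expires.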
* * Note: IDC lock must be UNLOCKED upon entry **/ int qla8044_device_state_handler(struct scsi_qla_host *vha) { uint32_t dev_state; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; struct qla_hw_data *ha = vha->hw; rval = qla8044_update_idc_reg(vha); if (rval == QLA_FUNCTION_FAILED) goto exit_error; dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); ql_dbg(ql_dbg_p3p, vha, 0xb0ce, "Device state is 0x%x = %s\n", dev_state, qdev_state(dev_state)); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); qla8044_idc_lock(ha); while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { if (qla8044_check_drv_active(vha) == QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb0cf, "%s: Device Init Failed 0x%x = %s\n", QLA2XXX_DRIVER_NAME, dev_state, qdev_state(dev_state)); qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_FAILED); } } dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); ql_log(ql_log_info, vha, 0xb0d0, "Device state is 0x%x = %s\n", dev_state, qdev_state(dev_state)); /* NOTE: Make sure idc unlocked upon exit of switch statement */ switch (dev_state) { case QLA8XXX_DEV_READY: ha->flags.nic_core_reset_owner = 0; goto exit; case QLA8XXX_DEV_COLD: rval = qla8044_device_bootstrap(vha); break; case QLA8XXX_DEV_INITIALIZING: qla8044_idc_unlock(ha); msleep(1000); qla8044_idc_lock(ha); break; case QLA8XXX_DEV_NEED_RESET: /* For ISP8044, if NEED_RESET is set by any driver, * it should be honored, irrespective of IDC_CTRL * DONTRESET_BIT0 */ qla8044_need_reset_handler(vha); break; case QLA8XXX_DEV_NEED_QUIESCENT: /* idc locked/unlocked in handler */ qla8044_need_qsnt_handler(vha); /* Reset the init timeout after qsnt handler */ dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); break; case QLA8XXX_DEV_QUIESCENT: ql_log(ql_log_info, vha, 0xb0d1, "HW State: QUIESCENT\n"); qla8044_idc_unlock(ha); msleep(1000); qla8044_idc_lock(ha); /* Reset the init timeout after qsnt handler */ dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); break; case QLA8XXX_DEV_FAILED: ha->flags.nic_core_reset_owner = 0; qla8044_idc_unlock(ha); qla8xxx_dev_failed_handler(vha); rval = QLA_FUNCTION_FAILED; qla8044_idc_lock(ha); goto exit; default: qla8044_idc_unlock(ha); qla8xxx_dev_failed_handler(vha); rval = QLA_FUNCTION_FAILED; qla8044_idc_lock(ha); goto exit; } } exit: qla8044_idc_unlock(ha); exit_error: return rval; } /** * qla8044_check_temp - Check the ISP82XX temperature. * @vha: adapter block pointer. * * Note: The caller should not hold the idc lock. */ static int qla8044_check_temp(struct scsi_qla_host *vha) { uint32_t temp, temp_state, temp_val; int status = QLA_SUCCESS; temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX); temp_state = qla82xx_get_temp_state(temp); temp_val = qla82xx_get_temp_val(temp); if (temp_state == QLA82XX_TEMP_PANIC) { ql_log(ql_log_warn, vha, 0xb0d2, "Device temperature %d degrees C" " exceeds maximum allowed. Hardware has been shut" " down\n", temp_val); status = QLA_FUNCTION_FAILED; return status; } else if (temp_state == QLA82XX_TEMP_WARN) { ql_log(ql_log_warn, vha, 0xb0d3, "Device temperature %d" " degrees C exceeds operating range." " Immediate action needed.\n", temp_val); } return 0; } int qla8044_read_temperature(scsi_qla_host_t *vha) { uint32_t temp; temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX); return qla82xx_get_temp_val(temp); } /** * qla8044_check_fw_alive - Check firmware health * @vha: Pointer to host adapter structure. 
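* Return: QLA_SUCCESS while the heartbeat counter is advancing, QLA_FUNCTION_FAILED once it has been stuck for two consecutive polls.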
* * Context: Interrupt */ int qla8044_check_fw_alive(struct scsi_qla_host *vha) { uint32_t fw_heartbeat_counter; uint32_t halt_status1, halt_status2; int status = QLA_SUCCESS; fw_heartbeat_counter = qla8044_rd_direct(vha, QLA8044_PEG_ALIVE_COUNTER_INDEX); /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) { ql_dbg(ql_dbg_p3p, vha, 0xb0d4, "scsi%ld: %s: Device in frozen " "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", vha->host_no, __func__); return status; } if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; halt_status1 = qla8044_rd_direct(vha, QLA8044_PEG_HALT_STATUS1_INDEX); halt_status2 = qla8044_rd_direct(vha, QLA8044_PEG_HALT_STATUS2_INDEX); ql_log(ql_log_info, vha, 0xb0d5, "scsi(%ld): %s, ISP8044 " "Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, " "PEG_HALT_STATUS2: 0x%x,\n", vha->host_no, __func__, halt_status1, halt_status2); status = QLA_FUNCTION_FAILED; } } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; return status; } void qla8044_watchdog(struct scsi_qla_host *vha) { uint32_t dev_state, halt_status; int halt_status_unrecoverable = 0; struct qla_hw_data *ha = vha->hw; /* don't poll if reset is going on or FW hang in quiescent state */ if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); if (qla8044_check_fw_alive(vha)) { ha->flags.isp82xx_fw_hung = 1; ql_log(ql_log_warn, vha, 0xb10a, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } if (qla8044_check_temp(vha)) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla2xxx_wake_dpc(vha); } else if (dev_state == QLA8XXX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_info, vha, 0xb0d6, "%s: HW State: NEED RESET!\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_info, vha, 0xb0d7, "%s: HW State: NEED QUIES detected!\n", __func__); set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { /* Check firmware health */ if (ha->flags.isp82xx_fw_hung) { halt_status = qla8044_rd_direct(vha, QLA8044_PEG_HALT_STATUS1_INDEX); if (halt_status & QLA8044_HALT_STATUS_FW_RESET) { ql_log(ql_log_fatal, vha, 0xb0d8, "%s: Firmware " "error detected device " "is being reset\n", __func__); } else if (halt_status & QLA8044_HALT_STATUS_UNRECOVERABLE) { halt_status_unrecoverable = 1; } /* Since we cannot change dev_state in interrupt * context, set appropriate DPC flag then wakeup * DPC */ if (halt_status_unrecoverable) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); } else { if (dev_state == QLA8XXX_DEV_QUIESCENT) { set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); ql_log(ql_log_info, vha, 0xb0d9, "%s: FW CONTEXT Reset " "needed!\n", __func__); } else { ql_log(ql_log_info, vha, 0xb0da, "%s: " "detect abort needed\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } qla2xxx_wake_dpc(vha); } } } } static int qla8044_minidump_process_control(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr) { struct qla8044_minidump_entry_crb *crb_entry; uint32_t read_value, opcode, poll_time, addr, index; uint32_t 
crb_addr, rval = QLA_SUCCESS; unsigned long wtime; struct qla8044_minidump_template_hdr *tmplt_hdr; int i; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__); tmplt_hdr = (struct qla8044_minidump_template_hdr *) ha->md_tmplt_hdr; crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; for (i = 0; i < crb_entry->op_count; i++) { opcode = crb_entry->crb_ctrl.opcode; if (opcode & QLA82XX_DBG_OPCODE_WR) { qla8044_wr_reg_indirect(vha, crb_addr, crb_entry->value_1); } if (opcode & QLA82XX_DBG_OPCODE_RW) { qla8044_rd_reg_indirect(vha, crb_addr, &read_value); qla8044_wr_reg_indirect(vha, crb_addr, read_value); } if (opcode & QLA82XX_DBG_OPCODE_AND) { qla8044_rd_reg_indirect(vha, crb_addr, &read_value); read_value &= crb_entry->value_2; if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value |= crb_entry->value_3; opcode &= ~QLA82XX_DBG_OPCODE_OR; } qla8044_wr_reg_indirect(vha, crb_addr, read_value); } if (opcode & QLA82XX_DBG_OPCODE_OR) { qla8044_rd_reg_indirect(vha, crb_addr, &read_value); read_value |= crb_entry->value_3; qla8044_wr_reg_indirect(vha, crb_addr, read_value); } if (opcode & QLA82XX_DBG_OPCODE_POLL) { poll_time = crb_entry->crb_strd.poll_timeout; wtime = jiffies + poll_time; qla8044_rd_reg_indirect(vha, crb_addr, &read_value); do { if ((read_value & crb_entry->value_2) == crb_entry->value_1) { break; } else if (time_after_eq(jiffies, wtime)) { /* capturing dump failed */ rval = QLA_FUNCTION_FAILED; break; } else { qla8044_rd_reg_indirect(vha, crb_addr, &read_value); } } while (1); } if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } qla8044_rd_reg_indirect(vha, addr, &read_value); index = crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; } if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if (crb_entry->crb_ctrl.state_index_v) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } qla8044_wr_reg_indirect(vha, addr, read_value); } if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value <<= crb_entry->crb_ctrl.shl; read_value >>= crb_entry->crb_ctrl.shr; if (crb_entry->value_2) read_value &= crb_entry->value_2; read_value |= crb_entry->value_3; read_value += crb_entry->value_1; tmplt_hdr->saved_state_array[index] = read_value; } crb_addr += crb_entry->crb_strd.addr_stride; } return rval; } static void qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla8044_minidump_entry_crb *crb_hdr; uint32_t *data_ptr = *d_ptr; ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__); crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla8044_rd_reg_indirect(vha, r_addr, &r_value); *data_ptr++ = r_addr; *data_ptr++ = r_value; r_addr += r_stride; } *d_ptr = data_ptr; } static int qla8044_minidump_process_rdmem(struct scsi_qla_host *vha, struct 
qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_value, r_data; uint32_t i, j, loop_cnt; struct qla8044_minidump_entry_rdmem *m_hdr; unsigned long flags; uint32_t *data_ptr = *d_ptr; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__); m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size/16; ql_dbg(ql_dbg_p3p, vha, 0xb0f0, "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", __func__, r_addr, m_hdr->read_data_size); if (r_addr & 0xf) { ql_dbg(ql_dbg_p3p, vha, 0xb0f1, "[%s]: Read addr 0x%x not 16 bytes aligned\n", __func__, r_addr); return QLA_FUNCTION_FAILED; } if (m_hdr->read_data_size % 16) { ql_dbg(ql_dbg_p3p, vha, 0xb0f2, "[%s]: Read data[0x%x] not multiple of 16 bytes\n", __func__, m_hdr->read_data_size); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_p3p, vha, 0xb0f3, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, m_hdr->read_data_size, loop_cnt); write_lock_irqsave(&ha->hw_lock, flags); for (i = 0; i < loop_cnt; i++) { qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr); r_value = 0; qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value); r_value = MIU_TA_CTL_ENABLE; qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); r_value = MIU_TA_CTL_START_ENABLE; qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); for (j = 0; j < MAX_CTL_CHECK; j++) { qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, &r_value); if ((r_value & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { write_unlock_irqrestore(&ha->hw_lock, flags); return QLA_SUCCESS; } for (j = 0; j < 4; j++) { qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j], &r_data); *data_ptr++ = r_data; } r_addr += 16; } write_unlock_irqrestore(&ha->hw_lock, flags); ql_dbg(ql_dbg_p3p, vha, 0xb0f4, "Leaving fn: %s datacount: 0x%x\n", __func__, (loop_cnt * 16)); *d_ptr = data_ptr; return QLA_SUCCESS; } /* ISP83xx flash read for _RDROM _BOARD */ static uint32_t qla8044_minidump_process_rdrom(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t fl_addr, u32_count, rval; struct qla8044_minidump_entry_rdrom *rom_hdr; uint32_t *data_ptr = *d_ptr; rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr; fl_addr = rom_hdr->read_addr; u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t); ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n", __func__, fl_addr, u32_count); rval = qla8044_lockless_flash_read_u32(vha, fl_addr, (u8 *)(data_ptr), u32_count); if (rval != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0xb0f6, "%s: Flash Read Error,Count=%d\n", __func__, u32_count); return QLA_FUNCTION_FAILED; } else { data_ptr += u32_count; *d_ptr = data_ptr; return QLA_SUCCESS; } } static void qla8044_mark_entry_skipped(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, int index) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_log(ql_log_info, vha, 0xb0f7, "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", vha->host_no, index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); } static int qla8044_minidump_process_l2tag(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; unsigned long p_wait, w_time, p_mask; uint32_t c_value_w, c_value_r; struct qla8044_minidump_entry_cache *cache_hdr; int rval = 
QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__); cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; p_wait = cache_hdr->cache_ctrl.poll_wait; p_mask = cache_hdr->cache_ctrl.poll_mask; for (i = 0; i < loop_count; i++) { qla8044_wr_reg_indirect(vha, t_r_addr, t_value); if (c_value_w) qla8044_wr_reg_indirect(vha, c_addr, c_value_w); if (p_mask) { w_time = jiffies + p_wait; do { qla8044_rd_reg_indirect(vha, c_addr, &c_value_r); if ((c_value_r & p_mask) == 0) { break; } else if (time_after_eq(jiffies, w_time)) { /* capturing dump failed */ return rval; } } while (1); } addr = r_addr; for (k = 0; k < r_cnt; k++) { qla8044_rd_reg_indirect(vha, addr, &r_value); *data_ptr++ = r_value; addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; } static void qla8044_minidump_process_l1cache(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla8044_minidump_entry_cache *cache_hdr; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; for (i = 0; i < loop_count; i++) { qla8044_wr_reg_indirect(vha, t_r_addr, t_value); qla8044_wr_reg_indirect(vha, c_addr, c_value_w); addr = r_addr; for (k = 0; k < r_cnt; k++) { qla8044_rd_reg_indirect(vha, addr, &r_value); *data_ptr++ = r_value; addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; } static void qla8044_minidump_process_rdocm(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla8044_minidump_entry_rdocm *ocm_hdr; uint32_t *data_ptr = *d_ptr; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__); ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; ql_dbg(ql_dbg_p3p, vha, 0xb0fa, "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, r_stride, loop_cnt); for (i = 0; i < loop_cnt; i++) { r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); *data_ptr++ = r_value; r_addr += r_stride; } ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n", __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))); *d_ptr = data_ptr; } static void qla8044_minidump_process_rdmux(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value = 0; struct qla8044_minidump_entry_mux *mux_hdr; uint32_t *data_ptr = *d_ptr; ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__); mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; r_addr = 
mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla8044_wr_reg_indirect(vha, s_addr, s_value); qla8044_rd_reg_indirect(vha, r_addr, &r_value); *data_ptr++ = s_value; *data_ptr++ = r_value; s_value += s_stride; } *d_ptr = data_ptr; } static void qla8044_minidump_process_queue(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla8044_minidump_entry_queue *q_hdr; uint32_t *data_ptr = *d_ptr; ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__); q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = q_hdr->rd_strd.read_addr_cnt; r_stride = q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla8044_wr_reg_indirect(vha, s_addr, qid); r_addr = q_hdr->read_addr; for (k = 0; k < r_cnt; k++) { qla8044_rd_reg_indirect(vha, r_addr, &r_value); *data_ptr++ = r_value; r_addr += r_stride; } qid += q_hdr->q_strd.queue_id_stride; } *d_ptr = data_ptr; } /* ISP83xx functions to process new minidump entries... */ static uint32_t qla8044_minidump_process_pollrd(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; uint16_t s_stride, i; struct qla8044_minidump_entry_pollrd *pollrd_hdr; uint32_t *data_ptr = *d_ptr; pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr; s_addr = pollrd_hdr->select_addr; r_addr = pollrd_hdr->read_addr; s_value = pollrd_hdr->select_value; s_stride = pollrd_hdr->select_value_stride; poll_wait = pollrd_hdr->poll_wait; poll_mask = pollrd_hdr->poll_mask; for (i = 0; i < pollrd_hdr->op_count; i++) { qla8044_wr_reg_indirect(vha, s_addr, s_value); poll_wait = pollrd_hdr->poll_wait; while (1) { qla8044_rd_reg_indirect(vha, s_addr, &r_value); if ((r_value & poll_mask) != 0) { break; } else { usleep_range(1000, 1100); if (--poll_wait == 0) { ql_log(ql_log_fatal, vha, 0xb0fe, "%s: TIMEOUT\n", __func__); goto error; } } } qla8044_rd_reg_indirect(vha, r_addr, &r_value); *data_ptr++ = s_value; *data_ptr++ = r_value; s_value += s_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; error: return QLA_FUNCTION_FAILED; } static void qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t sel_val1, sel_val2, t_sel_val, data, i; uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr; uint32_t *data_ptr = *d_ptr; rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr; sel_val1 = rdmux2_hdr->select_value_1; sel_val2 = rdmux2_hdr->select_value_2; sel_addr1 = rdmux2_hdr->select_addr_1; sel_addr2 = rdmux2_hdr->select_addr_2; sel_val_mask = rdmux2_hdr->select_value_mask; read_addr = rdmux2_hdr->read_addr; for (i = 0; i < rdmux2_hdr->op_count; i++) { qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); t_sel_val = sel_val1 & sel_val_mask; *data_ptr++ = t_sel_val; qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); qla8044_rd_reg_indirect(vha, read_addr, &data); *data_ptr++ = data; qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); t_sel_val = sel_val2 & sel_val_mask; *data_ptr++ = t_sel_val; qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); qla8044_rd_reg_indirect(vha, read_addr, &data); 
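/* Store the value read for the second mux selection. */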
*data_ptr++ = data; sel_val1 += rdmux2_hdr->select_value_stride; sel_val2 += rdmux2_hdr->select_value_stride; } *d_ptr = data_ptr; } static uint32_t qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t poll_wait, poll_mask, r_value, data; uint32_t addr_1, addr_2, value_1, value_2; struct qla8044_minidump_entry_pollrdmwr *poll_hdr; uint32_t *data_ptr = *d_ptr; poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr; addr_1 = poll_hdr->addr_1; addr_2 = poll_hdr->addr_2; value_1 = poll_hdr->value_1; value_2 = poll_hdr->value_2; poll_mask = poll_hdr->poll_mask; qla8044_wr_reg_indirect(vha, addr_1, value_1); poll_wait = poll_hdr->poll_wait; while (1) { qla8044_rd_reg_indirect(vha, addr_1, &r_value); if ((r_value & poll_mask) != 0) { break; } else { usleep_range(1000, 1100); if (--poll_wait == 0) { ql_log(ql_log_fatal, vha, 0xb0ff, "%s: TIMEOUT\n", __func__); goto error; } } } qla8044_rd_reg_indirect(vha, addr_2, &data); data &= poll_hdr->modify_mask; qla8044_wr_reg_indirect(vha, addr_2, data); qla8044_wr_reg_indirect(vha, addr_1, value_2); poll_wait = poll_hdr->poll_wait; while (1) { qla8044_rd_reg_indirect(vha, addr_1, &r_value); if ((r_value & poll_mask) != 0) { break; } else { usleep_range(1000, 1100); if (--poll_wait == 0) { ql_log(ql_log_fatal, vha, 0xb100, "%s: TIMEOUT2\n", __func__); goto error; } } } *data_ptr++ = addr_2; *data_ptr++ = data; *d_ptr = data_ptr; return QLA_SUCCESS; error: return QLA_FUNCTION_FAILED; } #define ISP8044_PEX_DMA_ENGINE_INDEX 8 #define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 #define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL #define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 #define ISP8044_PEX_DMA_READ_SIZE (16 * 1024) #define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ static int qla8044_check_dma_engine_state(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; int rval = QLA_SUCCESS; uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; uint64_t dma_base_addr = 0; struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; tmplt_hdr = ha->md_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); /* Read the pex-dma's command-status-and-control register. */ rval = qla8044_rd_reg_indirect(vha, (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), &cmd_sts_and_cntrl); if (rval) return QLA_FUNCTION_FAILED; /* Check if requested pex-dma engine is available. 
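* BIT_31 set in the command-status-and-control register means the engine can be used.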
*/ if (cmd_sts_and_cntrl & BIT_31) return QLA_SUCCESS; return QLA_FUNCTION_FAILED; } static int qla8044_start_pex_dma(struct scsi_qla_host *vha, struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr) { struct qla_hw_data *ha = vha->hw; int rval = QLA_SUCCESS, wait = 0; uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; uint64_t dma_base_addr = 0; struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; tmplt_hdr = ha->md_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); rval = qla8044_wr_reg_indirect(vha, dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW, m_hdr->desc_card_addr); if (rval) goto error_exit; rval = qla8044_wr_reg_indirect(vha, dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0); if (rval) goto error_exit; rval = qla8044_wr_reg_indirect(vha, dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL, m_hdr->start_dma_cmd); if (rval) goto error_exit; /* Wait for dma operation to complete. */ for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) { rval = qla8044_rd_reg_indirect(vha, (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), &cmd_sts_and_cntrl); if (rval) goto error_exit; if ((cmd_sts_and_cntrl & BIT_1) == 0) break; udelay(10); } /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ if (wait >= ISP8044_PEX_DMA_MAX_WAIT) { rval = QLA_FUNCTION_FAILED; goto error_exit; } error_exit: return rval; } static int qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; int rval = QLA_SUCCESS; struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL; uint32_t chunk_size, read_size; uint8_t *data_ptr = (uint8_t *)*d_ptr; void *rdmem_buffer = NULL; dma_addr_t rdmem_dma; struct qla8044_pex_dma_descriptor dma_desc; rval = qla8044_check_dma_engine_state(vha); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb147, "DMA engine not available. Fallback to rdmem-read.\n"); return QLA_FUNCTION_FAILED; } m_hdr = (void *)entry_hdr; rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL); if (!rdmem_buffer) { ql_dbg(ql_dbg_p3p, vha, 0xb148, "Unable to allocate rdmem dma buffer\n"); return QLA_FUNCTION_FAILED; } /* Prepare pex-dma descriptor to be written to MS memory. */ /* dma-desc-cmd layout: * 0-3: dma-desc-cmd 0-3 * 4-7: pcid function number * 8-15: dma-desc-cmd 8-15 * dma_bus_addr: dma buffer address * cmd.read_data_size: amount of data-chunk to be read. */ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); dma_desc.dma_bus_addr = rdmem_dma; dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE; read_size = 0; /* * Perform rdmem operation using pex-dma. * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE. */ while (read_size < m_hdr->read_data_size) { if (m_hdr->read_data_size - read_size < ISP8044_PEX_DMA_READ_SIZE) { chunk_size = (m_hdr->read_data_size - read_size); dma_desc.cmd.read_data_size = chunk_size; } dma_desc.src_addr = m_hdr->read_addr + read_size; /* Prepare: Write pex-dma descriptor to MS memory. 
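* The descriptor is written in 128-bit (16-byte) chunks via qla8044_ms_mem_write_128b().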
*/ rval = qla8044_ms_mem_write_128b(vha, m_hdr->desc_card_addr, (uint32_t *)&dma_desc, (sizeof(struct qla8044_pex_dma_descriptor)/16)); if (rval) { ql_log(ql_log_warn, vha, 0xb14a, "%s: Error writing rdmem-dma-init to MS !!!\n", __func__); goto error_exit; } ql_dbg(ql_dbg_p3p, vha, 0xb14b, "%s: Dma-descriptor: Instruct for rdmem dma " "(chunk_size 0x%x).\n", __func__, chunk_size); /* Execute: Start pex-dma operation. */ rval = qla8044_start_pex_dma(vha, m_hdr); if (rval) goto error_exit; memcpy(data_ptr, rdmem_buffer, chunk_size); data_ptr += chunk_size; read_size += chunk_size; } *d_ptr = (uint32_t *)data_ptr; error_exit: if (rdmem_buffer) dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE, rdmem_buffer, rdmem_dma); return rval; } static uint32_t qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { int loop_cnt; uint32_t addr1, addr2, value, data, temp, wrVal; uint8_t stride, stride2; uint16_t count; uint32_t poll, mask, modify_mask; uint32_t wait_count = 0; uint32_t *data_ptr = *d_ptr; struct qla8044_minidump_entry_rddfe *rddfe; rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; addr1 = rddfe->addr_1; value = rddfe->value; stride = rddfe->stride; stride2 = rddfe->stride2; count = rddfe->count; poll = rddfe->poll; mask = rddfe->mask; modify_mask = rddfe->modify_mask; addr2 = addr1 + stride; for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); wait_count = 0; while (wait_count < poll) { qla8044_rd_reg_indirect(vha, addr1, &temp); if ((temp & mask) != 0) break; wait_count++; } if (wait_count == poll) { ql_log(ql_log_warn, vha, 0xb153, "%s: TIMEOUT\n", __func__); goto error; } else { qla8044_rd_reg_indirect(vha, addr2, &temp); temp = temp & modify_mask; temp = (temp | ((loop_cnt << 16) | loop_cnt)); wrVal = ((temp << 16) | temp); qla8044_wr_reg_indirect(vha, addr2, wrVal); qla8044_wr_reg_indirect(vha, addr1, value); wait_count = 0; while (wait_count < poll) { qla8044_rd_reg_indirect(vha, addr1, &temp); if ((temp & mask) != 0) break; wait_count++; } if (wait_count == poll) { ql_log(ql_log_warn, vha, 0xb154, "%s: TIMEOUT\n", __func__); goto error; } qla8044_wr_reg_indirect(vha, addr1, ((0x40000000 | value) + stride2)); wait_count = 0; while (wait_count < poll) { qla8044_rd_reg_indirect(vha, addr1, &temp); if ((temp & mask) != 0) break; wait_count++; } if (wait_count == poll) { ql_log(ql_log_warn, vha, 0xb155, "%s: TIMEOUT\n", __func__); goto error; } qla8044_rd_reg_indirect(vha, addr2, &data); *data_ptr++ = wrVal; *data_ptr++ = data; } } *d_ptr = data_ptr; return QLA_SUCCESS; error: return -1; } static uint32_t qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { int ret = 0; uint32_t addr1, addr2, value1, value2, data, selVal; uint8_t stride1, stride2; uint32_t addr3, addr4, addr5, addr6, addr7; uint16_t count, loop_cnt; uint32_t mask; uint32_t *data_ptr = *d_ptr; struct qla8044_minidump_entry_rdmdio *rdmdio; rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; addr1 = rdmdio->addr_1; addr2 = rdmdio->addr_2; value1 = rdmdio->value_1; stride1 = rdmdio->stride_1; stride2 = rdmdio->stride_2; count = rdmdio->count; mask = rdmdio->mask; value2 = rdmdio->value_2; addr3 = addr1 + stride1; for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, addr3, mask); if (ret == -1) goto error; addr4 = addr2 - stride1; ret = 
qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, value2); if (ret == -1) goto error; addr5 = addr2 - (2 * stride1); ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, value1); if (ret == -1) goto error; addr6 = addr2 - (3 * stride1); ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr6, 0x2); if (ret == -1) goto error; ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, addr3, mask); if (ret == -1) goto error; addr7 = addr2 - (4 * stride1); data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7); if (data == -1) goto error; selVal = (value2 << 18) | (value1 << 2) | 2; stride2 = rdmdio->stride_2; *data_ptr++ = selVal; *data_ptr++ = data; value1 = value1 + stride2; *d_ptr = data_ptr; } return 0; error: return -1; } static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t addr1, addr2, value1, value2, poll, r_value; uint32_t wait_count = 0; struct qla8044_minidump_entry_pollwr *pollwr_hdr; pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; addr1 = pollwr_hdr->addr_1; addr2 = pollwr_hdr->addr_2; value1 = pollwr_hdr->value_1; value2 = pollwr_hdr->value_2; poll = pollwr_hdr->poll; while (wait_count < poll) { qla8044_rd_reg_indirect(vha, addr1, &r_value); if ((r_value & poll) != 0) break; wait_count++; } if (wait_count == poll) { ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); goto error; } qla8044_wr_reg_indirect(vha, addr2, value2); qla8044_wr_reg_indirect(vha, addr1, value1); wait_count = 0; while (wait_count < poll) { qla8044_rd_reg_indirect(vha, addr1, &r_value); if ((r_value & poll) != 0) break; wait_count++; } return QLA_SUCCESS; error: return -1; } /* * * qla8044_collect_md_data - Retrieve firmware minidump data. 
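* Walks the minidump template entry headers and captures each entry into the preallocated md_dump buffer.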
* @ha: pointer to adapter structure **/ int qla8044_collect_md_data(struct scsi_qla_host *vha) { int num_entry_hdr = 0; struct qla8044_minidump_entry_hdr *entry_hdr; struct qla8044_minidump_template_hdr *tmplt_hdr; uint32_t *data_ptr; uint32_t data_collected = 0, f_capture_mask; int i, rval = QLA_FUNCTION_FAILED; uint64_t now; uint32_t timestamp, idc_control; struct qla_hw_data *ha = vha->hw; if (!ha->md_dump) { ql_log(ql_log_info, vha, 0xb101, "%s(%ld) No buffer to dump\n", __func__, vha->host_no); return rval; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xb10d, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); goto md_failed; } ha->fw_dumped = false; if (!ha->md_tmplt_hdr || !ha->md_dump) { ql_log(ql_log_warn, vha, 0xb10e, "Memory not allocated for minidump capture\n"); goto md_failed; } qla8044_idc_lock(ha); idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); if (idc_control & GRACEFUL_RESET_BIT1) { ql_log(ql_log_warn, vha, 0xb112, "Forced reset from application, " "ignore minidump capture\n"); qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, (idc_control & ~GRACEFUL_RESET_BIT1)); qla8044_idc_unlock(ha); goto md_failed; } qla8044_idc_unlock(ha); if (qla82xx_validate_template_chksum(vha)) { ql_log(ql_log_info, vha, 0xb109, "Template checksum validation error\n"); goto md_failed; } tmplt_hdr = (struct qla8044_minidump_template_hdr *) ha->md_tmplt_hdr; data_ptr = (uint32_t *)((uint8_t *)ha->md_dump); num_entry_hdr = tmplt_hdr->num_of_entries; ql_dbg(ql_dbg_p3p, vha, 0xb11a, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; /* Validate whether required debug level is set */ if ((f_capture_mask & 0x3) != 0x3) { ql_log(ql_log_warn, vha, 0xb10f, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); } tmplt_hdr->driver_capture_mask = ql2xmdcapmask; ql_log(ql_log_info, vha, 0xb102, "[%s]: starting data ptr: %p\n", __func__, data_ptr); ql_log(ql_log_info, vha, 0xb10b, "[%s]: no of entry headers in Template: 0x%x\n", __func__, num_entry_hdr); ql_log(ql_log_info, vha, 0xb10c, "[%s]: Total_data_size 0x%x, %d obtained\n", __func__, ha->md_dump_size, ha->md_dump_size); /* Update current timestamp before taking dump */ now = get_jiffies_64(); timestamp = (u32)(jiffies_to_msecs(now) / 1000); tmplt_hdr->driver_timestamp = timestamp; entry_hdr = (struct qla8044_minidump_entry_hdr *) (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] = tmplt_hdr->ocm_window_reg[ha->portnum]; /* Walk through the entry headers - validate/perform required action */ for (i = 0; i < num_entry_hdr; i++) { if (data_collected > ha->md_dump_size) { ql_log(ql_log_info, vha, 0xb103, "Data collected: [0x%x], " "Total Dump size: [0x%x]\n", data_collected, ha->md_dump_size); return rval; } if (!(entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask)) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; goto skip_nxt_entry; } ql_dbg(ql_dbg_p3p, vha, 0xb104, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, (ha->md_dump_size - data_collected)); /* Decode the entry type and take required action to capture * debug data */ switch (entry_hdr->entry_type) { case QLA82XX_RDEND: qla8044_mark_entry_skipped(vha, entry_hdr, i); break; case QLA82XX_CNTRL: rval = qla8044_minidump_process_control(vha, entry_hdr); if (rval != QLA_SUCCESS) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_RDCRB: 
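/* Capture CRB registers as address/value pairs. */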
qla8044_minidump_process_rdcrb(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMEM: rval = qla8044_minidump_pex_dma_read(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { rval = qla8044_minidump_process_rdmem(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } } break; case QLA82XX_BOARD: case QLA82XX_RDROM: rval = qla8044_minidump_process_rdrom(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla8044_mark_entry_skipped(vha, entry_hdr, i); } break; case QLA82XX_L2DTG: case QLA82XX_L2ITG: case QLA82XX_L2DAT: case QLA82XX_L2INS: rval = qla8044_minidump_process_l2tag(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla8044_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA8044_L1DTG: case QLA8044_L1ITG: case QLA82XX_L1DAT: case QLA82XX_L1INS: qla8044_minidump_process_l1cache(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDOCM: qla8044_minidump_process_rdocm(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMUX: qla8044_minidump_process_rdmux(vha, entry_hdr, &data_ptr); break; case QLA82XX_QUEUE: qla8044_minidump_process_queue(vha, entry_hdr, &data_ptr); break; case QLA8044_POLLRD: rval = qla8044_minidump_process_pollrd(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla8044_mark_entry_skipped(vha, entry_hdr, i); break; case QLA8044_RDMUX2: qla8044_minidump_process_rdmux2(vha, entry_hdr, &data_ptr); break; case QLA8044_POLLRDMWR: rval = qla8044_minidump_process_pollrdmwr(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla8044_mark_entry_skipped(vha, entry_hdr, i); break; case QLA8044_RDDFE: rval = qla8044_minidump_process_rddfe(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla8044_mark_entry_skipped(vha, entry_hdr, i); break; case QLA8044_RDMDIO: rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla8044_mark_entry_skipped(vha, entry_hdr, i); break; case QLA8044_POLLWR: rval = qla8044_minidump_process_pollwr(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla8044_mark_entry_skipped(vha, entry_hdr, i); break; case QLA82XX_RDNOP: default: qla8044_mark_entry_skipped(vha, entry_hdr, i); break; } data_collected = (uint8_t *)data_ptr - (uint8_t *)((uint8_t *)ha->md_dump); skip_nxt_entry: /* * next entry in the template */ entry_hdr = (struct qla8044_minidump_entry_hdr *) (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } if (data_collected != ha->md_dump_size) { ql_log(ql_log_info, vha, 0xb105, "Dump data mismatch: Data collected: " "[0x%x], total_data_size:[0x%x]\n", data_collected, ha->md_dump_size); rval = QLA_FUNCTION_FAILED; goto md_failed; } ql_log(ql_log_info, vha, 0xb110, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = true; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); ql_log(ql_log_info, vha, 0xb106, "Leaving fn: %s Last entry: 0x%x\n", __func__, i); md_failed: return rval; } void qla8044_get_minidump(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; if (!qla8044_collect_md_data(vha)) { ha->fw_dumped = true; ha->prev_minidump_failed = 0; } else { ql_log(ql_log_fatal, vha, 0xb0db, "%s: Unable to collect minidump\n", __func__); ha->prev_minidump_failed = 1; } } static int qla8044_poll_flash_status_reg(struct scsi_qla_host *vha) { uint32_t flash_status; int retries = QLA8044_FLASH_READ_RETRY_COUNT; int ret_val = QLA_SUCCESS; while (retries--) { ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS, 
&flash_status); if (ret_val) { ql_log(ql_log_warn, vha, 0xb13c, "%s: Failed to read FLASH_STATUS reg.\n", __func__); break; } if ((flash_status & QLA8044_FLASH_STATUS_READY) == QLA8044_FLASH_STATUS_READY) break; msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY); } if (!retries) ret_val = QLA_FUNCTION_FAILED; return ret_val; } static int qla8044_write_flash_status_reg(struct scsi_qla_host *vha, uint32_t data) { int ret_val = QLA_SUCCESS; uint32_t cmd; cmd = vha->hw->fdt_wrt_sts_reg_cmd; ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd); if (ret_val) { ql_log(ql_log_warn, vha, 0xb125, "%s: Failed to write to FLASH_ADDR.\n", __func__); goto exit_func; } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data); if (ret_val) { ql_log(ql_log_warn, vha, 0xb126, "%s: Failed to write to FLASH_WRDATA.\n", __func__); goto exit_func; } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, QLA8044_FLASH_SECOND_ERASE_MS_VAL); if (ret_val) { ql_log(ql_log_warn, vha, 0xb127, "%s: Failed to write to FLASH_CONTROL.\n", __func__); goto exit_func; } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val) ql_log(ql_log_warn, vha, 0xb128, "%s: Error polling flash status reg.\n", __func__); exit_func: return ret_val; } /* * This function assumes that the flash lock is held. */ static int qla8044_unprotect_flash(scsi_qla_host_t *vha) { int ret_val; struct qla_hw_data *ha = vha->hw; ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); if (ret_val) ql_log(ql_log_warn, vha, 0xb139, "%s: Write flash status failed.\n", __func__); return ret_val; } /* * This function assumes that the flash lock is held. */ static int qla8044_protect_flash(scsi_qla_host_t *vha) { int ret_val; struct qla_hw_data *ha = vha->hw; ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); if (ret_val) ql_log(ql_log_warn, vha, 0xb13b, "%s: Write flash status failed.\n", __func__); return ret_val; } static int qla8044_erase_flash_sector(struct scsi_qla_host *vha, uint32_t sector_start_addr) { uint32_t reversed_addr; int ret_val = QLA_SUCCESS; ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val) { ql_log(ql_log_warn, vha, 0xb12e, "%s: Poll flash status after erase failed..\n", __func__); } reversed_addr = (((sector_start_addr & 0xFF) << 16) | (sector_start_addr & 0xFF00) | ((sector_start_addr & 0xFF0000) >> 16)); ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, reversed_addr); if (ret_val) { ql_log(ql_log_warn, vha, 0xb12f, "%s: Failed to write to FLASH_WRDATA.\n", __func__); } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd); if (ret_val) { ql_log(ql_log_warn, vha, 0xb130, "%s: Failed to write to FLASH_ADDR.\n", __func__); } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, QLA8044_FLASH_LAST_ERASE_MS_VAL); if (ret_val) { ql_log(ql_log_warn, vha, 0xb131, "%s: Failed write to FLASH_CONTROL.\n", __func__); } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val) { ql_log(ql_log_warn, vha, 0xb132, "%s: Poll flash status failed.\n", __func__); } return ret_val; } /* * qla8044_flash_write_u32 - Write data to flash * * @ha : Pointer to adapter structure * addr : Flash address to write to * p_data : Data to be written * * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED * * NOTE: Lock should be held on entry */ static int qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr, uint32_t *p_data) { int ret_val = QLA_SUCCESS; ret_val = qla8044_wr_reg_indirect(vha, 
QLA8044_FLASH_ADDR, 0x00800000 | (addr >> 2)); if (ret_val) { ql_log(ql_log_warn, vha, 0xb134, "%s: Failed write to FLASH_ADDR.\n", __func__); goto exit_func; } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data); if (ret_val) { ql_log(ql_log_warn, vha, 0xb135, "%s: Failed write to FLASH_WRDATA.\n", __func__); goto exit_func; } ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D); if (ret_val) { ql_log(ql_log_warn, vha, 0xb136, "%s: Failed write to FLASH_CONTROL.\n", __func__); goto exit_func; } ret_val = qla8044_poll_flash_status_reg(vha); if (ret_val) { ql_log(ql_log_warn, vha, 0xb137, "%s: Poll flash status failed.\n", __func__); } exit_func: return ret_val; } static int qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { int ret = QLA_FUNCTION_FAILED; uint32_t spi_val; if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS || dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) { ql_dbg(ql_dbg_user, vha, 0xb123, "Got unsupported dwords = 0x%x.\n", dwords); return QLA_FUNCTION_FAILED; } qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val); qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, spi_val | QLA8044_FLASH_SPI_CTL); qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, QLA8044_FLASH_FIRST_TEMP_VAL); /* First DWORD write to FLASH_WRDATA */ ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, QLA8044_FLASH_FIRST_MS_PATTERN); ret = qla8044_poll_flash_status_reg(vha); if (ret) { ql_log(ql_log_warn, vha, 0xb124, "%s: Failed.\n", __func__); goto exit_func; } dwords--; qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, QLA8044_FLASH_SECOND_TEMP_VAL); /* Second to N-1 DWORDS writes */ while (dwords != 1) { qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, QLA8044_FLASH_SECOND_MS_PATTERN); ret = qla8044_poll_flash_status_reg(vha); if (ret) { ql_log(ql_log_warn, vha, 0xb129, "%s: Failed.\n", __func__); goto exit_func; } dwords--; } qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2)); /* Last DWORD write */ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, QLA8044_FLASH_LAST_MS_PATTERN); ret = qla8044_poll_flash_status_reg(vha); if (ret) { ql_log(ql_log_warn, vha, 0xb12a, "%s: Failed.\n", __func__); goto exit_func; } qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val); if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) { ql_log(ql_log_warn, vha, 0xb12b, "%s: Failed.\n", __func__); spi_val = 0; /* Operation failed, clear error bit. 
*/ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val); qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, spi_val | QLA8044_FLASH_SPI_CTL); } exit_func: return ret; } static int qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { int ret = QLA_FUNCTION_FAILED; uint32_t liter; for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { ret = qla8044_flash_write_u32(vha, faddr, dwptr); if (ret) { ql_dbg(ql_dbg_p3p, vha, 0xb141, "%s: flash address=%x data=%x.\n", __func__, faddr, *dwptr); break; } } return ret; } int qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval = QLA_FUNCTION_FAILED, i, burst_iter_count; int dword_count, erase_sec_count; uint32_t erase_offset; uint8_t *p_cache, *p_src; erase_offset = offset; p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL); if (!p_cache) return QLA_FUNCTION_FAILED; memcpy(p_cache, buf, length); p_src = p_cache; dword_count = length / sizeof(uint32_t); /* Since the offset and legth are sector aligned, it will be always * multiple of burst_iter_count (64) */ burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS; erase_sec_count = length / QLA8044_SECTOR_SIZE; /* Suspend HBA. */ scsi_block_requests(vha->host); /* Lock and enable write for whole operation. */ qla8044_flash_lock(vha); qla8044_unprotect_flash(vha); /* Erasing the sectors */ for (i = 0; i < erase_sec_count; i++) { rval = qla8044_erase_flash_sector(vha, erase_offset); ql_dbg(ql_dbg_user, vha, 0xb138, "Done erase of sector=0x%x.\n", erase_offset); if (rval) { ql_log(ql_log_warn, vha, 0xb121, "Failed to erase the sector having address: " "0x%x.\n", erase_offset); goto out; } erase_offset += QLA8044_SECTOR_SIZE; } ql_dbg(ql_dbg_user, vha, 0xb13f, "Got write for addr = 0x%x length=0x%x.\n", offset, length); for (i = 0; i < burst_iter_count; i++) { /* Go with write. */ rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src, offset, QLA8044_MAX_OPTROM_BURST_DWORDS); if (rval) { /* Buffer Mode failed skip to dword mode */ ql_log(ql_log_warn, vha, 0xb122, "Failed to write flash in buffer mode, " "Reverting to slow-write.\n"); rval = qla8044_write_flash_dword_mode(vha, (uint32_t *)p_src, offset, QLA8044_MAX_OPTROM_BURST_DWORDS); } p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; } ql_dbg(ql_dbg_user, vha, 0xb133, "Done writing.\n"); out: qla8044_protect_flash(vha); qla8044_flash_unlock(vha); scsi_unblock_requests(vha->host); kfree(p_cache); return rval; } #define LEG_INT_PTR_B31 (1 << 31) #define LEG_INT_PTR_B30 (1 << 30) #define PF_BITS_MASK (0xF << 16) /** * qla8044_intr_handler() - Process interrupts for the ISP8044 * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
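* Return: IRQ_HANDLED when the interrupt was serviced by this function, IRQ_NONE for spurious or other-function interrupts.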
*/ irqreturn_t qla8044_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0; unsigned long flags; unsigned long iter; uint32_t stat; uint16_t mb[8]; uint32_t leg_int_ptr = 0, pf_bit; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0xb143, "%s(): NULL response queue pointer\n", __func__); return IRQ_NONE; } ha = rsp->hw; vha = pci_get_drvdata(ha->pdev); if (unlikely(pci_channel_offline(ha->pdev))) return IRQ_HANDLED; leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ if (!(leg_int_ptr & (LEG_INT_PTR_B31))) { ql_dbg(ql_dbg_p3p, vha, 0xb144, "%s: Legacy Interrupt Bit 31 not set, " "spurious interrupt!\n", __func__); return IRQ_NONE; } pf_bit = ha->portnum << 16; /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) { ql_dbg(ql_dbg_p3p, vha, 0xb145, "%s: Incorrect function ID 0x%x in " "legacy interrupt register, " "ha->pf_bit = 0x%x\n", __func__, (leg_int_ptr & (PF_BITS_MASK)), pf_bit); return IRQ_NONE; } /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger * Control register and poll till Legacy Interrupt Pointer register * bit32 is 0. */ qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0); do { leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) break; } while (leg_int_ptr & (LEG_INT_PTR_B30)); reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); for (iter = 1; iter--; ) { if (rd_reg_dword(&reg->host_int)) { stat = rd_reg_dword(&reg->host_status); if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = rd_reg_word(&reg->mailbox_out[1]); mb[2] = rd_reg_word(&reg->mailbox_out[2]); mb[3] = rd_reg_word(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_p3p, vha, 0xb146, "Unrecognized interrupt type " "(%d).\n", stat & 0xff); break; } } wrt_reg_dword(&reg->host_int, 0); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } static int qla8044_idc_dontreset(struct qla_hw_data *ha) { uint32_t idc_ctrl; idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); return idc_ctrl & DONTRESET_BIT0; } static void qla8044_clear_rst_ready(scsi_qla_host_t *vha) { uint32_t drv_state; drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); /* * For ISP8044, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP82xx, drv_active has 4 bits per function */ drv_state &= ~(1 << vha->hw->portnum); ql_dbg(ql_dbg_p3p, vha, 0xb13d, "drv_state: 0x%08x\n", drv_state); qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); } int qla8044_abort_isp(scsi_qla_host_t *vha) { int rval; uint32_t dev_state; struct qla_hw_data *ha = vha->hw; qla8044_idc_lock(ha); dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); if (ql2xdontresethba) qla8044_set_idc_dontreset(vha); /* If device_state is NEED_RESET, go ahead with * Reset,irrespective of ql2xdontresethba. This is to allow a * non-reset-owner to force a reset. 
Non-reset-owner sets * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset * and then forces a Reset by setting device_state to * NEED_RESET. */ if (dev_state == QLA8XXX_DEV_READY) { /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset * recovery */ if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) { ql_dbg(ql_dbg_p3p, vha, 0xb13e, "Reset recovery disabled\n"); rval = QLA_FUNCTION_FAILED; goto exit_isp_reset; } ql_dbg(ql_dbg_p3p, vha, 0xb140, "HW State: NEED RESET\n"); qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_NEED_RESET); } /* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority * and which drivers are present. Unlike ISP82XX, the function setting * NEED_RESET, may not be the Reset owner. */ qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); qla8044_idc_lock(ha); qla8044_clear_rst_ready(vha); exit_isp_reset: qla8044_idc_unlock(ha); if (rval == QLA_SUCCESS) { ha->flags.isp82xx_fw_hung = 0; ha->flags.nic_core_reset_hdlr_active = 0; rval = qla82xx_restart_isp(vha); } return rval; } void qla8044_fw_dump(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (!ha->allow_cna_fw_dump) return; scsi_block_requests(vha->host); ha->flags.isp82xx_no_md_cap = 1; qla8044_idc_lock(ha); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); }
linux-master
drivers/scsi/qla2xxx/qla_nx2.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_gbl.h" #include <linux/delay.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "qla_devtbl.h" #ifdef CONFIG_SPARC #include <asm/prom.h> #endif #include "qla_target.h" /* * QLogic ISP2x00 Hardware Support Function Prototypes. */ static int qla2x00_isp_firmware(scsi_qla_host_t *); static int qla2x00_setup_chip(scsi_qla_host_t *); static int qla2x00_fw_ready(scsi_qla_host_t *); static int qla2x00_configure_hba(scsi_qla_host_t *); static int qla2x00_configure_loop(scsi_qla_host_t *); static int qla2x00_configure_local_loop(scsi_qla_host_t *); static int qla2x00_configure_fabric(scsi_qla_host_t *); static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *); static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea); static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, struct event_arg *); static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ void qla2x00_sp_timeout(struct timer_list *t) { srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; scsi_qla_host_t *vha = sp->vha; WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; iocb->timeout(sp); /* ref: TMR */ kref_put(&sp->cmd_kref, qla2x00_sp_release); if (vha && qla2x00_isp_reg_stat(vha->hw)) { ql_log(ql_log_info, vha, 0x9008, "PCI/Register disconnect.\n"); qla_pci_set_eeh_busy(vha); } } void qla2x00_sp_free(srb_t *sp) { struct srb_iocb *iocb = &sp->u.iocb_cmd; del_timer(&iocb->timer); qla2x00_rel_sp(sp); } void qla2xxx_rel_done_warning(srb_t *sp, int res) { WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp); } void qla2xxx_rel_free_warning(srb_t *sp) { WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp); } /* Asynchronous Login/Logout Routines -------------------------------------- */ unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *vha) { unsigned long tmo; struct qla_hw_data *ha = vha->hw; /* Firmware should use switch negotiated r_a_tov for timeout. */ tmo = ha->r_a_tov / 10 * 2; if (IS_QLAFX00(ha)) { tmo = FX00_DEF_RATOV * 2; } else if (!IS_FWI2_CAPABLE(ha)) { /* * Except for earlier ISPs where the timeout is seeded from the * initialization control block. 
*/ tmo = ha->login_timeout; } return tmo; } static void qla24xx_abort_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *abt = &sp->u.iocb_cmd; struct qla_qpair *qpair = sp->qpair; u32 handle; unsigned long flags; int sp_found = 0, cmdsp_found = 0; if (sp->cmd_sp) ql_dbg(ql_dbg_async, sp->vha, 0x507c, "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n", sp->cmd_sp->handle, sp->cmd_sp->type, sp->handle, sp->type); else ql_dbg(ql_dbg_async, sp->vha, 0x507c, "Abort timeout 2 - hdl=%x, type=%x\n", sp->handle, sp->type); spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] == sp->cmd_sp)) { qpair->req->outstanding_cmds[handle] = NULL; cmdsp_found = 1; qla_put_fw_resources(qpair, &sp->cmd_sp->iores); } /* removing the abort */ if (qpair->req->outstanding_cmds[handle] == sp) { qpair->req->outstanding_cmds[handle] = NULL; sp_found = 1; qla_put_fw_resources(qpair, &sp->iores); break; } } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (cmdsp_found && sp->cmd_sp) { /* * This done function should take care of * original command ref: INIT */ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); } if (sp_found) { abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); sp->done(sp, QLA_OS_TIMER_EXPIRED); } } static void qla24xx_abort_sp_done(srb_t *sp, int res) { struct srb_iocb *abt = &sp->u.iocb_cmd; srb_t *orig_sp = sp->cmd_sp; if (orig_sp) qla_wait_nvme_release_cmd_kref(orig_sp); if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&abt->u.abt.comp); else /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) { scsi_qla_host_t *vha = cmd_sp->vha; struct srb_iocb *abt_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; /* ref: INIT for ABTS command */ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, GFP_ATOMIC); if (!sp) return QLA_MEMORY_ALLOC_FAILED; qla_vha_mark_busy(vha); abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; sp->qpair = cmd_sp->qpair; sp->cmd_sp = cmd_sp; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; init_completion(&abt_iocb->u.abt.comp); /* FW can send 2 x ABTS's timeout/20s */ qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done); sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout; abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); ql_dbg(ql_dbg_async, vha, 0x507c, "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle, cmd_sp->type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } if (wait) { wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? QLA_SUCCESS : QLA_ERR_FROM_FW; /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } return rval; } void qla2x00_async_iocb_timeout(void *data) { srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; int rc, h; unsigned long flags; if (fcport) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); } else { pr_info("Async-%s timeout - hdl=%x.\n", sp->name, sp->handle); } switch (sp->type) { case SRB_LOGIN_CMD: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { /* Retry as needed. 
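* The abort could not be issued, so complete the login IOCB locally: flag a command error, keep the login-retried indication, pull the srb off the outstanding array and finish it with QLA_FUNCTION_TIMEOUT.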
*/ lio->u.logio.data[0] = MBS_COMMAND_ERROR; lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? QLA_LOGIO_LOGIN_RETRIED : 0; spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); sp->done(sp, QLA_FUNCTION_TIMEOUT); } break; case SRB_LOGOUT_CMD: case SRB_CT_PTHRU_CMD: case SRB_MB_IOCB: case SRB_NACK_PLOGI: case SRB_NACK_PRLI: case SRB_NACK_LOGO: case SRB_CTRL_VP: default: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); sp->done(sp, QLA_FUNCTION_TIMEOUT); } break; } } static void qla2x00_async_login_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; ql_dbg(ql_dbg_disc, vha, 0x20dd, "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (!test_bit(UNLOADING, &vha->dpc_flags)) { memset(&ea, 0, sizeof(ea)); ea.fcport = sp->fcport; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; if (res) ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_plogi_done_event(vha, &ea); } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { srb_t *sp; struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || fcport->loop_id == FC_NO_LOOP_ID) { ql_log(ql_log_warn, vha, 0xffff, "%s: %8phC - not sending command.\n", __func__, fcport->port_name); return rval; } /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); fcport->flags |= FCF_ASYNC_SENT; fcport->logout_completed = 0; sp->type = SRB_LOGIN_CMD; sp->name = "login"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_login_sp_done); lio = &sp->u.iocb_cmd; if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; } else { if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { lio->u.logio.flags |= (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI); } else { lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; } } if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; rval = qla2x00_start_sp(sp); ql_dbg(ql_dbg_disc, vha, 0x2072, "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->login_retry, lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : ""); if (rval != QLA_SUCCESS) { fcport->flags |= FCF_LOGIN_NEEDED; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); goto done_free_sp; } return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; /* * async login failed. Could be due to iocb/exchange resource * being low. Set state DELETED for re-login process to start again. 
*/ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); return rval; } static void qla2x00_async_logout_sp_done(srb_t *sp, int res) { sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); sp->fcport->login_gen++; qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; int rval = QLA_FUNCTION_FAILED; fcport->flags |= FCF_ASYNC_SENT; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_LOGOUT_CMD; sp->name = "logout"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_logout_sp_done), ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->port_name, fcport->explicit_logout); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); return rval; } void qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { fcport->flags &= ~FCF_ASYNC_ACTIVE; /* Don't re-login in target mode */ if (!fcport->tgt_session) qla2x00_mark_device_lost(vha, fcport, 1); qlt_logo_completion_handler(fcport, data[0]); } static void qla2x00_async_prlo_sp_done(srb_t *sp, int res) { struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; sp->fcport->flags &= ~FCF_ASYNC_ACTIVE; if (!test_bit(UNLOADING, &vha->dpc_flags)) qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, lio->u.logio.data); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; int rval; rval = QLA_FUNCTION_FAILED; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_PRLO_CMD; sp->name = "prlo"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_prlo_sp_done); ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } static void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) { struct fc_port *fcport = ea->fcport; unsigned long flags; ql_dbg(ql_dbg_disc, vha, 0x20d2, "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea->data[0]); if (ea->data[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_disc, vha, 0x2066, "%s %8phC: adisc fail: post delete\n", __func__, ea->fcport->port_name); spin_lock_irqsave(&vha->work_lock, flags); /* deleted = 0 & logout_on_delete = force fw cleanup */ if (fcport->deleted == QLA_SESS_DELETED) fcport->deleted = 0; fcport->logout_on_delete = 1; spin_unlock_irqrestore(&vha->work_lock, flags); qlt_schedule_sess_for_deletion(ea->fcport); return; } if (ea->fcport->disc_state == 
DSC_DELETE_PEND) return; if (ea->sp->gen2 != ea->fcport->login_gen) { /* target side must have changed it. */ ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed\n", __func__, ea->fcport->port_name); return; } else if (ea->sp->gen1 != ea->fcport->rscn_gen) { qla_rscn_replay(fcport); qlt_schedule_sess_for_deletion(fcport); return; } __qla24xx_handle_gpdb_event(vha, ea); } static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); return qla2x00_post_work(vha, e); } static void qla2x00_async_adisc_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct srb_iocb *lio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_disc, vha, 0x2066, "Async done-%s res %x %8phC\n", sp->name, res, sp->fcport->port_name); sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); ea.rc = res; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.fcport = sp->fcport; ea.sp = sp; if (res) ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_adisc_event(vha, &ea); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { srb_t *sp; struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; if (IS_SESSION_DELETED(fcport)) { ql_log(ql_log_warn, vha, 0xffff, "%s: %8phC is being delete - not sending command.\n", __func__, fcport->port_name); fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; fcport->flags |= FCF_ASYNC_SENT; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_ADISC_CMD; sp->name = "adisc"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_adisc_sp_done); if (data[1] & QLA_LOGIO_LOGIN_RETRIED) { lio = &sp->u.iocb_cmd; lio->u.logio.flags |= SRB_LOGIN_RETRIED; } ql_dbg(ql_dbg_disc, vha, 0x206f, "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_post_async_adisc_work(vha, fcport, data); return rval; } static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id) { struct qla_hw_data *ha = vha->hw; if (IS_FWI2_CAPABLE(ha)) return loop_id > NPH_LAST_HANDLE; return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST; } /** * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID * @vha: adapter state pointer. * @dev: port structure pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
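* Note: the map is scanned under vport_slock; the lookup fails rather than handing out an ID that qla2x00_is_reserved_id() rejects.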
*/ static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) { int rval; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; rval = QLA_SUCCESS; spin_lock_irqsave(&ha->vport_slock, flags); dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE); if (dev->loop_id >= LOOPID_MAP_SIZE || qla2x00_is_reserved_id(vha, dev->loop_id)) { dev->loop_id = FC_NO_LOOP_ID; rval = QLA_FUNCTION_FAILED; } else { set_bit(dev->loop_id, ha->loop_id_map); } spin_unlock_irqrestore(&ha->vport_slock, flags); if (rval == QLA_SUCCESS) ql_dbg(ql_dbg_disc, dev->vha, 0x2086, "Assigning new loopid=%x, portid=%x.\n", dev->loop_id, dev->d_id.b24); else ql_log(ql_log_warn, dev->vha, 0x2087, "No loop_id's available, portid=%x.\n", dev->d_id.b24); return rval; } void qla2x00_clear_loop_id(fc_port_t *fcport) { struct qla_hw_data *ha = fcport->vha->hw; if (fcport->loop_id == FC_NO_LOOP_ID || qla2x00_is_reserved_id(fcport->vha, fcport->loop_id)) return; clear_bit(fcport->loop_id, ha->loop_id_map); fcport->loop_id = FC_NO_LOOP_ID; } static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport, *conflict_fcport; struct get_name_list_extended *e; u16 i, n, found = 0, loop_id; port_id_t id; u64 wwn; u16 data[2]; u8 current_login_state, nvme_cls; fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, fcport->login_gen, fcport->last_login_gen, fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable); if (fcport->disc_state == DSC_DELETE_PEND) return; if (ea->rc) { /* rval */ if (fcport->login_retry == 0) { ql_dbg(ql_dbg_disc, vha, 0x20de, "GNL failed Port login retry %8phN, retry cnt=%d.\n", fcport->port_name, fcport->login_retry); } return; } if (fcport->last_rscn_gen != fcport->rscn_gen) { qla_rscn_replay(fcport); qlt_schedule_sess_for_deletion(fcport); return; } else if (fcport->last_login_gen != fcport->login_gen) { ql_dbg(ql_dbg_disc, vha, 0x20e0, "%s %8phC login gen changed\n", __func__, fcport->port_name); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; } n = ea->data[0] / sizeof(struct get_name_list_extended); ql_dbg(ql_dbg_disc, vha, 0x20e1, "%s %d %8phC n %d %02x%02x%02x lid %d \n", __func__, __LINE__, fcport->port_name, n, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->loop_id); for (i = 0; i < n; i++) { e = &vha->gnl.l[i]; wwn = wwn_to_u64(e->port_name); id.b.domain = e->port_id[2]; id.b.area = e->port_id[1]; id.b.al_pa = e->port_id[0]; id.b.rsvd_1 = 0; if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) continue; if (IS_SW_RESV_ADDR(id)) continue; found = 1; loop_id = le16_to_cpu(e->nport_handle); loop_id = (loop_id & 0x7fff); nvme_cls = e->current_login_state >> 4; current_login_state = e->current_login_state & 0xf; if (PRLI_PHASE(nvme_cls)) { current_login_state = nvme_cls; fcport->fc4_type &= ~FS_FC4TYPE_FCP; fcport->fc4_type |= FS_FC4TYPE_NVME; } else if (PRLI_PHASE(current_login_state)) { fcport->fc4_type |= FS_FC4TYPE_FCP; fcport->fc4_type &= ~FS_FC4TYPE_NVME; } ql_dbg(ql_dbg_disc, vha, 0x20e2, "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n", __func__, fcport->port_name, e->current_login_state, fcport->fw_login_state, fcport->fc4_type, id.b24, fcport->d_id.b24, loop_id, fcport->loop_id); switch (fcport->disc_state) { case DSC_DELETE_PEND: case DSC_DELETED: break; default: if ((id.b24 != fcport->d_id.b24 && 
fcport->d_id.b24 && fcport->loop_id != FC_NO_LOOP_ID) || (fcport->loop_id != FC_NO_LOOP_ID && fcport->loop_id != loop_id)) { ql_dbg(ql_dbg_disc, vha, 0x20e3, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); if (fcport->n2n_flag) fcport->d_id.b24 = 0; qlt_schedule_sess_for_deletion(fcport); return; } break; } fcport->loop_id = loop_id; if (fcport->n2n_flag) fcport->d_id.b24 = id.b24; wwn = wwn_to_u64(fcport->port_name); qlt_find_sess_invalidate_other(vha, wwn, id, loop_id, &conflict_fcport); if (conflict_fcport) { /* * Another share fcport share the same loop_id & * nport id. Conflict fcport needs to finish * cleanup before this fcport can proceed to login. */ conflict_fcport->conflict = fcport; fcport->login_pause = 1; } switch (vha->hw->current_topology) { default: switch (current_login_state) { case DSC_LS_PRLI_COMP: ql_dbg(ql_dbg_disc, vha, 0x20e4, "%s %d %8phC post gpdb\n", __func__, __LINE__, fcport->port_name); if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; data[0] = data[1] = 0; qla2x00_post_async_adisc_work(vha, fcport, data); break; case DSC_LS_PLOGI_COMP: if (vha->hw->flags.edif_enabled) { /* check to see if App support Secure */ qla24xx_post_gpdb_work(vha, fcport, 0); break; } fallthrough; case DSC_LS_PORT_UNAVAIL: default: if (fcport->loop_id == FC_NO_LOOP_ID) { qla2x00_find_new_loop_id(vha, fcport); fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; } ql_dbg(ql_dbg_disc, vha, 0x20e5, "%s %d %8phC\n", __func__, __LINE__, fcport->port_name); qla24xx_fcport_handle_login(vha, fcport); break; } break; case ISP_CFG_N: fcport->fw_login_state = current_login_state; fcport->d_id = id; switch (current_login_state) { case DSC_LS_PRLI_PEND: /* * In the middle of PRLI. Let it finish. * Allow relogin code to recheck state again * with GNL. 
Push disc_state back to DELETED * so GNL can go out again */ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; case DSC_LS_PRLI_COMP: if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; data[0] = data[1] = 0; qla2x00_post_async_adisc_work(vha, fcport, data); break; case DSC_LS_PLOGI_COMP: if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { /* check to see if App support secure or not */ qla24xx_post_gpdb_work(vha, fcport, 0); break; } if (fcport_is_bigger(fcport)) { /* local adapter is smaller */ if (fcport->loop_id != FC_NO_LOOP_ID) qla2x00_clear_loop_id(fcport); fcport->loop_id = loop_id; qla24xx_fcport_handle_login(vha, fcport); break; } fallthrough; default: if (fcport_is_smaller(fcport)) { /* local adapter is bigger */ if (fcport->loop_id != FC_NO_LOOP_ID) qla2x00_clear_loop_id(fcport); fcport->loop_id = loop_id; qla24xx_fcport_handle_login(vha, fcport); } break; } break; } /* switch (ha->current_topology) */ } if (!found) { switch (vha->hw->current_topology) { case ISP_CFG_F: case ISP_CFG_FL: for (i = 0; i < n; i++) { e = &vha->gnl.l[i]; id.b.domain = e->port_id[0]; id.b.area = e->port_id[1]; id.b.al_pa = e->port_id[2]; id.b.rsvd_1 = 0; loop_id = le16_to_cpu(e->nport_handle); if (fcport->d_id.b24 == id.b24) { conflict_fcport = qla2x00_find_fcport_by_wwpn(vha, e->port_name, 0); if (conflict_fcport) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e5, "%s %d %8phC post del sess\n", __func__, __LINE__, conflict_fcport->port_name); qlt_schedule_sess_for_deletion (conflict_fcport); } } /* * FW already picked this loop id for * another fcport */ if (fcport->loop_id == loop_id) fcport->loop_id = FC_NO_LOOP_ID; } qla24xx_fcport_handle_login(vha, fcport); break; case ISP_CFG_N: qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); if (time_after_eq(jiffies, fcport->dm_login_expire)) { if (fcport->n2n_link_reset_cnt < 2) { fcport->n2n_link_reset_cnt++; /* * remote port is not sending PLOGI. * Reset link to kick start his state * machine */ set_bit(N2N_LINK_RESET, &vha->dpc_flags); } else { if (fcport->n2n_chip_reset < 1) { ql_log(ql_log_info, vha, 0x705d, "Chip reset to bring laser down"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); fcport->n2n_chip_reset++; } else { ql_log(ql_log_info, vha, 0x705d, "Remote port %8ph is not coming back\n", fcport->port_name); fcport->scan_state = 0; } } qla2xxx_wake_dpc(vha); } else { /* * report port suppose to do PLOGI. Give him * more time. FW will catch it. 
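* RELOGIN_NEEDED keeps the DPC thread retrying discovery until the peer's PLOGI shows up or dm_login_expire is reached.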
*/ set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } break; case ISP_CFG_NL: qla24xx_fcport_handle_login(vha, fcport); break; default: break; } } } /* gnl_event */ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; unsigned long flags; struct fc_port *fcport = NULL, *tf; u16 i, n = 0, loop_id; struct event_arg ea; struct get_name_list_extended *e; u64 wwn; struct list_head h; bool found = false; ql_dbg(ql_dbg_disc, vha, 0x20e7, "Async done-%s res %x mb[1]=%x mb[2]=%x \n", sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], sp->u.iocb_cmd.u.mbx.in_mb[2]); sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); ea.sp = sp; ea.rc = res; if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= sizeof(struct get_name_list_extended)) { n = sp->u.iocb_cmd.u.mbx.in_mb[1] / sizeof(struct get_name_list_extended); ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ } for (i = 0; i < n; i++) { e = &vha->gnl.l[i]; loop_id = le16_to_cpu(e->nport_handle); /* mask out reserve bit */ loop_id = (loop_id & 0x7fff); set_bit(loop_id, vha->hw->loop_id_map); wwn = wwn_to_u64(e->port_name); ql_dbg(ql_dbg_disc, vha, 0x20e8, "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", __func__, &wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, (loop_id & 0x7fff)); } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); INIT_LIST_HEAD(&h); fcport = tf = NULL; if (!list_empty(&vha->gnl.fcports)) list_splice_init(&vha->gnl.fcports, &h); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); list_del_init(&fcport->gnl_entry); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ea.fcport = fcport; qla24xx_handle_gnl_done_event(vha, &ea); } /* create new fcport if fw has knowledge of new sessions */ for (i = 0; i < n; i++) { port_id_t id; u64 wwnn; e = &vha->gnl.l[i]; wwn = wwn_to_u64(e->port_name); found = false; list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { if (!memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) { found = true; break; } } id.b.domain = e->port_id[2]; id.b.area = e->port_id[1]; id.b.al_pa = e->port_id[0]; id.b.rsvd_1 = 0; if (!found && wwn && !IS_SW_RESV_ADDR(id)) { ql_dbg(ql_dbg_disc, vha, 0x2065, "%s %d %8phC %06x post new sess\n", __func__, __LINE__, (u8 *)&wwn, id.b24); wwnn = wwn_to_u64(e->node_name); qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn, (u8 *)&wwnn, NULL, 0); } } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); vha->gnl.sent = 0; if (!list_empty(&vha->gnl.fcports)) { /* retrigger gnl */ list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, gnl_entry) { list_del_init(&fcport->gnl_entry); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) break; } } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; int rval = QLA_FUNCTION_FAILED; unsigned long flags; u16 *mb; if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) goto done; ql_dbg(ql_dbg_disc, vha, 0x20d9, "Async-gnlist WWPN %8phC \n", fcport->port_name); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->flags |= FCF_ASYNC_SENT; qla2x00_set_fcport_disc_state(fcport, DSC_GNL); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = 
fcport->login_gen; list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); if (vha->gnl.sent) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return QLA_SUCCESS; } vha->gnl.sent = 1; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_MB_IOCB; sp->name = "gnlist"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla24xx_async_gnl_sp_done); mb = sp->u.iocb_cmd.u.mbx.out_mb; mb[0] = MBC_PORT_NODE_NAME_LIST; mb[1] = BIT_2 | BIT_3; mb[2] = MSW(vha->gnl.ldma); mb[3] = LSW(vha->gnl.ldma); mb[6] = MSW(MSD(vha->gnl.ldma)); mb[7] = LSW(MSD(vha->gnl.ldma)); mb[8] = vha->gnl.size; mb[9] = vha->vp_idx; ql_dbg(ql_dbg_disc, vha, 0x20da, "Async-%s - OUT WWPN %8phC hndl %x\n", sp->name, fcport->port_name, sp->handle); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~(FCF_ASYNC_SENT); done: fcport->flags &= ~(FCF_ASYNC_ACTIVE); return rval; } int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_GNL); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; return qla2x00_post_work(vha, e); } static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; fc_port_t *fcport = sp->fcport; u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; struct event_arg ea; ql_dbg(ql_dbg_disc, vha, 0x20db, "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", sp->name, res, fcport->port_name, mb[1], mb[2]); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (res == QLA_FUNCTION_TIMEOUT) goto done; memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; ea.sp = sp; qla24xx_handle_gpdb_event(vha, &ea); done: dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, sp->u.iocb_cmd.u.mbx.in_dma); kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; if (vha->host->active_mode == MODE_TARGET) return QLA_FUNCTION_FAILED; e = qla2x00_alloc_work(vha, QLA_EVT_PRLI); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; return qla2x00_post_work(vha, e); } static void qla2x00_async_prli_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; ql_dbg(ql_dbg_disc, vha, 0x2129, "%s %8phC res %x\n", __func__, sp->fcport->port_name, res); sp->fcport->flags &= ~FCF_ASYNC_SENT; if (!test_bit(UNLOADING, &vha->dpc_flags)) { memset(&ea, 0, sizeof(ea)); ea.fcport = sp->fcport; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; if (res == QLA_OS_TIMER_EXPIRED) ea.data[0] = QLA_OS_TIMER_EXPIRED; else if (res) ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_prli_done_event(vha, &ea); } kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; if (!vha->flags.online) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", __func__, __LINE__, fcport->port_name); return rval; } if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || fcport->fw_login_state == DSC_LS_PRLI_PEND) && 
qla_dual_mode_enabled(vha)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", __func__, __LINE__, fcport->port_name); return rval; } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) return rval; fcport->flags |= FCF_ASYNC_SENT; fcport->logout_completed = 0; sp->type = SRB_PRLI_CMD; sp->name = "prli"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_prli_sp_done); lio = &sp->u.iocb_cmd; lio->u.logio.flags = 0; if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; ql_dbg(ql_dbg_disc, vha, 0x211b, "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp"); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { fcport->flags |= FCF_LOGIN_NEEDED; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); goto done_free_sp; } return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; return rval; } int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_GPDB); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; e->u.fcport.opt = opt; fcport->flags |= FCF_ASYNC_ACTIVE; return qla2x00_post_work(vha, e); } int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) { srb_t *sp; struct srb_iocb *mbx; int rval = QLA_FUNCTION_FAILED; u16 *mb; dma_addr_t pd_dma; struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; if (IS_SESSION_DELETED(fcport)) { ql_log(ql_log_warn, vha, 0xffff, "%s: %8phC is being delete - not sending command.\n", __func__, fcport->port_name); fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) { ql_log(ql_log_warn, vha, 0xffff, "%s: %8phC online %d flags %x - not sending command.\n", __func__, fcport->port_name, vha->flags.online, fcport->flags); goto done; } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; qla2x00_set_fcport_disc_state(fcport, DSC_GPDB); fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_MB_IOCB; sp->name = "gpdb"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla24xx_async_gpdb_sp_done); pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { ql_log(ql_log_warn, vha, 0xd043, "Failed to allocate port database structure.\n"); goto done_free_sp; } mb = sp->u.iocb_cmd.u.mbx.out_mb; mb[0] = MBC_GET_PORT_DATABASE; mb[1] = fcport->loop_id; mb[2] = MSW(pd_dma); mb[3] = LSW(pd_dma); mb[6] = MSW(MSD(pd_dma)); mb[7] = LSW(MSD(pd_dma)); mb[9] = vha->vp_idx; mb[10] = opt; mbx = &sp->u.iocb_cmd; mbx->u.mbx.in = (void *)pd; mbx->u.mbx.in_dma = pd_dma; ql_dbg(ql_dbg_disc, vha, 0x20dc, "Async-%s %8phC hndl %x opt %x\n", sp->name, fcport->port_name, sp->handle, opt); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: if (pd) dma_pool_free(ha->s_dma_pool, pd, pd_dma); kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; qla24xx_post_gpdb_work(vha, fcport, opt); return rval; } static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) { unsigned long flags; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); ea->fcport->login_gen++; 
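/* Still under tgt.sess_lock: bump login_gen so stale async completions can be detected, and request an explicit logout when this session is eventually torn down. */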
ea->fcport->logout_on_delete = 1; if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { vha->fcport_count++; ea->fcport->login_succ = 1; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_sched_upd_fcport(ea->fcport); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else if (ea->fcport->login_succ) { /* * We have an existing session. A late RSCN delivery * must have triggered the session to be re-validate. * Session is still valid. */ ql_dbg(ql_dbg_disc, vha, 0x20d6, "%s %d %8phC session revalidate success\n", __func__, __LINE__, ea->fcport->port_name); qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE); } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport, struct port_database_24xx *pd) { int rc = 0; if (pd->secure_login) { ql_dbg(ql_dbg_disc, vha, 0x104d, "Secure Login established on %8phC\n", fcport->port_name); fcport->flags |= FCF_FCSP_DEVICE; } else { ql_dbg(ql_dbg_disc, vha, 0x104d, "non-Secure Login %8phC", fcport->port_name); fcport->flags &= ~FCF_FCSP_DEVICE; } if (vha->hw->flags.edif_enabled) { if (fcport->flags & FCF_FCSP_DEVICE) { qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND); /* Start edif prli timer & ring doorbell for app */ fcport->edif.rx_sa_set = 0; fcport->edif.tx_sa_set = 0; fcport->edif.rx_sa_pending = 0; fcport->edif.tx_sa_pending = 0; qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, fcport->d_id.b24); if (DBELL_ACTIVE(vha)) { ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n", __func__, __LINE__, fcport->port_name); fcport->edif.app_sess_online = 1; qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, fcport->d_id.b24, 0, fcport); } rc = 1; } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { ql_dbg(ql_dbg_disc, vha, 0x2117, "%s %d %8phC post prli\n", __func__, __LINE__, fcport->port_name); qla24xx_post_prli_work(vha, fcport); rc = 1; } } return rc; } static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport = ea->fcport; struct port_database_24xx *pd; struct srb *sp = ea->sp; uint8_t ls; pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; fcport->flags &= ~FCF_ASYNC_SENT; ql_dbg(ql_dbg_disc, vha, 0x20d2, "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__, fcport->port_name, fcport->disc_state, pd->current_login_state, fcport->fc4_type, ea->rc); if (fcport->disc_state == DSC_DELETE_PEND) { ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n", __func__, __LINE__, fcport->port_name); return; } if (NVME_TARGET(vha->hw, fcport)) ls = pd->current_login_state >> 4; else ls = pd->current_login_state & 0xf; if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. 
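* A login generation mismatch means another login/logout completed after this GPDB was issued; the port data is stale, so drop it.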
*/ ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed\n", __func__, fcport->port_name); return; } else if (ea->sp->gen1 != fcport->rscn_gen) { qla_rscn_replay(fcport); qlt_schedule_sess_for_deletion(fcport); ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", __func__, __LINE__, fcport->port_name, ls); return; } switch (ls) { case PDS_PRLI_COMPLETE: __qla24xx_parse_gpdb(vha, fcport, pd); break; case PDS_PLOGI_COMPLETE: if (qla_chk_secure_login(vha, fcport, pd)) { ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", __func__, __LINE__, fcport->port_name, ls); return; } fallthrough; case PDS_PLOGI_PENDING: case PDS_PRLI_PENDING: case PDS_PRLI2_PENDING: /* Set discovery state back to GNL to Relogin attempt */ if (qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) { qla2x00_set_fcport_disc_state(fcport, DSC_GNL); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", __func__, __LINE__, fcport->port_name, ls); return; case PDS_LOGO_PENDING: case PDS_PORT_UNAVAILABLE: default: ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion(fcport); return; } __qla24xx_handle_gpdb_event(vha, ea); } /* gpdb event */ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport) { u8 login = 0; int rc; ql_dbg(ql_dbg_disc, vha, 0x307b, "%s %8phC DS %d LS %d lid %d retries=%d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->loop_id, fcport->login_retry); if (qla_tgt_mode_enabled(vha)) return; if (qla_dual_mode_enabled(vha)) { if (N2N_TOPO(vha->hw)) { u64 mywwn, wwn; mywwn = wwn_to_u64(vha->port_name); wwn = wwn_to_u64(fcport->port_name); if (mywwn > wwn) login = 1; else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP) && time_after_eq(jiffies, fcport->plogi_nack_done_deadline)) login = 1; } else { login = 1; } } else { /* initiator mode */ login = 1; } if (login && fcport->login_retry) { fcport->login_retry--; if (fcport->loop_id == FC_NO_LOOP_ID) { fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; rc = qla2x00_find_new_loop_id(vha, fcport); if (rc) { ql_dbg(ql_dbg_disc, vha, 0x20e6, "%s %d %8phC post del sess - out of loopid\n", __func__, __LINE__, fcport->port_name); fcport->scan_state = 0; qlt_schedule_sess_for_deletion(fcport); return; } } ql_dbg(ql_dbg_disc, vha, 0x20bf, "%s %d %8phC post login\n", __func__, __LINE__, fcport->port_name); qla2x00_post_async_login_work(vha, fcport, NULL); } } int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) { u16 data[2]; u16 sec; ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->login_pause, fcport->flags, fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, fcport->login_gen, fcport->loop_id, fcport->scan_state, fcport->fc4_type); if (fcport->scan_state != QLA_FCPORT_FOUND || fcport->disc_state == DSC_DELETE_PEND) return 0; if ((fcport->loop_id != FC_NO_LOOP_ID) && qla_dual_mode_enabled(vha) && ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || (fcport->fw_login_state == DSC_LS_PRLI_PEND))) return 0; if (fcport->fw_login_state == DSC_LS_PLOGI_COMP && !N2N_TOPO(vha->hw)) { if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; } } /* Target won't initiate port login if fabric is present */ if (vha->host->active_mode == 
MODE_TARGET && !N2N_TOPO(vha->hw)) return 0; if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; } switch (fcport->disc_state) { case DSC_DELETED: switch (vha->hw->current_topology) { case ISP_CFG_N: if (fcport_is_smaller(fcport)) { /* this adapter is bigger */ if (fcport->login_retry) { if (fcport->loop_id == FC_NO_LOOP_ID) { qla2x00_find_new_loop_id(vha, fcport); fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; } fcport->login_retry--; qla_post_els_plogi_work(vha, fcport); } else { ql_log(ql_log_info, vha, 0x705d, "Unable to reach remote port %8phC", fcport->port_name); } } else { qla24xx_post_gnl_work(vha, fcport); } break; default: if (fcport->loop_id == FC_NO_LOOP_ID) { ql_dbg(ql_dbg_disc, vha, 0x20bd, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); qla24xx_post_gnl_work(vha, fcport); } else { qla_chk_n2n_b4_login(vha, fcport); } break; } break; case DSC_GNL: switch (vha->hw->current_topology) { case ISP_CFG_N: if ((fcport->current_login_state & 0xf) == 0x6) { ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post GPDB work\n", __func__, __LINE__, fcport->port_name); fcport->chip_reset = vha->hw->base_qpair->chip_reset; qla24xx_post_gpdb_work(vha, fcport, 0); } else { ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post %s PRLI\n", __func__, __LINE__, fcport->port_name, NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC"); qla24xx_post_prli_work(vha, fcport); } break; default: if (fcport->login_pause) { ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %d %8phC exit\n", __func__, __LINE__, fcport->port_name); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; } qla_chk_n2n_b4_login(vha, fcport); break; } break; case DSC_LOGIN_FAILED: if (N2N_TOPO(vha->hw)) qla_chk_n2n_b4_login(vha, fcport); else qlt_schedule_sess_for_deletion(fcport); break; case DSC_LOGIN_COMPLETE: /* recheck login state */ data[0] = data[1] = 0; qla2x00_post_async_adisc_work(vha, fcport, data); break; case DSC_LOGIN_PEND: if (vha->hw->flags.edif_enabled) break; if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post %s PRLI\n", __func__, __LINE__, fcport->port_name, NVME_TARGET(vha->hw, fcport) ? 
"NVME" : "FC"); qla24xx_post_prli_work(vha, fcport); } break; case DSC_UPD_FCPORT: sec = jiffies_to_msecs(jiffies - fcport->jiffies_at_registration)/1000; if (fcport->sec_since_registration < sec && sec && !(sec % 60)) { fcport->sec_since_registration = sec; ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, "%s %8phC - Slow Rport registration(%d Sec)\n", __func__, fcport->port_name, sec); } if (fcport->next_disc_state != DSC_DELETE_PEND) fcport->next_disc_state = DSC_ADISC; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; default: break; } return 0; } int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, u8 *port_name, u8 *node_name, void *pla, u8 fc4_type) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS); if (!e) return QLA_FUNCTION_FAILED; e->u.new_sess.id = *id; e->u.new_sess.pla = pla; e->u.new_sess.fc4_type = fc4_type; memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE); if (node_name) memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE); return qla2x00_post_work(vha, e); } void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport; unsigned long flags; switch (ea->id.b.rsvd_1) { case RSCN_PORT_ADDR: fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); if (fcport) { if (ql2xfc2target && fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) { ql_dbg(ql_dbg_disc, vha, 0x2115, "Delaying session delete for FCP2 portid=%06x %8phC ", fcport->d_id.b24, fcport->port_name); return; } if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { /* * On ipsec start by remote port, Target port * may use RSCN to trigger initiator to * relogin. If driver is already in the * process of a relogin, then ignore the RSCN * and allow the current relogin to continue. * This reduces thrashing of the connection. */ if (atomic_read(&fcport->state) == FCS_ONLINE) { /* * If state = online, then set scan_needed=1 to do relogin. 
* Otherwise we're already in the middle of a relogin */ fcport->scan_needed = 1; fcport->rscn_gen++; } } else { fcport->scan_needed = 1; fcport->rscn_gen++; } } break; case RSCN_AREA_ADDR: list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) continue; if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { fcport->scan_needed = 1; fcport->rscn_gen++; } } break; case RSCN_DOM_ADDR: list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) continue; if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { fcport->scan_needed = 1; fcport->rscn_gen++; } } break; case RSCN_FAB_ADDR: default: list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) continue; fcport->scan_needed = 1; fcport->rscn_gen++; } break; } spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags == 0) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); vha->scan.scan_flags |= SF_QUEUED; schedule_delayed_work(&vha->scan.scan_work, 5); } spin_unlock_irqrestore(&vha->work_lock, flags); } void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport = ea->fcport; if (test_bit(UNLOADING, &vha->dpc_flags)) return; ql_dbg(ql_dbg_disc, vha, 0x2102, "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->login_pause, fcport->deleted, fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen, fcport->flags); if (fcport->last_rscn_gen != fcport->rscn_gen) { ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); qla24xx_post_gnl_work(vha, fcport); return; } qla24xx_fcport_handle_login(vha, fcport); } void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) { if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) && vha->hw->flags.edif_enabled) { /* check to see if App support Secure */ qla24xx_post_gpdb_work(vha, ea->fcport, 0); return; } /* for pure Target Mode, PRLI will not be initiated */ if (vha->host->active_mode == MODE_TARGET) return; ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post PRLI\n", __func__, __LINE__, ea->fcport->port_name); qla24xx_post_prli_work(vha, ea->fcport); } /* * RSCN(s) came in for this fcport, but the RSCN(s) was not able * to be consumed by the fcport */ void qla_rscn_replay(fc_port_t *fcport) { struct event_arg ea; switch (fcport->disc_state) { case DSC_DELETE_PEND: return; default: break; } if (fcport->scan_needed) { memset(&ea, 0, sizeof(ea)); ea.id = fcport->d_id; ea.id.b.rsvd_1 = RSCN_PORT_ADDR; qla2x00_handle_rscn(fcport->vha, &ea); } } static void qla2x00_tmf_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; int rc, h; unsigned long flags; if (sp->type == SRB_MARKER) rc = QLA_FUNCTION_FAILED; else rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; qla_put_fw_resources(sp->qpair, &sp->iores); break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT); tmf->u.tmf.data = QLA_FUNCTION_FAILED; 
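/* Wake the task-management waiter blocked in wait_for_completion() with a failed status. */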
complete(&tmf->u.tmf.comp); } } static void qla_marker_sp_done(srb_t *sp, int res) { struct srb_iocb *tmf = &sp->u.iocb_cmd; if (res != QLA_SUCCESS) ql_dbg(ql_dbg_taskm, sp->vha, 0x8004, "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n", sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags, sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id); sp->u.iocb_cmd.u.tmf.data = res; complete(&tmf->u.tmf.comp); } #define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \ {\ int cnt = 5; \ do { \ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\ _rval = EINVAL; \ break; \ } \ _rval = qla2x00_start_sp(_sp); \ if (_rval == EAGAIN) \ msleep(1); \ else \ break; \ cnt--; \ } while (cnt); \ } /** * qla26xx_marker: send marker IOCB and wait for the completion of it. * @arg: pointer to argument list. * It is assume caller will provide an fcport pointer and modifier */ static int qla26xx_marker(struct tmf_arg *arg) { struct scsi_qla_host *vha = arg->vha; struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; fc_port_t *fcport = arg->fcport; u32 chip_gen, login_gen; if (TMF_NOT_READY(arg->fcport)) { ql_dbg(ql_dbg_taskm, vha, 0x8039, "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", fcport->loop_id, fcport->d_id.b24, arg->modifier, arg->lun, arg->qpair->id); return QLA_SUSPENDED; } chip_gen = vha->hw->chip_reset; login_gen = fcport->login_gen; /* ref: INIT */ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_MARKER; sp->name = "marker"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done); sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; tm_iocb = &sp->u.iocb_cmd; init_completion(&tm_iocb->u.tmf.comp); tm_iocb->u.tmf.modifier = arg->modifier; tm_iocb->u.tmf.lun = arg->lun; tm_iocb->u.tmf.loop_id = fcport->loop_id; tm_iocb->u.tmf.vp_index = vha->vp_idx; START_SP_W_RETRIES(sp, rval, chip_gen, login_gen); ql_dbg(ql_dbg_taskm, vha, 0x8006, "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", sp->handle, fcport->loop_id, fcport->d_id.b24, arg->modifier, arg->lun, sp->qpair->id, rval); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8031, "Marker IOCB send failure (%x).\n", rval); goto done_free_sp; } wait_for_completion(&tm_iocb->u.tmf.comp); rval = tm_iocb->u.tmf.data; if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8019, "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", sp->handle, fcport->loop_id, fcport->d_id.b24, arg->modifier, arg->lun, sp->qpair->id, rval); } done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } static void qla2x00_tmf_sp_done(srb_t *sp, int res) { struct srb_iocb *tmf = &sp->u.iocb_cmd; if (res) tmf->u.tmf.data = res; complete(&tmf->u.tmf.comp); } static int qla_tmf_wait(struct tmf_arg *arg) { /* there are only 2 types of error handling that reaches here, lun or target reset */ if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET)) return qla2x00_eh_wait_for_pending_commands(arg->vha, arg->fcport->d_id.b24, arg->lun, WAIT_LUN); else return qla2x00_eh_wait_for_pending_commands(arg->vha, arg->fcport->d_id.b24, arg->lun, WAIT_TARGET); } static int __qla2x00_async_tm_cmd(struct tmf_arg *arg) { struct scsi_qla_host *vha = arg->vha; struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; fc_port_t *fcport = arg->fcport; u32 chip_gen, login_gen; u64 jif; if 
(TMF_NOT_READY(arg->fcport)) { ql_dbg(ql_dbg_taskm, vha, 0x8032, "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", fcport->loop_id, fcport->d_id.b24, arg->modifier, arg->lun, arg->qpair->id); return QLA_SUSPENDED; } chip_gen = vha->hw->chip_reset; login_gen = fcport->login_gen; /* ref: INIT */ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) goto done; qla_vha_mark_busy(vha); sp->type = SRB_TM_CMD; sp->name = "tmf"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla2x00_tmf_sp_done); sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; tm_iocb = &sp->u.iocb_cmd; init_completion(&tm_iocb->u.tmf.comp); tm_iocb->u.tmf.flags = arg->flags; tm_iocb->u.tmf.lun = arg->lun; START_SP_W_RETRIES(sp, rval, chip_gen, login_gen); ql_dbg(ql_dbg_taskm, vha, 0x802f, "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n", sp->handle, fcport->loop_id, fcport->d_id.b24, arg->flags, arg->lun, sp->qpair->id, rval); if (rval != QLA_SUCCESS) goto done_free_sp; wait_for_completion(&tm_iocb->u.tmf.comp); rval = tm_iocb->u.tmf.data; if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8030, "TM IOCB failed (%x).\n", rval); } if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { jif = jiffies; if (qla_tmf_wait(arg)) { ql_log(ql_log_info, vha, 0x803e, "Waited %u ms Nexus=%ld:%06x:%llu.\n", jiffies_to_msecs(jiffies - jif), vha->host_no, fcport->d_id.b24, arg->lun); } if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) { rval = qla26xx_marker(arg); } else { ql_log(ql_log_info, vha, 0x803e, "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n", vha->host_no, fcport->d_id.b24, arg->lun); rval = QLA_FUNCTION_FAILED; } } if (tm_iocb->u.tmf.data) rval = tm_iocb->u.tmf.data; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } static void qla_put_tmf(struct tmf_arg *arg) { struct scsi_qla_host *vha = arg->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->active_tmf--; list_del(&arg->tmf_elem); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static int qla_get_tmf(struct tmf_arg *arg) { struct scsi_qla_host *vha = arg->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; fc_port_t *fcport = arg->fcport; int rc = 0; struct tmf_arg *t; spin_lock_irqsave(&ha->tgt.sess_lock, flags); list_for_each_entry(t, &ha->tmf_active, tmf_elem) { if (t->fcport == arg->fcport && t->lun == arg->lun) { /* reject duplicate TMF */ ql_log(ql_log_warn, vha, 0x802c, "found duplicate TMF. 
Nexus=%ld:%06x:%llu.\n", vha->host_no, fcport->d_id.b24, arg->lun); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return -EINVAL; } } list_add_tail(&arg->tmf_elem, &ha->tmf_pending); while (ha->active_tmf >= MAX_ACTIVE_TMF) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); msleep(1); spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (TMF_NOT_READY(fcport)) { ql_log(ql_log_warn, vha, 0x802c, "Unable to acquire TM resource due to disruption.\n"); rc = EIO; break; } if (ha->active_tmf < MAX_ACTIVE_TMF && list_is_first(&arg->tmf_elem, &ha->tmf_pending)) break; } list_del(&arg->tmf_elem); if (!rc) { ha->active_tmf++; list_add_tail(&arg->tmf_elem, &ha->tmf_active); } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return rc; } int qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, uint32_t tag) { struct scsi_qla_host *vha = fcport->vha; struct tmf_arg a; int rval = QLA_SUCCESS; if (TMF_NOT_READY(fcport)) return QLA_SUSPENDED; a.vha = fcport->vha; a.fcport = fcport; a.lun = lun; a.flags = flags; INIT_LIST_HEAD(&a.tmf_elem); if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { a.modifier = MK_SYNC_ID_LUN; } else { a.modifier = MK_SYNC_ID; } if (qla_get_tmf(&a)) return QLA_FUNCTION_FAILED; a.qpair = vha->hw->base_qpair; rval = __qla2x00_async_tm_cmd(&a); qla_put_tmf(&a); return rval; } int qla24xx_async_abort_command(srb_t *sp) { unsigned long flags = 0; uint32_t handle; fc_port_t *fcport = sp->fcport; struct qla_qpair *qpair = sp->qpair; struct scsi_qla_host *vha = fcport->vha; struct req_que *req = qpair->req; spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (handle = 1; handle < req->num_outstanding_cmds; handle++) { if (req->outstanding_cmds[handle] == sp) break; } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (handle == req->num_outstanding_cmds) { /* Command not found. */ return QLA_ERR_NOT_FOUND; } if (sp->type == SRB_FXIOCB_DCMD) return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, FXDISC_ABORT_IOCTL); return qla24xx_async_abort_cmd(sp, true); } static void qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { struct srb *sp; WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea->data[0]); switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post gpdb\n", __func__, __LINE__, ea->fcport->port_name); ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; ea->fcport->nvme_prli_service_param = ea->iop[0]; if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST) ea->fcport->nvme_first_burst_size = (ea->iop[1] & 0xffff) * 512; else ea->fcport->nvme_first_burst_size = 0; qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; default: sp = ea->sp; ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC priority %s, fc4type %x prev try %s\n", __func__, __LINE__, ea->fcport->port_name, vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe", ea->fcport->fc4_type, (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ? 
"NVME" : "FCP"); if (NVME_FCP_TARGET(ea->fcport)) { if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ea->fcport->do_prli_nvme = 0; else ea->fcport->do_prli_nvme = 1; } else { ea->fcport->do_prli_nvme = 0; } if (N2N_TOPO(vha->hw)) { if (ea->fcport->n2n_link_reset_cnt == vha->hw->login_retry_count && ea->fcport->flags & FCF_FCSP_DEVICE) { /* remote authentication app just started */ ea->fcport->n2n_link_reset_cnt = 0; } if (ea->fcport->n2n_link_reset_cnt < vha->hw->login_retry_count) { ea->fcport->n2n_link_reset_cnt++; vha->relogin_jif = jiffies + 2 * HZ; /* * PRLI failed. Reset link to kick start * state machine */ set_bit(N2N_LINK_RESET, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { ql_log(ql_log_warn, vha, 0x2119, "%s %d %8phC Unable to reconnect\n", __func__, __LINE__, ea->fcport->port_name); } } else { /* * switch connect. login failed. Take connection down * and allow relogin to retrigger */ ea->fcport->flags &= ~FCF_ASYNC_SENT; ea->fcport->keep_nport_handle = 0; ea->fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); } break; } } void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { port_id_t cid; /* conflict Nport id */ u16 lid; struct fc_port *conflict_fcport; unsigned long flags; struct fc_port *fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, ea->sp->gen1, fcport->rscn_gen, ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]); if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || (fcport->fw_login_state == DSC_LS_PRLI_PEND)) { ql_dbg(ql_dbg_disc, vha, 0x20ea, "%s %d %8phC Remote is trying to login\n", __func__, __LINE__, fcport->port_name); return; } if ((fcport->disc_state == DSC_DELETE_PEND) || (fcport->disc_state == DSC_DELETED)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; } if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. */ ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed\n", __func__, fcport->port_name); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; } else if (ea->sp->gen1 != fcport->rscn_gen) { ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC RSCN generation changed\n", __func__, fcport->port_name); qla_rscn_replay(fcport); qlt_schedule_sess_for_deletion(fcport); return; } WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea->data[0]); switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: /* * Driver must validate login state - If PRLI not complete, * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. */ if (vha->hw->flags.edif_enabled) { set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; ea->fcport->send_els_logo = 0; ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_post_gpdb_work(vha, ea->fcport, 0); } else { if (NVME_TARGET(vha->hw, fcport)) { ql_dbg(ql_dbg_disc, vha, 0x2117, "%s %d %8phC post prli\n", __func__, __LINE__, fcport->port_name); qla24xx_post_prli_work(vha, fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, "%s %d %8phC LoopID 0x%x in use with %06x. 
post gpdb\n", __func__, __LINE__, fcport->port_name, fcport->loop_id, fcport->d_id.b24); set_bit(fcport->loop_id, vha->hw->loop_id_map); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->chip_reset = vha->hw->base_qpair->chip_reset; fcport->logout_on_delete = 1; fcport->send_els_logo = 0; fcport->fw_login_state = DSC_LS_PRLI_COMP; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_post_gpdb_work(vha, fcport, 0); } } break; case MBS_COMMAND_ERROR: ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", __func__, __LINE__, ea->fcport->port_name, ea->data[1]); qlt_schedule_sess_for_deletion(ea->fcport); break; case MBS_LOOP_ID_USED: /* data[1] = IO PARAM 1 = nport ID */ cid.b.domain = (ea->iop[1] >> 16) & 0xff; cid.b.area = (ea->iop[1] >> 8) & 0xff; cid.b.al_pa = ea->iop[1] & 0xff; cid.b.rsvd_1 = 0; ql_dbg(ql_dbg_disc, vha, 0x20ec, "%s %d %8phC lid %#x in use with pid %06x post gnl\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->loop_id, cid.b24); set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); ea->fcport->loop_id = FC_NO_LOOP_ID; qla24xx_post_gnl_work(vha, ea->fcport); break; case MBS_PORT_ID_USED: lid = ea->iop[1] & 0xffff; qlt_find_sess_invalidate_other(vha, wwn_to_u64(ea->fcport->port_name), ea->fcport->d_id, lid, &conflict_fcport); if (conflict_fcport) { /* * Another fcport share the same loop_id/nport id. * Conflict fcport needs to finish cleanup before this * fcport can proceed to login. */ conflict_fcport->conflict = ea->fcport; ea->fcport->login_pause = 1; ql_dbg(ql_dbg_disc, vha, 0x20ed, "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->d_id.b24, lid); } else { ql_dbg(ql_dbg_disc, vha, 0x20ed, "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->d_id.b24, lid); qla2x00_clear_loop_id(ea->fcport); set_bit(lid, vha->hw->loop_id_map); ea->fcport->loop_id = lid; ea->fcport->keep_nport_handle = 0; ea->fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); } break; } return; } /****************************************************************************/ /* QLogic ISP2x00 Hardware Support Functions. */ /****************************************************************************/ static int qla83xx_nic_core_fw_load(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint32_t idc_major_ver, idc_minor_ver; uint16_t config[4]; qla83xx_idc_lock(vha, 0); /* SV: TODO: Assign initialization timeout from * flash-info / other param */ ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT; ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT; /* Set our fcoe function presence */ if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb077, "Error while setting DRV-Presence.\n"); rval = QLA_FUNCTION_FAILED; goto exit; } /* Decide the reset ownership */ qla83xx_reset_ownership(vha); /* * On first protocol driver load: * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery * register. * Others: Check compatibility with current IDC Major version. 
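* An incompatible function clears its drv-presence bit and fails the load.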
*/ qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver); if (ha->flags.nic_core_reset_owner) { /* Set IDC Major version */ idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION; qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver); /* Clearing IDC-Lock-Recovery register */ qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0); } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) { /* * Clear further IDC participation if we are not compatible with * the current IDC Major Version. */ ql_log(ql_log_warn, vha, 0xb07d, "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n", idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION); __qla83xx_clear_drv_presence(vha); rval = QLA_FUNCTION_FAILED; goto exit; } /* Each function sets its supported Minor version. */ qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver); idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2)); qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver); if (ha->flags.nic_core_reset_owner) { memset(config, 0, sizeof(config)); if (!qla81xx_get_port_config(vha, config)) qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); } rval = qla83xx_idc_state_handler(vha); exit: qla83xx_idc_unlock(vha, 0); return rval; } /* * qla2x00_initialize_adapter * Initialize board. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ int qla2x00_initialize_adapter(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); /* Clear adapter flags. */ vha->flags.online = 0; ha->flags.chip_reset_done = 0; vha->flags.reset_active = 0; ha->flags.pci_channel_io_perm_failure = 0; ha->flags.eeh_busy = 0; vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; vha->dpc_flags = 0; vha->flags.management_server_logged_in = 0; vha->marker_needed = 0; ha->isp_abort_cnt = 0; ha->beacon_blink_led = 0; set_bit(0, ha->req_qid_map); set_bit(0, ha->rsp_qid_map); ql_dbg(ql_dbg_init, vha, 0x0040, "Configuring PCI space...\n"); rval = ha->isp_ops->pci_config(vha); if (rval) { ql_log(ql_log_warn, vha, 0x0044, "Unable to configure PCI space.\n"); return (rval); } ha->isp_ops->reset_chip(vha); /* Check for secure flash support */ if (IS_QLA28XX(ha)) { if (rd_reg_word(&reg->mailbox12) & BIT_0) ha->flags.secure_adapter = 1; ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", (ha->flags.secure_adapter) ? "Yes" : "No"); } rval = qla2xxx_get_flash_info(vha); if (rval) { ql_log(ql_log_fatal, vha, 0x004f, "Unable to validate FLASH data.\n"); return rval; } if (IS_QLA8044(ha)) { qla8044_read_reset_template(vha); /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. * If DONRESET_BIT0 is set, drivers should not set dev_state * to NEED_RESET. But if NEED_RESET is set, drivers should * should honor the reset. 
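* qla8044_set_idc_dontreset() below is called only when ql2xdontresethba is set to 1.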
*/ if (ql2xdontresethba == 1) qla8044_set_idc_dontreset(vha); } ha->isp_ops->get_flash_version(vha, req->ring); ql_dbg(ql_dbg_init, vha, 0x0061, "Configure NVRAM parameters...\n"); /* Let priority default to FCP, can be overridden by nvram_config */ ha->fc4_type_priority = FC4_PRIORITY_FCP; ha->isp_ops->nvram_config(vha); if (ha->fc4_type_priority != FC4_PRIORITY_FCP && ha->fc4_type_priority != FC4_PRIORITY_NVME) ha->fc4_type_priority = FC4_PRIORITY_FCP; ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n", ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); if (ha->flags.disable_serdes) { /* Mask HBA via NVRAM settings? */ ql_log(ql_log_info, vha, 0x0077, "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_init, vha, 0x0078, "Verifying loaded RISC code...\n"); /* If smartsan enabled then require fdmi and rdp enabled */ if (ql2xsmartsan) { ql2xfdmienable = 1; ql2xrdpenable = 1; } if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { rval = ha->isp_ops->chip_diag(vha); if (rval) return (rval); rval = qla2x00_setup_chip(vha); if (rval) return (rval); } if (IS_QLA84XX(ha)) { ha->cs84xx = qla84xx_get_chip(vha); if (!ha->cs84xx) { ql_log(ql_log_warn, vha, 0x00d0, "Unable to configure ISP84XX.\n"); return QLA_FUNCTION_FAILED; } } if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) rval = qla2x00_init_rings(vha); /* No point in continuing if firmware initialization failed. */ if (rval != QLA_SUCCESS) return rval; ha->flags.chip_reset_done = 1; if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { /* Issue verify 84xx FW IOCB to complete 84xx initialization */ rval = qla84xx_init_chip(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00d4, "Unable to initialize ISP84XX.\n"); qla84xx_put_chip(vha); } } /* Load the NIC Core f/w if we are the first protocol driver. */ if (IS_QLA8031(ha)) { rval = qla83xx_nic_core_fw_load(vha); if (rval) ql_log(ql_log_warn, vha, 0x0124, "Error in initializing NIC Core f/w.\n"); } if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) qla24xx_read_fcp_prio_cfg(vha); if (IS_P3P_TYPE(ha)) qla82xx_set_driver_version(vha, QLA2XXX_VERSION); else qla25xx_set_driver_version(vha, QLA2XXX_VERSION); return (rval); } /** * qla2100_pci_config() - Setup ISP21xx PCI configuration registers. * @vha: HA context * * Returns 0 on success. */ int qla2100_pci_config(scsi_qla_host_t *vha) { uint16_t w; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); pci_write_config_word(ha->pdev, PCI_COMMAND, w); pci_disable_rom(ha->pdev); /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->pci_attr = rd_reg_word(&reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla2300_pci_config() - Setup ISP23xx PCI configuration registers. * @vha: HA context * * Returns 0 on success. 
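* On a real ISP2300 the frame-buffer revision is read to distinguish it from a 2310, and MWI is cleared when FPM_2300 is found.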
*/ int qla2300_pci_config(scsi_qla_host_t *vha) { uint16_t w; unsigned long flags = 0; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); if (IS_QLA2322(ha) || IS_QLA6322(ha)) w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); /* * If this is a 2300 card and not 2312, reset the * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately, * the 2310 also reports itself as a 2300 so we need to get the * fb revision level -- a 6 indicates it really is a 2300 and * not a 2310. */ if (IS_QLA2300(ha)) { spin_lock_irqsave(&ha->hardware_lock, flags); /* Pause RISC. */ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC); for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0) break; udelay(10); } /* Select FPM registers. */ wrt_reg_word(&reg->ctrl_status, 0x20); rd_reg_word(&reg->ctrl_status); /* Get the fb rev level */ ha->fb_rev = RD_FB_CMD_REG(ha, reg); if (ha->fb_rev == FPM_2300) pci_clear_mwi(ha->pdev); /* Deselect FPM registers. */ wrt_reg_word(&reg->ctrl_status, 0x0); rd_reg_word(&reg->ctrl_status); /* Release RISC module. */ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC); for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0) break; udelay(10); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); pci_disable_rom(ha->pdev); /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->pci_attr = rd_reg_word(&reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers. * @vha: HA context * * Returns 0 on success. */ int qla24xx_pci_config(scsi_qla_host_t *vha) { uint16_t w; unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80); /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */ if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX)) pcix_set_mmrbc(ha->pdev, 2048); /* PCIe -- adjust Maximum Read Request Size (2048). */ if (pci_is_pcie(ha->pdev)) pcie_set_readrq(ha->pdev, 4096); pci_disable_rom(ha->pdev); ha->chip_revision = ha->pdev->revision; /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->pci_attr = rd_reg_dword(&reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers. * @vha: HA context * * Returns 0 on success. */ int qla25xx_pci_config(scsi_qla_host_t *vha) { uint16_t w; struct qla_hw_data *ha = vha->hw; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); /* PCIe -- adjust Maximum Read Request Size (2048). 
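* Note: pcie_set_readrq() below requests 4096 bytes.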
*/ if (pci_is_pcie(ha->pdev)) pcie_set_readrq(ha->pdev, 4096); pci_disable_rom(ha->pdev); ha->chip_revision = ha->pdev->revision; return QLA_SUCCESS; } /** * qla2x00_isp_firmware() - Choose firmware image. * @vha: HA context * * Returns 0 on success. */ static int qla2x00_isp_firmware(scsi_qla_host_t *vha) { int rval; uint16_t loop_id, topo, sw_cap; uint8_t domain, area, al_pa; struct qla_hw_data *ha = vha->hw; /* Assume loading risc code */ rval = QLA_FUNCTION_FAILED; if (ha->flags.disable_risc_code_load) { ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n"); /* Verify checksum of loaded RISC code. */ rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); if (rval == QLA_SUCCESS) { /* And, verify we are not in ROM code. */ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); } } if (rval) ql_dbg(ql_dbg_init, vha, 0x007a, "**** Load RISC code ****.\n"); return (rval); } /** * qla2x00_reset_chip() - Reset ISP chip. * @vha: HA context * * Returns 0 on success. */ int qla2x00_reset_chip(scsi_qla_host_t *vha) { unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t cnt; uint16_t cmd; int rval = QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(ha->pdev))) return rval; ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); /* Turn off master enable */ cmd = 0; pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd); cmd &= ~PCI_COMMAND_MASTER; pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); if (!IS_QLA2100(ha)) { /* Pause RISC. */ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC); if (IS_QLA2200(ha) || IS_QLA2300(ha)) { for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0) break; udelay(100); } } else { rd_reg_word(&reg->hccr); /* PCI Posting. */ udelay(10); } /* Select FPM registers. */ wrt_reg_word(&reg->ctrl_status, 0x20); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ /* FPM Soft Reset. */ wrt_reg_word(&reg->fpm_diag_config, 0x100); rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */ /* Toggle Fpm Reset. */ if (!IS_QLA2200(ha)) { wrt_reg_word(&reg->fpm_diag_config, 0x0); rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */ } /* Select frame buffer registers. */ wrt_reg_word(&reg->ctrl_status, 0x10); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ /* Reset frame buffer FIFOs. */ if (IS_QLA2200(ha)) { WRT_FB_CMD_REG(ha, reg, 0xa000); RD_FB_CMD_REG(ha, reg); /* PCI Posting. */ } else { WRT_FB_CMD_REG(ha, reg, 0x00fc); /* Read back fb_cmd until zero or 3 seconds max */ for (cnt = 0; cnt < 3000; cnt++) { if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0) break; udelay(100); } } /* Select RISC module registers. */ wrt_reg_word(&reg->ctrl_status, 0); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ /* Reset RISC processor. */ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); rd_reg_word(&reg->hccr); /* PCI Posting. */ /* Release RISC processor. */ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC); rd_reg_word(&reg->hccr); /* PCI Posting. */ } wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT); /* Reset ISP chip. */ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET); /* Wait for RISC to recover from reset. */ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { /* * It is necessary to for a delay here since the card doesn't * respond to PCI reads during a reset. On some architectures * this will result in an MCA. 
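* After the initial delay, CSR_ISP_SOFT_RESET is polled for up to 30000 iterations, 100us apart.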
*/ udelay(20); for (cnt = 30000; cnt; cnt--) { if ((rd_reg_word(&reg->ctrl_status) & CSR_ISP_SOFT_RESET) == 0) break; udelay(100); } } else udelay(10); /* Reset RISC processor. */ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); wrt_reg_word(&reg->semaphore, 0); /* Release RISC processor. */ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC); rd_reg_word(&reg->hccr); /* PCI Posting. */ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { for (cnt = 0; cnt < 30000; cnt++) { if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY) break; udelay(100); } } else udelay(100); /* Turn on master enable */ cmd |= PCI_COMMAND_MASTER; pci_write_config_word(ha->pdev, PCI_COMMAND, cmd); /* Disable RISC pause on FPM parity error. */ if (!IS_QLA2100(ha)) { wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE); rd_reg_word(&reg->hccr); /* PCI Posting. */ } spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC. * @vha: HA context * * Returns 0 on success. */ static int qla81xx_reset_mpi(scsi_qla_host_t *vha) { uint16_t mb[4] = {0x1010, 0, 1, 0}; if (!IS_QLA81XX(vha->hw)) return QLA_SUCCESS; return qla81xx_write_mpi_register(vha, mb); } static int qla_chk_risc_recovery(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; __le16 __iomem *mbptr = &reg->mailbox0; int i; u16 mb[32]; int rc = QLA_SUCCESS; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return rc; /* this check is only valid after RISC reset */ mb[0] = rd_reg_word(mbptr); mbptr++; if (mb[0] == 0xf) { rc = QLA_FUNCTION_FAILED; for (i = 1; i < 32; i++) { mb[i] = rd_reg_word(mbptr); mbptr++; } ql_log(ql_log_warn, vha, 0x1015, "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]); ql_log(ql_log_warn, vha, 0x1015, "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14], mb[15]); ql_log(ql_log_warn, vha, 0x1015, "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22], mb[23]); ql_log(ql_log_warn, vha, 0x1015, "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30], mb[31]); } return rc; } /** * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. * @vha: HA context * * Returns 0 on success. */ static inline int qla24xx_reset_risc(scsi_qla_host_t *vha) { unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t cnt; uint16_t wd; static int abts_cnt; /* ISP abort retry counts */ int rval = QLA_SUCCESS; int print = 1; spin_lock_irqsave(&ha->hardware_lock, flags); /* Reset RISC. 
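* DMA is shut down first; once CSRX_DMA_ACTIVE clears, the ISP soft reset is issued and mailbox 0 is polled for firmware completion.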
*/ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0) break; udelay(10); } if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE)) set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", rd_reg_dword(&reg->hccr), rd_reg_dword(&reg->ctrl_status), (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE)); wrt_reg_dword(&reg->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); /* Wait for firmware to complete NVRAM accesses. */ rd_reg_word(&reg->mailbox0); for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); if (cnt) udelay(5); else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, "HCCR: 0x%x, MailBox0 Status 0x%x\n", rd_reg_dword(&reg->hccr), rd_reg_word(&reg->mailbox0)); /* Wait for soft-reset to complete. */ rd_reg_dword(&reg->ctrl_status); for (cnt = 0; cnt < 60; cnt++) { barrier(); if ((rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) break; udelay(5); } if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET)) set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, "HCCR: 0x%x, Soft Reset status: 0x%x\n", rd_reg_dword(&reg->hccr), rd_reg_dword(&reg->ctrl_status)); /* If required, do an MPI FW reset now */ if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) { if (++abts_cnt < 5) { set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); } else { /* * We exhausted the ISP abort retries. We have to * set the board offline. */ abts_cnt = 0; vha->flags.online = 0; } } } wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET); rd_reg_dword(&reg->hccr); wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE); rd_reg_dword(&reg->hccr); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET); mdelay(10); rd_reg_dword(&reg->hccr); wd = rd_reg_word(&reg->mailbox0); for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); if (cnt) { mdelay(1); if (print && qla_chk_risc_recovery(vha)) print = 0; wd = rd_reg_word(&reg->mailbox0); } else { rval = QLA_FUNCTION_TIMEOUT; ql_log(ql_log_warn, vha, 0x015e, "RISC reset timeout\n"); } } if (rval == QLA_SUCCESS) set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, "Host Risc 0x%x, mailbox0 0x%x\n", rd_reg_dword(&reg->hccr), rd_reg_word(&reg->mailbox0)); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f, "Driver in %s mode\n", IS_NOPOLLING_TYPE(ha) ? 
"Interrupt" : "Polling"); if (IS_NOPOLLING_TYPE(ha)) ha->isp_ops->enable_intrs(ha); return rval; } static void qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data) { struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET); *data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET); } static void qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) { struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET); wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); } static void qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) { uint32_t wd32 = 0; uint delta_msec = 100; uint elapsed_msec = 0; uint timeout_msec; ulong n; if (vha->hw->pdev->subsystem_device != 0x0175 && vha->hw->pdev->subsystem_device != 0x0240) return; wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); udelay(100); attempt: timeout_msec = TIMEOUT_SEMAPHORE; n = timeout_msec / delta_msec; while (n--) { qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET); qla25xx_read_risc_sema_reg(vha, &wd32); if (wd32 & RISC_SEMAPHORE) break; msleep(delta_msec); elapsed_msec += delta_msec; if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) goto force; } if (!(wd32 & RISC_SEMAPHORE)) goto force; if (!(wd32 & RISC_SEMAPHORE_FORCE)) goto acquired; qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR); timeout_msec = TIMEOUT_SEMAPHORE_FORCE; n = timeout_msec / delta_msec; while (n--) { qla25xx_read_risc_sema_reg(vha, &wd32); if (!(wd32 & RISC_SEMAPHORE_FORCE)) break; msleep(delta_msec); elapsed_msec += delta_msec; if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED) goto force; } if (wd32 & RISC_SEMAPHORE_FORCE) qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR); goto attempt; force: qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET); acquired: return; } /** * qla24xx_reset_chip() - Reset ISP24xx chip. * @vha: HA context * * Returns 0 on success. */ int qla24xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval = QLA_FUNCTION_FAILED; if (pci_channel_offline(ha->pdev) && ha->flags.pci_channel_io_perm_failure) { return rval; } ha->isp_ops->disable_intrs(ha); qla25xx_manipulate_risc_semaphore(vha); /* Perform RISC reset. */ rval = qla24xx_reset_risc(vha); return rval; } /** * qla2x00_chip_diag() - Test chip for proper operation. * @vha: HA context * * Returns 0 on success. */ int qla2x00_chip_diag(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; unsigned long flags = 0; uint16_t data; uint32_t cnt; uint16_t mb[5]; struct req_que *req = ha->req_q_map[0]; /* Assume a failed state */ rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n", &reg->flash_address); spin_lock_irqsave(&ha->hardware_lock, flags); /* Reset ISP chip. */ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET); /* * We need to have a delay here since the card will not respond while * in reset causing an MCA on some architectures. */ udelay(20); data = qla2x00_debounce_register(&reg->ctrl_status); for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { udelay(5); data = rd_reg_word(&reg->ctrl_status); barrier(); } if (!cnt) goto chip_diag_failed; ql_dbg(ql_dbg_init, vha, 0x007c, "Reset register cleared by chip reset.\n"); /* Reset RISC processor. 
*/ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC); /* Workaround for QLA2312 PCI parity error */ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0)); for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) { udelay(5); data = RD_MAILBOX_REG(ha, reg, 0); barrier(); } } else udelay(10); if (!cnt) goto chip_diag_failed; /* Check product ID of chip */ ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n"); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || mb[3] != PROD_ID_3) { ql_log(ql_log_warn, vha, 0x0062, "Wrong product ID = 0x%x,0x%x,0x%x.\n", mb[1], mb[2], mb[3]); goto chip_diag_failed; } ha->product_id[0] = mb[1]; ha->product_id[1] = mb[2]; ha->product_id[2] = mb[3]; ha->product_id[3] = mb[4]; /* Adjust fw RISC transfer size */ if (req->length > 1024) ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; else ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; if (IS_QLA2200(ha) && RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { /* Limit firmware transfer size with a 2200A */ ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n"); ha->device_type |= DT_ISP2200A; ha->fw_transfer_size = 128; } /* Wrap Incoming Mailboxes Test. */ spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n"); rval = qla2x00_mbx_reg_test(vha); if (rval) ql_log(ql_log_warn, vha, 0x0080, "Failed mailbox send register test.\n"); else /* Flag a successful rval */ rval = QLA_SUCCESS; spin_lock_irqsave(&ha->hardware_lock, flags); chip_diag_failed: if (rval) ql_log(ql_log_info, vha, 0x0081, "Chip diagnostics **** FAILED ****.\n"); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (rval); } /** * qla24xx_chip_diag() - Test ISP24xx for proper operation. * @vha: HA context * * Returns 0 on success. */ int qla24xx_chip_diag(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; rval = qla2x00_mbx_reg_test(vha); if (rval) { ql_log(ql_log_warn, vha, 0x0082, "Failed mailbox send register test.\n"); } else { /* Flag a successful rval */ rval = QLA_SUCCESS; } return rval; } static void qla2x00_init_fce_trace(scsi_qla_host_t *vha) { int rval; dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; if (!IS_FWI2_CAPABLE(ha)) return; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return; if (ha->fce) { ql_dbg(ql_dbg_init, vha, 0x00bd, "%s: FCE Mem is already allocated.\n", __func__); return; } /* Allocate memory for Fibre Channel Event Buffer. 
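* FCE_SIZE bytes of coherent DMA memory are requested; the buffer is freed again if enabling the FCE trace fails.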
*/ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, GFP_KERNEL); if (!tc) { ql_log(ql_log_warn, vha, 0x00be, "Unable to allocate (%d KB) for FCE.\n", FCE_SIZE / 1024); return; } rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, ha->fce_mb, &ha->fce_bufs); if (rval) { ql_log(ql_log_warn, vha, 0x00bf, "Unable to initialize FCE (%d).\n", rval); dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); return; } ql_dbg(ql_dbg_init, vha, 0x00c0, "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); ha->flags.fce_enabled = 1; ha->fce_dma = tc_dma; ha->fce = tc; } static void qla2x00_init_eft_trace(scsi_qla_host_t *vha) { int rval; dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; if (!IS_FWI2_CAPABLE(ha)) return; if (ha->eft) { ql_dbg(ql_dbg_init, vha, 0x00bd, "%s: EFT Mem is already allocated.\n", __func__); return; } /* Allocate memory for Extended Trace Buffer. */ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, GFP_KERNEL); if (!tc) { ql_log(ql_log_warn, vha, 0x00c1, "Unable to allocate (%d KB) for EFT.\n", EFT_SIZE / 1024); return; } rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); if (rval) { ql_log(ql_log_warn, vha, 0x00c2, "Unable to initialize EFT (%d).\n", rval); dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); return; } ql_dbg(ql_dbg_init, vha, 0x00c3, "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); ha->eft_dma = tc_dma; ha->eft = tc; } static void qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) { qla2x00_init_fce_trace(vha); qla2x00_init_eft_trace(vha); } void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) { uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, eft_size, fce_size, mq_size; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; struct qla2xxx_fw_dump *fw_dump; if (ha->fw_dump) { ql_dbg(ql_dbg_init, vha, 0x00bd, "Firmware dump already allocated.\n"); return; } ha->fw_dumped = 0; ha->fw_dump_cap_flags = 0; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { fixed_size = sizeof(struct qla2100_fw_dump); } else if (IS_QLA23XX(ha)) { fixed_size = offsetof(struct qla2300_fw_dump, data_ram); mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { if (IS_QLA83XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); else if (IS_QLA25XX(ha)) fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); else fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { if (!IS_QLA83XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues - Q0. * Resizing must be done at end-of-dump processing. 
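* Every request and response queue beyond queue 0 is accounted for at its full ring length.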
*/ mq_size += (ha->max_req_queues - 1) * (req->length * sizeof(request_t)); mq_size += (ha->max_rsp_queues - 1) * (rsp->length * sizeof(response_t)); } if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); qla2x00_init_fce_trace(vha); if (ha->fce) fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; qla2x00_init_eft_trace(vha); if (ha->eft) eft_size = EFT_SIZE; } if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { struct fwdt *fwdt = ha->fwdt; uint j; for (j = 0; j < 2; j++, fwdt++) { if (!fwdt->template) { ql_dbg(ql_dbg_init, vha, 0x00ba, "-> fwdt%u no template\n", j); continue; } ql_dbg(ql_dbg_init, vha, 0x00fa, "-> fwdt%u calculating fwdump size...\n", j); fwdt->dump_size = qla27xx_fwdt_calculate_dump_size( vha, fwdt->template); ql_dbg(ql_dbg_init, vha, 0x00fa, "-> fwdt%u calculated fwdump size = %#lx bytes\n", j, fwdt->dump_size); dump_size += fwdt->dump_size; } /* Add space for spare MPI fw dump. */ dump_size += ha->fwdt[1].dump_size; } else { req_q_size = req->length * sizeof(request_t); rsp_q_size = rsp->length * sizeof(response_t); dump_size = offsetof(struct qla2xxx_fw_dump, isp); dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; ha->chain_offset = dump_size; dump_size += mq_size + fce_size; if (ha->exchoffld_buf) dump_size += sizeof(struct qla2xxx_offld_chain) + ha->exchoffld_size; if (ha->exlogin_buf) dump_size += sizeof(struct qla2xxx_offld_chain) + ha->exlogin_size; } if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) { ql_dbg(ql_dbg_init, vha, 0x00c5, "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n", __func__, dump_size, ha->fw_dump_len, ha->fw_dump_alloc_len); fw_dump = vmalloc(dump_size); if (!fw_dump) { ql_log(ql_log_warn, vha, 0x00c4, "Unable to allocate (%d KB) for firmware dump.\n", dump_size / 1024); } else { mutex_lock(&ha->optrom_mutex); if (ha->fw_dumped) { memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len); vfree(ha->fw_dump); ha->fw_dump = fw_dump; ha->fw_dump_alloc_len = dump_size; ql_dbg(ql_dbg_init, vha, 0x00c5, "Re-Allocated (%d KB) and save firmware dump.\n", dump_size / 1024); } else { vfree(ha->fw_dump); ha->fw_dump = fw_dump; ha->fw_dump_len = ha->fw_dump_alloc_len = dump_size; ql_dbg(ql_dbg_init, vha, 0x00c5, "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->mpi_fw_dump = (char *)fw_dump + ha->fwdt[1].dump_size; mutex_unlock(&ha->optrom_mutex); return; } ha->fw_dump->signature[0] = 'Q'; ha->fw_dump->signature[1] = 'L'; ha->fw_dump->signature[2] = 'G'; ha->fw_dump->signature[3] = 'C'; ha->fw_dump->version = htonl(1); ha->fw_dump->fixed_size = htonl(fixed_size); ha->fw_dump->mem_size = htonl(mem_size); ha->fw_dump->req_q_size = htonl(req_q_size); ha->fw_dump->rsp_q_size = htonl(rsp_q_size); ha->fw_dump->eft_size = htonl(eft_size); ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma)); ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma)); ha->fw_dump->header_size = htonl(offsetof (struct qla2xxx_fw_dump, isp)); } mutex_unlock(&ha->optrom_mutex); } } } static int qla81xx_mpi_sync(scsi_qla_host_t *vha) { #define MPS_MASK 0xe0 int rval; uint16_t dc; uint32_t dw; if (!IS_QLA81XX(vha->hw)) return QLA_SUCCESS; rval = qla2x00_write_ram_word(vha, 0x7c00, 1); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x0105, "Unable to acquire semaphore.\n"); goto done; } pci_read_config_word(vha->hw->pdev, 0x54, &dc); rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n"); goto done_release; } dc &= 
MPS_MASK; if (dc == (dw & MPS_MASK)) goto done_release; dw &= ~MPS_MASK; dw |= dc; rval = qla2x00_write_ram_word(vha, 0x7a15, dw); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n"); } done_release: rval = qla2x00_write_ram_word(vha, 0x7c00, 0); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x006d, "Unable to release semaphore.\n"); } done: return rval; } int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) { /* Don't try to reallocate the array */ if (req->outstanding_cmds) return QLA_SUCCESS; if (!IS_FWI2_CAPABLE(ha)) req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; else { if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) req->num_outstanding_cmds = ha->cur_fw_xcb_count; else req->num_outstanding_cmds = ha->cur_fw_iocb_count; } req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, sizeof(srb_t *), GFP_KERNEL); if (!req->outstanding_cmds) { /* * Try to allocate a minimal size just so we can get through * initialization. */ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, sizeof(srb_t *), GFP_KERNEL); if (!req->outstanding_cmds) { ql_log(ql_log_fatal, NULL, 0x0126, "Failed to allocate memory for " "outstanding_cmds for req_que %p.\n", req); req->num_outstanding_cmds = 0; return QLA_FUNCTION_FAILED; } } return QLA_SUCCESS; } #define PRINT_FIELD(_field, _flag, _str) { \ if (a0->_field & _flag) {\ if (p) {\ strcat(ptr, "|");\ ptr++;\ leftover--;\ } \ len = snprintf(ptr, leftover, "%s", _str); \ p = 1;\ leftover -= len;\ ptr += len; \ } \ } static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) { #define STR_LEN 64 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; u8 str[STR_LEN], *ptr, p; int leftover, len; memset(str, 0, STR_LEN); snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); ql_dbg(ql_dbg_init, vha, 0x015a, "SFP MFG Name: %s\n", str); memset(str, 0, STR_LEN); snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); ql_dbg(ql_dbg_init, vha, 0x015c, "SFP Part Name: %s\n", str); /* media */ memset(str, 0, STR_LEN); ptr = str; leftover = STR_LEN; p = len = 0; PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); ql_dbg(ql_dbg_init, vha, 0x0160, "SFP Media: %s\n", str); /* link length */ memset(str, 0, STR_LEN); ptr = str; leftover = STR_LEN; p = len = 0; PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); ql_dbg(ql_dbg_init, vha, 0x0196, "SFP Link Length: %s\n", str); memset(str, 0, STR_LEN); ptr = str; leftover = STR_LEN; p = len = 0; PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); ql_dbg(ql_dbg_init, vha, 0x016e, "SFP FC Link Tech: %s\n", str); if (a0->length_km) ql_dbg(ql_dbg_init, vha, 0x016f, "SFP Distant: %d km\n", a0->length_km); if (a0->length_100m) ql_dbg(ql_dbg_init, vha, 0x0170, "SFP Distant: %d m\n", a0->length_100m*100); if 
(a0->length_50um_10m) ql_dbg(ql_dbg_init, vha, 0x0189, "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); if (a0->length_62um_10m) ql_dbg(ql_dbg_init, vha, 0x018a, "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); if (a0->length_om4_10m) ql_dbg(ql_dbg_init, vha, 0x0194, "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); if (a0->length_om3_10m) ql_dbg(ql_dbg_init, vha, 0x0195, "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); } /** * qla24xx_detect_sfp() * * @vha: adapter state pointer. * * @return * 0 -- Configure firmware to use short-range settings -- normal * buffer-to-buffer credits. * * 1 -- Configure firmware to use long-range settings -- extra * buffer-to-buffer credits should be allocated with * ha->lr_distance containing distance settings from NVRAM or SFP * (if supported). */ int qla24xx_detect_sfp(scsi_qla_host_t *vha) { int rc, used_nvram; struct sff_8247_a0 *a; struct qla_hw_data *ha = vha->hw; struct nvram_81xx *nv = ha->nvram; #define LR_DISTANCE_UNKNOWN 2 static const char * const types[] = { "Short", "Long" }; static const char * const lengths[] = { "(10km)", "(5km)", "" }; u8 ll = 0; /* Seed with NVRAM settings. */ used_nvram = 0; ha->flags.lr_detected = 0; if (IS_BPM_RANGE_CAPABLE(ha) && (nv->enhanced_features & NEF_LR_DIST_ENABLE)) { used_nvram = 1; ha->flags.lr_detected = 1; ha->lr_distance = (nv->enhanced_features >> LR_DIST_NV_POS) & LR_DIST_NV_MASK; } if (!IS_BPM_ENABLED(vha)) goto out; /* Determine SR/LR capabilities of SFP/Transceiver. */ rc = qla2x00_read_sfp_dev(vha, NULL, 0); if (rc) goto out; used_nvram = 0; a = (struct sff_8247_a0 *)vha->hw->sfp_data; qla2xxx_print_sfp_info(vha); ha->flags.lr_detected = 0; ll = a->fc_ll_cc7; if (ll & FC_LL_VL || ll & FC_LL_L) { /* Long range, track length. */ ha->flags.lr_detected = 1; if (a->length_km > 5 || a->length_100m > 50) ha->lr_distance = LR_DISTANCE_10K; else ha->lr_distance = LR_DISTANCE_5K; } out: ql_dbg(ql_dbg_async, vha, 0x507b, "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n", types[ha->flags.lr_detected], ha->flags.lr_detected ? 
lengths[ha->lr_distance] : lengths[LR_DISTANCE_UNKNOWN], used_nvram, ll, ha->flags.lr_detected, ha->lr_distance); return ha->flags.lr_detected; } static void __qla_adjust_iocb_limit(struct qla_qpair *qpair) { u8 num_qps; u16 limit; struct qla_hw_data *ha = qpair->vha->hw; num_qps = ha->num_qpairs + 1; limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100; qpair->fwres.iocbs_total = ha->orig_fw_iocb_count; qpair->fwres.iocbs_limit = limit; qpair->fwres.iocbs_qp_limit = limit / num_qps; qpair->fwres.exch_total = ha->orig_fw_xcb_count; qpair->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100; } void qla_init_iocb_limit(scsi_qla_host_t *vha) { u8 i; struct qla_hw_data *ha = vha->hw; __qla_adjust_iocb_limit(ha->base_qpair); ha->base_qpair->fwres.iocbs_used = 0; ha->base_qpair->fwres.exch_used = 0; for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) { __qla_adjust_iocb_limit(ha->queue_pair_map[i]); ha->queue_pair_map[i]->fwres.iocbs_used = 0; ha->queue_pair_map[i]->fwres.exch_used = 0; } } ha->fwres.iocb_total = ha->orig_fw_iocb_count; ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100; ha->fwres.exch_total = ha->orig_fw_xcb_count; ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100; atomic_set(&ha->fwres.iocb_used, 0); atomic_set(&ha->fwres.exch_used, 0); } void qla_adjust_iocb_limit(scsi_qla_host_t *vha) { u8 i; struct qla_hw_data *ha = vha->hw; __qla_adjust_iocb_limit(ha->base_qpair); for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) __qla_adjust_iocb_limit(ha->queue_pair_map[i]); } } /** * qla2x00_setup_chip() - Load and start RISC firmware. * @vha: HA context * * Returns 0 on success. */ static int qla2x00_setup_chip(scsi_qla_host_t *vha) { int rval; uint32_t srisc_address = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; unsigned long flags; uint16_t fw_major_version; int done_once = 0; if (IS_P3P_TYPE(ha)) { rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { qla2x00_stop_firmware(vha); goto enable_82xx_npiv; } else goto failed; } if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { /* Disable SRAM, Instruction RAM and GP RAM parity. */ spin_lock_irqsave(&ha->hardware_lock, flags); wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0)); rd_reg_word(&reg->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } qla81xx_mpi_sync(vha); execute_fw_with_lr: /* Load firmware sequences */ rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00c9, "Verifying Checksum of loaded RISC code.\n"); rval = qla2x00_verify_checksum(vha, srisc_address); if (rval == QLA_SUCCESS) { /* Start firmware execution. */ ql_dbg(ql_dbg_init, vha, 0x00ca, "Starting firmware.\n"); if (ql2xexlogins) ha->flags.exlogins_enabled = 1; if (qla_is_exch_offld_enabled(vha)) ha->flags.exchoffld_enabled = 1; rval = qla2x00_execute_fw(vha, srisc_address); /* Retrieve firmware information. */ if (rval == QLA_SUCCESS) { /* Enable BPM support? */ if (!done_once++ && qla24xx_detect_sfp(vha)) { ql_dbg(ql_dbg_init, vha, 0x00ca, "Re-starting firmware -- BPM.\n"); /* Best-effort - re-init. 
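* Reset the chip, rerun diagnostics and jump back to reload the firmware now that long-range SFP settings have been detected.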
*/ ha->isp_ops->reset_chip(vha); ha->isp_ops->chip_diag(vha); goto execute_fw_with_lr; } if (IS_ZIO_THRESHOLD_CAPABLE(ha)) qla27xx_set_zio_threshold(vha, ha->last_zio_threshold); rval = qla2x00_set_exlogins_buffer(vha); if (rval != QLA_SUCCESS) goto failed; rval = qla2x00_set_exchoffld_buffer(vha); if (rval != QLA_SUCCESS) goto failed; enable_82xx_npiv: fw_major_version = ha->fw_major_version; if (IS_P3P_TYPE(ha)) qla82xx_check_md_needed(vha); else rval = qla2x00_get_fw_version(vha); if (rval != QLA_SUCCESS) goto failed; ha->flags.npiv_supported = 0; if (IS_QLA2XXX_MIDTYPE(ha) && (ha->fw_attributes & BIT_2)) { ha->flags.npiv_supported = 1; if ((!ha->max_npiv_vports) || ((ha->max_npiv_vports + 1) % MIN_MULTI_ID_FABRIC)) ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; } qla2x00_get_resource_cnts(vha); qla_init_iocb_limit(vha); /* * Allocate the array of outstanding commands * now that we know the firmware resources. */ rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); if (rval != QLA_SUCCESS) goto failed; if (!fw_major_version && !(IS_P3P_TYPE(ha))) qla2x00_alloc_offload_mem(vha); if (ql2xallocfwdump && !(IS_P3P_TYPE(ha))) qla2x00_alloc_fw_dump(vha); } else { goto failed; } } else { ql_log(ql_log_fatal, vha, 0x00cd, "ISP Firmware failed checksum.\n"); goto failed; } /* Enable PUREX PASSTHRU */ if (ql2xrdpenable || ha->flags.scm_supported_f || ha->flags.edif_enabled) qla25xx_set_els_cmds_supported(vha); } else goto failed; if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { /* Enable proper parity. */ spin_lock_irqsave(&ha->hardware_lock, flags); if (IS_QLA2300(ha)) /* SRAM parity */ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1); else /* SRAM, Instruction RAM and GP RAM parity */ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7); rd_reg_word(&reg->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flags.fac_supported = 1; else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { uint32_t size; rval = qla81xx_fac_get_sector_size(vha, &size); if (rval == QLA_SUCCESS) { ha->flags.fac_supported = 1; ha->fdt_block_size = size << 2; } else { ql_log(ql_log_warn, vha, 0x00ce, "Unsupported FAC firmware (%d.%02d.%02d).\n", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); if (IS_QLA83XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; } } } failed: if (rval) { ql_log(ql_log_fatal, vha, 0x00cf, "Setup chip ****FAILED****.\n"); } return (rval); } /** * qla2x00_init_response_q_entries() - Initializes response queue entries. * @rsp: response queue * * Beginning of request ring has initialization control block already built * by nvram config routine. * * Returns 0 on success. */ void qla2x00_init_response_q_entries(struct rsp_que *rsp) { uint16_t cnt; response_t *pkt; rsp->ring_ptr = rsp->ring; rsp->ring_index = 0; rsp->status_srb = NULL; pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; pkt++; } } /** * qla2x00_update_fw_options() - Read and process firmware options. * @vha: HA context * * Returns 0 on success. */ void qla2x00_update_fw_options(scsi_qla_host_t *vha) { uint16_t swing, emphasis, tx_sens, rx_sens; struct qla_hw_data *ha = vha->hw; memset(ha->fw_options, 0, sizeof(ha->fw_options)); qla2x00_get_fw_options(vha, ha->fw_options); if (IS_QLA2100(ha) || IS_QLA2200(ha)) return; /* Serial Link options. 
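* 1G and 2G swing, emphasis and Tx/Rx sensitivity values from fw_seriallink_options are packed into fw_options[10] and fw_options[11].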
*/ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115, "Serial link options.\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109, ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options)); ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; if (ha->fw_seriallink_options[3] & BIT_2) { ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING; /* 1G settings */ swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0); emphasis = (ha->fw_seriallink_options[2] & (BIT_4 | BIT_3)) >> 3; tx_sens = ha->fw_seriallink_options[0] & (BIT_3 | BIT_2 | BIT_1 | BIT_0); rx_sens = (ha->fw_seriallink_options[0] & (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; ha->fw_options[10] = (emphasis << 14) | (swing << 8); if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { if (rx_sens == 0x0) rx_sens = 0x3; ha->fw_options[10] |= (tx_sens << 4) | rx_sens; } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) ha->fw_options[10] |= BIT_5 | ((rx_sens & (BIT_1 | BIT_0)) << 2) | (tx_sens & (BIT_1 | BIT_0)); /* 2G settings */ swing = (ha->fw_seriallink_options[2] & (BIT_7 | BIT_6 | BIT_5)) >> 5; emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0); tx_sens = ha->fw_seriallink_options[1] & (BIT_3 | BIT_2 | BIT_1 | BIT_0); rx_sens = (ha->fw_seriallink_options[1] & (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4; ha->fw_options[11] = (emphasis << 14) | (swing << 8); if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { if (rx_sens == 0x0) rx_sens = 0x3; ha->fw_options[11] |= (tx_sens << 4) | rx_sens; } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) ha->fw_options[11] |= BIT_5 | ((rx_sens & (BIT_1 | BIT_0)) << 2) | (tx_sens & (BIT_1 | BIT_0)); } /* FCP2 options. */ /* Return command IOCBs without waiting for an ABTS to complete. */ ha->fw_options[3] |= BIT_13; /* LED scheme. */ if (ha->flags.enable_led_scheme) ha->fw_options[2] |= BIT_12; /* Detect ISP6312. */ if (IS_QLA6312(ha)) ha->fw_options[2] |= BIT_13; /* Set Retry FLOGI in case of P2P connection */ if (ha->operating_mode == P2P) { ha->fw_options[2] |= BIT_3; ql_dbg(ql_dbg_disc, vha, 0x2100, "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", __func__, ha->fw_options[2]); } /* Update firmware options. */ qla2x00_set_fw_options(vha, ha->fw_options); } void qla24xx_update_fw_options(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; if (IS_P3P_TYPE(ha)) return; /* Hold status IOCBs until ABTS response received. */ if (ql2xfwholdabts) ha->fw_options[3] |= BIT_12; /* Set Retry FLOGI in case of P2P connection */ if (ha->operating_mode == P2P) { ha->fw_options[2] |= BIT_3; ql_dbg(ql_dbg_disc, vha, 0x2101, "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", __func__, ha->fw_options[2]); } /* Move PUREX, ABTS RX & RIDA to ATIOQ */ if (ql2xmvasynctoatio && !ha->flags.edif_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) ha->fw_options[2] |= BIT_11; else ha->fw_options[2] &= ~BIT_11; } if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { /* * Tell FW to track each exchange to prevent * driver from using stale exchange. 
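* BIT_4 is set only when target or dual mode is enabled; otherwise it is cleared.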
*/ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) ha->fw_options[2] |= BIT_4; else ha->fw_options[2] &= ~(BIT_4); /* Reserve 1/2 of emergency exchanges for ELS.*/ if (qla2xuseresexchforels) ha->fw_options[2] |= BIT_8; else ha->fw_options[2] &= ~BIT_8; /* * N2N: set Secure=1 for PLOGI ACC and * fw shal not send PRLI after PLOGI Acc */ if (ha->flags.edif_enabled && DBELL_ACTIVE(vha)) { ha->fw_options[3] |= BIT_15; ha->flags.n2n_fw_acc_sec = 1; } else { ha->fw_options[3] &= ~BIT_15; ha->flags.n2n_fw_acc_sec = 0; } } if (ql2xrdpenable || ha->flags.scm_supported_f || ha->flags.edif_enabled) ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; /* Enable Async 8130/8131 events -- transceiver insertion/removal */ if (IS_BPM_RANGE_CAPABLE(ha)) ha->fw_options[3] |= BIT_10; ql_dbg(ql_dbg_init, vha, 0x00e8, "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", __func__, ha->fw_options[1], ha->fw_options[2], ha->fw_options[3], vha->host->active_mode); if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) qla2x00_set_fw_options(vha, ha->fw_options); /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; rval = qla2x00_set_serdes_params(vha, le16_to_cpu(ha->fw_seriallink_options24[1]), le16_to_cpu(ha->fw_seriallink_options24[2]), le16_to_cpu(ha->fw_seriallink_options24[3])); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x0104, "Unable to update Serial Link options (%x).\n", rval); } } void qla2x00_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Setup ring parameters in initialization control block. */ ha->init_cb->request_q_outpointer = cpu_to_le16(0); ha->init_cb->response_q_inpointer = cpu_to_le16(0); ha->init_cb->request_q_length = cpu_to_le16(req->length); ha->init_cb->response_q_length = cpu_to_le16(rsp->length); put_unaligned_le64(req->dma, &ha->init_cb->request_q_address); put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address); wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0); wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0); wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0); wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0); rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ } void qla24xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; device_reg_t *reg = ISP_QUE_REG(ha, 0); struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; struct qla_msix_entry *msix; struct init_cb_24xx *icb; uint16_t rid = 0; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Setup ring parameters in initialization control block. 
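* Request, response and ATIO queue lengths and DMA addresses are written into the 24xx init control block.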
*/ icb = (struct init_cb_24xx *)ha->init_cb; icb->request_q_outpointer = cpu_to_le16(0); icb->response_q_inpointer = cpu_to_le16(0); icb->request_q_length = cpu_to_le16(req->length); icb->response_q_length = cpu_to_le16(rsp->length); put_unaligned_le64(req->dma, &icb->request_q_address); put_unaligned_le64(rsp->dma, &icb->response_q_address); /* Setup ATIO queue dma pointers for target mode */ icb->atio_q_inpointer = cpu_to_le16(0); icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address); if (IS_SHADOW_REG_CAPABLE(ha)) icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29); if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS); icb->rid = cpu_to_le16(rid); if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; ql_dbg(ql_dbg_init, vha, 0x0019, "Registering vector 0x%x for base que.\n", msix->entry); icb->msix = cpu_to_le16(msix->entry); } /* Use alternate PCI bus number */ if (MSB(rid)) icb->firmware_options_2 |= cpu_to_le32(BIT_19); /* Use alternate PCI devfn */ if (LSB(rid)) icb->firmware_options_2 |= cpu_to_le32(BIT_18); /* Use Disable MSIX Handshake mode for capable adapters */ if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && (ha->flags.msix_enabled)) { icb->firmware_options_2 &= cpu_to_le32(~BIT_22); ha->flags.disable_msix_handshake = 1; ql_dbg(ql_dbg_init, vha, 0x00fe, "MSIX Handshake Disable Mode turned on.\n"); } else { icb->firmware_options_2 |= cpu_to_le32(BIT_22); } icb->firmware_options_2 |= cpu_to_le32(BIT_23); wrt_reg_dword(&reg->isp25mq.req_q_in, 0); wrt_reg_dword(&reg->isp25mq.req_q_out, 0); wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0); wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0); } else { wrt_reg_dword(&reg->isp24.req_q_in, 0); wrt_reg_dword(&reg->isp24.req_q_out, 0); wrt_reg_dword(&reg->isp24.rsp_q_in, 0); wrt_reg_dword(&reg->isp24.rsp_q_out, 0); } qlt_24xx_config_rings(vha); /* If the user has configured the speed, set it here */ if (ha->set_data_rate) { ql_dbg(ql_dbg_init, vha, 0x00fd, "Speed set by user : %s Gbps \n", qla2x00_get_link_speed_str(ha, ha->set_data_rate)); icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13); } /* PCI posting */ rd_reg_word(&ioreg->hccr); } /** * qla2x00_init_rings() - Initializes firmware. * @vha: HA context * * Beginning of request ring has initialization control block already built * by nvram config routine. * * Returns 0 on success. */ int qla2x00_init_rings(scsi_qla_host_t *vha) { int rval; unsigned long flags = 0; int cnt, que; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct rsp_que *rsp; struct mid_init_cb_24xx *mid_init_cb = (struct mid_init_cb_24xx *) ha->init_cb; spin_lock_irqsave(&ha->hardware_lock, flags); /* Clear outstanding commands array. */ for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req || !test_bit(que, ha->req_qid_map)) continue; req->out_ptr = (uint16_t *)(req->ring + req->length); *req->out_ptr = 0; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; req->current_outstanding_cmd = 1; /* Initialize firmware. 
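* Each request ring is rewound to its start with the full ring length available.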
*/ req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; } for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp || !test_bit(que, ha->rsp_qid_map)) continue; rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); *rsp->in_ptr = 0; /* Initialize response queue entries */ if (IS_QLAFX00(ha)) qlafx00_init_response_q_entries(rsp); else qla2x00_init_response_q_entries(rsp); } ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; ha->tgt.atio_ring_index = 0; /* Initialize ATIO queue entries */ qlt_init_atio_q_entries(vha); ha->isp_ops->config_rings(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (IS_QLAFX00(ha)) { rval = qlafx00_init_firmware(vha, ha->init_cb_size); goto next_check; } /* Update any ISP specific firmware options before initialization. */ ha->isp_ops->update_fw_options(vha); ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware FW opt 1-3= %08x %08x %08x.\n", le32_to_cpu(mid_init_cb->init_cb.firmware_options_1), le32_to_cpu(mid_init_cb->init_cb.firmware_options_2), le32_to_cpu(mid_init_cb->init_cb.firmware_options_3)); if (ha->flags.npiv_supported) { if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); } if (IS_FWI2_CAPABLE(ha)) { mid_init_cb->options = cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->cur_fw_xcb_count); ha->flags.dport_enabled = (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & BIT_7) != 0; ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", (ha->flags.dport_enabled) ? "enabled" : "disabled"); /* FA-WWPN Status */ ha->flags.fawwpn_enabled = (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & BIT_6) != 0; ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); /* Init_cb will be reused for other command(s). Save a backup copy of port_name */ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE); } /* ELS pass through payload is limit by frame size. */ if (ha->flags.edif_enabled) mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD); QLA_FW_STARTED(ha); rval = qla2x00_init_firmware(vha, ha->init_cb_size); next_check: if (rval) { QLA_FW_STOPPED(ha); ql_log(ql_log_fatal, vha, 0x00d2, "Init Firmware **** FAILED ****.\n"); } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0; } return (rval); } /** * qla2x00_fw_ready() - Waits for firmware ready. * @vha: HA context * * Returns 0 on success. */ static int qla2x00_fw_ready(scsi_qla_host_t *vha) { int rval; unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ uint16_t state[6]; struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(vha->hw)) return qlafx00_fw_ready(vha); /* Time to wait for loop down */ if (IS_P3P_TYPE(ha)) min_wait = 30; else min_wait = 20; /* * Firmware should take at most one RATOV to login, plus 5 seconds for * our own processing. 
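* wait_time is therefore retry_count * login_timeout + 5 seconds, bounded below by min_wait.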
*/ if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { wait_time = min_wait; } /* Min wait time if loop down */ mtime = jiffies + (min_wait * HZ); /* wait time before firmware ready */ wtime = jiffies + (wait_time * HZ); /* Wait for ISP to finish LIP */ if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x801e, "Waiting for LIP to complete.\n"); do { memset(state, -1, sizeof(state)); rval = qla2x00_get_firmware_state(vha, state); if (rval == QLA_SUCCESS) { if (state[0] < FSTATE_LOSS_OF_SYNC) { vha->device_flags &= ~DFLG_NO_CABLE; } if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { ql_dbg(ql_dbg_taskm, vha, 0x801f, "fw_state=%x 84xx=%x.\n", state[0], state[2]); if ((state[2] & FSTATE_LOGGED_IN) && (state[2] & FSTATE_WAITING_FOR_VERIFY)) { ql_dbg(ql_dbg_taskm, vha, 0x8028, "Sending verify iocb.\n"); cs84xx_time = jiffies; rval = qla84xx_init_chip(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8007, "Init chip failed.\n"); break; } /* Add time taken to initialize. */ cs84xx_time = jiffies - cs84xx_time; wtime += cs84xx_time; mtime += cs84xx_time; ql_dbg(ql_dbg_taskm, vha, 0x8008, "Increasing wait time by %ld. " "New time %ld.\n", cs84xx_time, wtime); } } else if (state[0] == FSTATE_READY) { ql_dbg(ql_dbg_taskm, vha, 0x8037, "F/W Ready - OK.\n"); qla2x00_get_retry_cnt(vha, &ha->retry_count, &ha->login_timeout, &ha->r_a_tov); rval = QLA_SUCCESS; break; } rval = QLA_FUNCTION_FAILED; if (atomic_read(&vha->loop_down_timer) && state[0] != FSTATE_READY) { /* Loop down. Timeout on min_wait for states * other than Wait for Login. */ if (time_after_eq(jiffies, mtime)) { ql_log(ql_log_info, vha, 0x8038, "Cable is unplugged...\n"); vha->device_flags |= DFLG_NO_CABLE; break; } } } else { /* Mailbox cmd failed. Timeout on min_wait. */ if (time_after_eq(jiffies, mtime) || ha->flags.isp82xx_fw_hung) break; } if (time_after_eq(jiffies, wtime)) break; /* Delay for a while */ msleep(500); } while (1); ql_dbg(ql_dbg_taskm, vha, 0x803a, "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0], state[1], state[2], state[3], state[4], state[5], jiffies); if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { ql_log(ql_log_warn, vha, 0x803b, "Firmware ready **** FAILED ****.\n"); } return (rval); } /* * qla2x00_configure_hba * Setup adapter context. * * Input: * ha = adapter state pointer. * * Returns: * 0 = success * * Context: * Kernel context. */ static int qla2x00_configure_hba(scsi_qla_host_t *vha) { int rval; uint16_t loop_id; uint16_t topo; uint16_t sw_cap; uint8_t al_pa; uint8_t area; uint8_t domain; char connect_type[22]; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); port_id_t id; unsigned long flags; /* Get host addresses. 
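 * qla2x00_get_adapter_id() below reports the adapter's own loop ID,
 * AL_PA, area and domain together with the detected topology and switch
 * capabilities; everything else in this routine is derived from those
 * values.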
*/ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); if (rval != QLA_SUCCESS) { if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || IS_CNA_CAPABLE(ha) || (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { ql_dbg(ql_dbg_disc, vha, 0x2008, "Loop is in a transition state.\n"); } else { ql_log(ql_log_warn, vha, 0x2009, "Unable to get host loop ID.\n"); if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) { ql_log(ql_log_warn, vha, 0x1151, "Doing link init.\n"); if (qla24xx_link_initialize(vha) == QLA_SUCCESS) return rval; } set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } return (rval); } if (topo == 4) { ql_log(ql_log_info, vha, 0x200a, "Cannot get topology - retrying.\n"); return (QLA_FUNCTION_FAILED); } vha->loop_id = loop_id; /* initialize */ ha->min_external_loopid = SNS_FIRST_LOOP_ID; ha->operating_mode = LOOP; switch (topo) { case 0: ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n"); ha->switch_cap = 0; ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; case 1: ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n"); ha->switch_cap = sw_cap; ha->current_topology = ISP_CFG_FL; strcpy(connect_type, "(FL_Port)"); break; case 2: ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n"); ha->switch_cap = 0; ha->operating_mode = P2P; ha->current_topology = ISP_CFG_N; strcpy(connect_type, "(N_Port-to-N_Port)"); break; case 3: ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n"); ha->switch_cap = sw_cap; ha->operating_mode = P2P; ha->current_topology = ISP_CFG_F; strcpy(connect_type, "(F_Port)"); break; default: ql_dbg(ql_dbg_disc, vha, 0x200f, "HBA in unknown topology %x, using NL.\n", topo); ha->switch_cap = 0; ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; } /* Save Host port and loop ID. 
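 * The three address bytes are stored in wire (big-endian) order, so the
 * resulting 24-bit port ID reads domain:area:al_pa; this is the same
 * composition used later when registering remote ports, i.e.
 *
 *	port_id = domain << 16 | area << 8 | al_pa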
*/ /* byte order - Big Endian */ id.b.domain = domain; id.b.area = area; id.b.al_pa = al_pa; id.b.rsvd_1 = 0; spin_lock_irqsave(&ha->hardware_lock, flags); if (vha->hw->flags.edif_enabled) { if (topo != 2) qla_update_host_map(vha, id); } else if (!(topo == 2 && ha->flags.n2n_bigger)) qla_update_host_map(vha, id); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x2010, "Topology - %s, Host Loop address 0x%x.\n", connect_type, vha->loop_id); return(rval); } inline void qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, const char *def) { char *st, *en; uint16_t index; uint64_t zero[2] = { 0 }; struct qla_hw_data *ha = vha->hw; int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); if (len > sizeof(zero)) len = sizeof(zero); if (memcmp(model, &zero, len) != 0) { memcpy(ha->model_number, model, len); st = en = ha->model_number; en += len - 1; while (en > st) { if (*en != 0x20 && *en != 0x00) break; *en-- = '\0'; } index = (ha->pdev->subsystem_device & 0xff); if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) strscpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc)); } else { index = (ha->pdev->subsystem_device & 0xff); if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) { strscpy(ha->model_number, qla2x00_model_name[index * 2], sizeof(ha->model_number)); strscpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc)); } else { strscpy(ha->model_number, def, sizeof(ha->model_number)); } } if (IS_FWI2_CAPABLE(ha)) qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc, sizeof(ha->model_desc)); } /* On sparc systems, obtain port and node WWN from firmware * properties. */ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv) { #ifdef CONFIG_SPARC struct qla_hw_data *ha = vha->hw; struct pci_dev *pdev = ha->pdev; struct device_node *dp = pci_device_to_OF_node(pdev); const u8 *val; int len; val = of_get_property(dp, "port-wwn", &len); if (val && len >= WWN_SIZE) memcpy(nv->port_name, val, WWN_SIZE); val = of_get_property(dp, "node-wwn", &len); if (val && len >= WWN_SIZE) memcpy(nv->node_name, val, WWN_SIZE); #endif } /* * NVRAM configuration for ISP 2xxx * * Input: * ha = adapter block pointer. * * Output: * initialization control block in response_ring * host adapters parameters in host adapter block * * Returns: * 0 = success. */ int qla2x00_nvram_config(scsi_qla_host_t *vha) { int rval; uint8_t chksum = 0; uint16_t cnt; uint8_t *dptr1, *dptr2; struct qla_hw_data *ha = vha->hw; init_cb_t *icb = ha->init_cb; nvram_t *nv = ha->nvram; uint8_t *ptr = ha->nvram; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; rval = QLA_SUCCESS; /* Determine NVRAM starting address. */ ha->nvram_size = sizeof(*nv); ha->nvram_base = 0; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1) ha->nvram_base = 0x80; /* Get NVRAM data and calculate checksum. */ ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size); for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) chksum += *ptr++; ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f, "Contents of NVRAM.\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110, nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || nv->nvram_version < 1) { /* Reset NVRAM data. 
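 * The checksum above is just an 8-bit sum of every NVRAM byte; a valid
 * image sums to zero.  When that check, the "ISP " signature or the
 * version test fails, the block below fabricates a minimal, functional
 * parameter set (with a deliberately invalid WWPN) so the adapter can
 * still come up.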
*/ ql_log(ql_log_warn, vha, 0x0064, "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n", chksum, nv->id, nv->nvram_version); ql_log(ql_log_warn, vha, 0x0065, "Falling back to " "functioning (yet invalid -- WWPN) defaults.\n"); /* * Set default initialization control block. */ memset(nv, 0, ha->nvram_size); nv->parameter_block_version = ICB_VERSION; if (IS_QLA23XX(ha)) { nv->firmware_options[0] = BIT_2 | BIT_1; nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; nv->frame_payload_size = cpu_to_le16(2048); nv->special_options[1] = BIT_7; } else if (IS_QLA2200(ha)) { nv->firmware_options[0] = BIT_2 | BIT_1; nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; nv->frame_payload_size = cpu_to_le16(1024); } else if (IS_QLA2100(ha)) { nv->firmware_options[0] = BIT_3 | BIT_1; nv->firmware_options[1] = BIT_5; nv->frame_payload_size = cpu_to_le16(1024); } nv->max_iocb_allocation = cpu_to_le16(256); nv->execution_throttle = cpu_to_le16(16); nv->retry_count = 8; nv->retry_delay = 1; nv->port_name[0] = 33; nv->port_name[3] = 224; nv->port_name[4] = 139; qla2xxx_nvram_wwn_from_ofw(vha, nv); nv->login_timeout = 4; /* * Set default host adapter parameters */ nv->host_p[1] = BIT_2; nv->reset_delay = 5; nv->port_down_retry_count = 8; nv->max_luns_per_target = cpu_to_le16(8); nv->link_down_timeout = 60; rval = 1; } /* Reset Initialization control block */ memset(icb, 0, ha->init_cb_size); /* * Setup driver NVRAM options. */ nv->firmware_options[0] |= (BIT_6 | BIT_1); nv->firmware_options[0] &= ~(BIT_5 | BIT_4); nv->firmware_options[1] |= (BIT_5 | BIT_0); nv->firmware_options[1] &= ~BIT_4; if (IS_QLA23XX(ha)) { nv->firmware_options[0] |= BIT_2; nv->firmware_options[0] &= ~BIT_3; nv->special_options[0] &= ~BIT_6; nv->add_firmware_options[1] |= BIT_5 | BIT_4; if (IS_QLA2300(ha)) { if (ha->fb_rev == FPM_2310) { strcpy(ha->model_number, "QLA2310"); } else { strcpy(ha->model_number, "QLA2300"); } } else { qla2x00_set_model_info(vha, nv->model_number, sizeof(nv->model_number), "QLA23xx"); } } else if (IS_QLA2200(ha)) { nv->firmware_options[0] |= BIT_2; /* * 'Point-to-point preferred, else loop' is not a safe * connection mode setting. */ if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) == (BIT_5 | BIT_4)) { /* Force 'loop preferred, else point-to-point'. */ nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4); nv->add_firmware_options[0] |= BIT_5; } strcpy(ha->model_number, "QLA22xx"); } else /*if (IS_QLA2100(ha))*/ { strcpy(ha->model_number, "QLA2100"); } /* * Copy over NVRAM RISC parameter block to initialization control block. */ dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)&nv->parameter_block_version; cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version; while (cnt--) *dptr1++ = *dptr2++; /* Copy 2nd half. */ dptr1 = (uint8_t *)icb->add_firmware_options; cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options; while (cnt--) *dptr1++ = *dptr2++; ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); /* Use alternate WWN? */ if (nv->host_p[1] & BIT_7) { memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } /* Prepare nodename */ if ((icb->firmware_options[1] & BIT_6) == 0) { /* * Firmware will apply the following mask if the nodename was * not provided. 
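 * In other words the node name is cloned from the port name and the low
 * nibble of byte 0 is cleared (node_name[0] &= 0xF0), mirroring the
 * usual WWNN-from-WWPN derivation.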
*/ memcpy(icb->node_name, icb->port_name, WWN_SIZE); icb->node_name[0] &= 0xF0; } /* * Set host adapter parameters. */ /* * BIT_7 in the host-parameters section allows for modification to * internal driver logging. */ if (nv->host_p[0] & BIT_7) ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); /* Always load RISC code on non ISP2[12]00 chips. */ if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) ha->flags.disable_risc_code_load = 0; ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; ha->flags.disable_serdes = 0; ha->operating_mode = (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; memcpy(ha->fw_seriallink_options, nv->seriallink_options, sizeof(ha->fw_seriallink_options)); /* save HBA serial number */ ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = nv->retry_count; /* Set minimum login_timeout to 4 seconds. */ if (nv->login_timeout != ql2xlogintimeout) nv->login_timeout = ql2xlogintimeout; if (nv->login_timeout < 4) nv->login_timeout = 4; ha->login_timeout = nv->login_timeout; /* Set minimum RATOV to 100 tenths of a second. */ ha->r_a_tov = 100; ha->loop_reset_delay = nv->reset_delay; /* Link Down Timeout = 0: * * When Port Down timer expires we will start returning * I/O's to OS with "DID_NO_CONNECT". * * Link Down Timeout != 0: * * The driver waits for the link to come up after link down * before returning I/Os to OS with "DID_NO_CONNECT". */ if (nv->link_down_timeout == 0) { ha->loop_down_abort_time = (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); } else { ha->link_down_timeout = nv->link_down_timeout; ha->loop_down_abort_time = (LOOP_DOWN_TIME - ha->link_down_timeout); } /* * Need enough time to try and get the port back. */ ha->port_down_retry_count = nv->port_down_retry_count; if (qlport_down_retry) ha->port_down_retry_count = qlport_down_retry; /* Set login_retry_count */ ha->login_retry_count = nv->retry_count; if (ha->port_down_retry_count == nv->port_down_retry_count && ha->port_down_retry_count > 3) ha->login_retry_count = ha->port_down_retry_count; else if (ha->port_down_retry_count > (int)ha->login_retry_count) ha->login_retry_count = ha->port_down_retry_count; if (ql2xloginretrycount) ha->login_retry_count = ql2xloginretrycount; icb->lun_enables = cpu_to_le16(0); icb->command_resource_count = 0; icb->immediate_notify_resource_count = 0; icb->timeout = cpu_to_le16(0); if (IS_QLA2100(ha) || IS_QLA2200(ha)) { /* Enable RIO */ icb->firmware_options[0] &= ~BIT_3; icb->add_firmware_options[0] &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); icb->add_firmware_options[0] |= BIT_2; icb->response_accumulation_timer = 3; icb->interrupt_delay_timer = 5; vha->flags.process_response_queue = 1; } else { /* Enable ZIO. */ if (!vha->flags.init_done) { ha->zio_mode = icb->add_firmware_options[0] & (BIT_3 | BIT_2 | BIT_1 | BIT_0); ha->zio_timer = icb->interrupt_delay_timer ? 
icb->interrupt_delay_timer : 2; } icb->add_firmware_options[0] &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; ql_log(ql_log_info, vha, 0x0068, "ZIO mode %d enabled; timer delay (%d us).\n", ha->zio_mode, ha->zio_timer * 100); icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; vha->flags.process_response_queue = 1; } } if (rval) { ql_log(ql_log_warn, vha, 0x0069, "NVRAM configuration failed.\n"); } return (rval); } void qla2x00_set_fcport_state(fc_port_t *fcport, int state) { int old_state; old_state = atomic_read(&fcport->state); atomic_set(&fcport->state, state); /* Don't print state transitions during initial allocation of fcport */ if (old_state && old_state != state) { ql_dbg(ql_dbg_disc, fcport->vha, 0x207d, "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n", fcport->port_name, port_state_str[old_state], port_state_str[state], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } } /** * qla2x00_alloc_fcport() - Allocate a generic fcport. * @vha: HA context * @flags: allocation flags * * Returns a pointer to the allocated fcport, or NULL, if none available. */ fc_port_t * qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) { fc_port_t *fcport; fcport = kzalloc(sizeof(fc_port_t), flags); if (!fcport) return NULL; fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, flags); if (!fcport->ct_desc.ct_sns) { ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); return NULL; } /* Setup fcport template structure. */ fcport->vha = vha; fcport->port_type = FCT_UNKNOWN; fcport->loop_id = FC_NO_LOOP_ID; qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; fcport->fp_speed = PORT_SPEED_UNKNOWN; fcport->disc_state = DSC_DELETED; fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; fcport->deleted = QLA_SESS_DELETED; fcport->login_retry = vha->hw->login_retry_count; fcport->chip_reset = vha->hw->base_qpair->chip_reset; fcport->logout_on_delete = 1; fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; fcport->tgt_short_link_down_cnt = 0; fcport->dev_loss_tmo = 0; if (!fcport->ct_desc.ct_sns) { ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); INIT_WORK(&fcport->free_work, qlt_free_session_done); INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); INIT_LIST_HEAD(&fcport->unsol_ctx_head); INIT_LIST_HEAD(&fcport->sess_cmd_list); spin_lock_init(&fcport->sess_cmd_lock); spin_lock_init(&fcport->edif.sa_list_lock); INIT_LIST_HEAD(&fcport->edif.tx_sa_list); INIT_LIST_HEAD(&fcport->edif.rx_sa_list); spin_lock_init(&fcport->edif.indx_list_lock); INIT_LIST_HEAD(&fcport->edif.edif_indx_list); return fcport; } void qla2x00_free_fcport(fc_port_t *fcport) { if (fcport->ct_desc.ct_sns) { dma_free_coherent(&fcport->vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns, fcport->ct_desc.ct_sns_dma); fcport->ct_desc.ct_sns = NULL; } qla_edif_flush_sa_ctl_lists(fcport); list_del(&fcport->list); qla2x00_clear_loop_id(fcport); qla_edif_list_del(fcport); kfree(fcport); } static void qla_get_login_template(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval; u32 *bp, sz; __be32 *q; 
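	/*
	 * init_cb doubles as a bounce buffer here: the firmware's PLOGI
	 * payload template is read into it and then byte-swapped, one
	 * 32-bit word at a time, into ha->plogi_els_payld (starting at the
	 * common service parameters) for later reuse by the ELS login
	 * paths once plogi_template_valid is set.
	 */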
memset(ha->init_cb, 0, ha->init_cb_size); sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, ha->init_cb, sz); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00d1, "PLOGI ELS param read fail.\n"); return; } q = (__be32 *)&ha->plogi_els_payld.fl_csp; bp = (uint32_t *)ha->init_cb; cpu_to_be32_array(q, bp, sz / 4); ha->flags.plogi_template_valid = 1; } /* * qla2x00_configure_loop * Updates Fibre Channel Device Database with what is actually on loop. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success. * 1 = error. * 2 = database was full and device was not configured. */ static int qla2x00_configure_loop(scsi_qla_host_t *vha) { int rval; unsigned long flags, save_flags; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; /* Get Initiator ID */ if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { rval = qla2x00_configure_hba(vha); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2013, "Unable to configure HBA.\n"); return (rval); } } save_flags = flags = vha->dpc_flags; ql_dbg(ql_dbg_disc, vha, 0x2014, "Configure loop -- dpc flags = 0x%lx.\n", flags); /* * If we have both an RSCN and PORT UPDATE pending then handle them * both at the same time. */ clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); clear_bit(RSCN_UPDATE, &vha->dpc_flags); qla2x00_get_data_rate(vha); qla_get_login_template(vha); /* Determine what we need to do */ if ((ha->current_topology == ISP_CFG_FL || ha->current_topology == ISP_CFG_F) && (test_bit(LOCAL_LOOP_UPDATE, &flags))) { set_bit(RSCN_UPDATE, &flags); clear_bit(LOCAL_LOOP_UPDATE, &flags); } else if (ha->current_topology == ISP_CFG_NL || ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || (test_bit(ABORT_ISP_ACTIVE, &flags))) { set_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { ql_dbg(ql_dbg_disc, vha, 0x2015, "Loop resync needed, failing.\n"); rval = QLA_FUNCTION_FAILED; } else rval = qla2x00_configure_local_loop(vha); } if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { if (LOOP_TRANSITION(vha)) { ql_dbg(ql_dbg_disc, vha, 0x2099, "Needs RSCN update and loop transition.\n"); rval = QLA_FUNCTION_FAILED; } else rval = qla2x00_configure_fabric(vha); } if (rval == QLA_SUCCESS) { if (atomic_read(&vha->loop_down_timer) || test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { rval = QLA_FUNCTION_FAILED; } else { atomic_set(&vha->loop_state, LOOP_READY); ql_dbg(ql_dbg_disc, vha, 0x2069, "LOOP READY.\n"); ha->flags.fw_init_done = 1; /* * use link up to wake up app to get ready for * authentication. */ if (ha->flags.edif_enabled && DBELL_INACTIVE(vha)) qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); /* * Process any ATIO queue entries that came in * while we weren't online. */ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { spin_lock_irqsave(&ha->tgt.atio_lock, flags); qlt_24xx_process_atio_queue(vha, 0); spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); } } } if (rval) { ql_dbg(ql_dbg_disc, vha, 0x206a, "%s *** FAILED ***.\n", __func__); } else { ql_dbg(ql_dbg_disc, vha, 0x206b, "%s: exiting normally. 
local port wwpn %8phN id %06x)\n", __func__, vha->port_name, vha->d_id.b24); } /* Restore state if a resync event occurred during processing */ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); if (test_bit(RSCN_UPDATE, &save_flags)) { set_bit(RSCN_UPDATE, &vha->dpc_flags); } } return (rval); } static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) { unsigned long flags; fc_port_t *fcport; ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__); if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->n2n_flag) { qla24xx_fcport_handle_login(vha, fcport); return QLA_SUCCESS; } } spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_retry++; spin_unlock_irqrestore(&vha->work_lock, flags); if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } return QLA_FUNCTION_FAILED; } static void qla_reinitialize_link(scsi_qla_host_t *vha) { int rval; atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); rval = qla2x00_full_login_lip(vha); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n"); } else { ql_dbg(ql_dbg_disc, vha, 0xd051, "Link reinitialization failed (%d)\n", rval); } } /* * qla2x00_configure_local_loop * Updates Fibre Channel Device Database with local loop devices. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success. */ static int qla2x00_configure_local_loop(scsi_qla_host_t *vha) { int rval, rval2; int found; fc_port_t *fcport, *new_fcport; uint16_t index; uint16_t entries; struct gid_list_info *gid; uint16_t loop_id; uint8_t domain, area, al_pa; struct qla_hw_data *ha = vha->hw; unsigned long flags; /* Inititae N2N login. */ if (N2N_TOPO(ha)) return qla2x00_configure_n2n_loop(vha); new_fcport = NULL; entries = MAX_FIBRE_DEVICES_LOOP; /* Get list of logged in devices. */ memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, &entries); if (rval != QLA_SUCCESS) goto err; ql_dbg(ql_dbg_disc, vha, 0x2011, "Entries in ID list (%d).\n", entries); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075, ha->gid_list, entries * sizeof(*ha->gid_list)); if (entries == 0) { spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_retry++; spin_unlock_irqrestore(&vha->work_lock, flags); if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { u8 loop_map_entries = 0; int rc; rc = qla2x00_get_fcal_position_map(vha, NULL, &loop_map_entries); if (rc == QLA_SUCCESS && loop_map_entries > 1) { /* * There are devices that are still not logged * in. Reinitialize to give them a chance. */ qla_reinitialize_link(vha); return QLA_FUNCTION_FAILED; } set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } } else { vha->scan.scan_retry = 0; } list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; } /* Allocate temporary fcport for any new fcports discovered. */ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0x2012, "Memory allocation failed for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto err; } new_fcport->flags &= ~FCF_FABRIC_DEVICE; /* Add devices to port list. 
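 * The walk below decodes one gid_list_info entry per logged-in device;
 * the entry stride depends on the ISP generation (gid_list_info_size)
 * and 2100/2200 parts report the loop ID in a narrower field.  Reserved
 * domains (0xFX), addresses outside this adapter's domain/area when on
 * an NL topology, and loop IDs above LAST_LOCAL_LOOP_ID are skipped
 * before the port database is queried for the remaining entries.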
*/ gid = ha->gid_list; for (index = 0; index < entries; index++) { domain = gid->domain; area = gid->area; al_pa = gid->al_pa; if (IS_QLA2100(ha) || IS_QLA2200(ha)) loop_id = gid->loop_id_2100; else loop_id = le16_to_cpu(gid->loop_id); gid = (void *)gid + ha->gid_list_info_size; /* Bypass reserved domain fields. */ if ((domain & 0xf0) == 0xf0) continue; /* Bypass if not same domain and area of adapter. */ if (area && domain && ((area != vha->d_id.b.area) || (domain != vha->d_id.b.domain)) && (ha->current_topology == ISP_CFG_NL)) continue; /* Bypass invalid local loop ID. */ if (loop_id > LAST_LOCAL_LOOP_ID) continue; memset(new_fcport->port_name, 0, WWN_SIZE); /* Fill in member data. */ new_fcport->d_id.b.domain = domain; new_fcport->d_id.b.area = area; new_fcport->d_id.b.al_pa = al_pa; new_fcport->loop_id = loop_id; new_fcport->scan_state = QLA_FCPORT_FOUND; rval2 = qla2x00_get_port_database(vha, new_fcport, 0); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2097, "Failed to retrieve fcport information " "-- get_port_database=%x, loop_id=0x%04x.\n", rval2, new_fcport->loop_id); /* Skip retry if N2N */ if (ha->current_topology != ISP_CFG_N) { ql_dbg(ql_dbg_disc, vha, 0x2105, "Scheduling resync.\n"); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); continue; } } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* Check for matching device in port list. */ found = 0; fcport = NULL; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(new_fcport->port_name, fcport->port_name, WWN_SIZE)) continue; fcport->flags &= ~FCF_FABRIC_DEVICE; fcport->loop_id = new_fcport->loop_id; fcport->port_type = new_fcport->port_type; fcport->d_id.b24 = new_fcport->d_id.b24; memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); fcport->scan_state = QLA_FCPORT_FOUND; if (fcport->login_retry == 0) { fcport->login_retry = vha->hw->login_retry_count; ql_dbg(ql_dbg_disc, vha, 0x2135, "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", fcport->port_name, fcport->loop_id, fcport->login_retry); } found++; break; } if (!found) { /* New device, add to fcports list. */ list_add_tail(&new_fcport->list, &vha->vp_fcports); /* Allocate a new replacement fcport. */ fcport = new_fcport; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0xd031, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; goto err; } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); new_fcport->flags &= ~FCF_FABRIC_DEVICE; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); /* Base iIDMA settings on HBA port speed. 
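 * Each local-loop port starts out assuming the HBA's own negotiated link
 * rate; qla2x00_iidma_fcport() later refines the setting per port (and
 * only when the firmware advertises GPSC support), so seeding fp_speed
 * here merely gives that mailbox command a sane starting value.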
*/ fcport->fp_speed = ha->link_data_rate; } list_for_each_entry(fcport, &vha->vp_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; if (fcport->scan_state == QLA_FCPORT_SCAN) { if ((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_BROADCAST) { ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion(fcport); continue; } } } if (fcport->scan_state == QLA_FCPORT_FOUND) qla24xx_fcport_handle_login(vha, fcport); } qla2x00_free_fcport(new_fcport); return rval; err: ql_dbg(ql_dbg_disc, vha, 0x2098, "Configure local loop error exit: rval=%x.\n", rval); return rval; } static void qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval; uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; if (!IS_IIDMA_CAPABLE(ha)) return; if (atomic_read(&fcport->state) != FCS_ONLINE) return; if (fcport->fp_speed == PORT_SPEED_UNKNOWN || fcport->fp_speed > ha->link_data_rate || !ha->flags.gpsc_supported) return; rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, mb); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2004, "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); } else { ql_dbg(ql_dbg_disc, vha, 0x2005, "iIDMA adjusted to %s GB/s (%X) on %8phN.\n", qla2x00_get_link_speed_str(ha, fcport->fp_speed), fcport->fp_speed, fcport->port_name); } } void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport) { qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); } int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; return qla2x00_post_work(vha, e); } /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/ static void qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) { struct fc_rport_identifiers rport_ids; struct fc_rport *rport; unsigned long flags; if (atomic_read(&fcport->state) == FCS_ONLINE) return; rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); if (!rport) { ql_log(ql_log_warn, vha, 0x2006, "Unable to allocate fc remote port.\n"); return; } spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); fcport->dev_loss_tmo = rport->dev_loss_tmo; rport->supported_classes = fcport->supported_classes; rport_ids.roles = FC_PORT_ROLE_UNKNOWN; if (fcport->port_type == FCT_INITIATOR) rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; if (fcport->port_type == FCT_TARGET) rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (fcport->port_type & FCT_NVME_INITIATOR) rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; if (fcport->port_type & FCT_NVME_TARGET) rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; if (fcport->port_type & FCT_NVME_DISCOVERY) rport_ids.roles |= 
FC_PORT_ROLE_NVME_DISCOVERY; fc_remote_port_rolechg(rport, rport_ids.roles); ql_dbg(ql_dbg_disc, vha, 0x20ee, "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n", __func__, fcport->port_name, vha->host_no, rport->scsi_target_id, rport, (fcport->port_type == FCT_TARGET) ? "tgt" : ((fcport->port_type & FCT_NVME) ? "nvme" : "ini")); } /* * qla2x00_update_fcport * Updates device on list. * * Input: * ha = adapter block pointer. * fcport = port structure pointer. * * Return: * 0 - Success * BIT_0 - error * * Context: * Kernel context. */ void qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { unsigned long flags; if (IS_SW_RESV_ADDR(fcport->d_id)) return; ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", __func__, fcport->port_name); qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); spin_lock_irqsave(&vha->work_lock, flags); fcport->deleted = 0; spin_unlock_irqrestore(&vha->work_lock, flags); if (vha->hw->current_topology == ISP_CFG_NL) fcport->logout_on_delete = 0; else fcport->logout_on_delete = 1; fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) { fcport->tgt_short_link_down_cnt++; fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; } switch (vha->hw->current_topology) { case ISP_CFG_N: case ISP_CFG_NL: fcport->keep_nport_handle = 1; break; default: break; } qla2x00_iidma_fcport(vha, fcport); qla2x00_dfs_create_rport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); switch (vha->host->active_mode) { case MODE_INITIATOR: qla2x00_reg_remote_port(vha, fcport); break; case MODE_TARGET: if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); break; case MODE_DUAL: qla2x00_reg_remote_port(vha, fcport); if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); break; default: break; } if (NVME_TARGET(vha->hw, fcport)) qla_nvme_register_remote(vha, fcport); qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { if (fcport->id_changed) { fcport->id_changed = 0; ql_dbg(ql_dbg_disc, vha, 0x20d7, "%s %d %8phC post gfpnid fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); qla24xx_post_gfpnid_work(vha, fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20d7, "%s %d %8phC post gpsc fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); qla24xx_post_gpsc_work(vha, fcport); } } qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); } void qla_register_fcport_fn(struct work_struct *work) { fc_port_t *fcport = container_of(work, struct fc_port, reg_work); u32 rscn_gen = fcport->rscn_gen; u16 data[2]; if (IS_SW_RESV_ADDR(fcport->d_id)) return; qla2x00_update_fcport(fcport->vha, fcport); ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, "%s rscn gen %d/%d next DS %d\n", __func__, rscn_gen, fcport->rscn_gen, fcport->next_disc_state); if (rscn_gen != fcport->rscn_gen) { /* RSCN(s) came in while registration */ switch (fcport->next_disc_state) { case DSC_DELETE_PEND: qlt_schedule_sess_for_deletion(fcport); break; case DSC_ADISC: data[0] = data[1] = 0; qla2x00_post_async_adisc_work(fcport->vha, fcport, data); break; default: break; } } } /* * qla2x00_configure_fabric * Setup SNS devices with loop ID's. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success. 
* BIT_0 = error */ static int qla2x00_configure_fabric(scsi_qla_host_t *vha) { int rval; fc_port_t *fcport; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t loop_id; struct qla_hw_data *ha = vha->hw; int discovery_gen; /* If FL port exists, then SNS is present */ if (IS_FWI2_CAPABLE(ha)) loop_id = NPH_F_PORT; else loop_id = SNS_FL_PORT; rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x20a0, "MBX_GET_PORT_NAME failed, No FL Port.\n"); vha->device_flags &= ~SWITCH_FOUND; return (QLA_SUCCESS); } vha->device_flags |= SWITCH_FOUND; rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0); if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_disc, vha, 0x20ff, "Failed to get Fabric Port Name\n"); if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { rval = qla2x00_send_change_request(vha, 0x3, 0); if (rval != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x121, "Failed to enable receiving of RSCN requests: 0x%x.\n", rval); } do { qla2x00_mgmt_svr_login(vha); /* Ensure we are logged into the SNS. */ loop_id = NPH_SNS_LID(ha); rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 0xfc, mb, BIT_1|BIT_0); if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_disc, vha, 0x20a1, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n", loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return rval; } /* FDMI support. */ if (ql2xfdmienable && test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) qla2x00_fdmi_register(vha); if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { if (qla2x00_rft_id(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x20a2, "Register FC-4 TYPE failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x209a, "Register FC-4 Features failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } if (vha->flags.nvme_enabled) { if (qla2x00_rff_id(vha, FC_TYPE_NVME)) { ql_dbg(ql_dbg_disc, vha, 0x2049, "Register NVME FC Type Features failed.\n"); } } if (qla2x00_rnn_id(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x2104, "Register Node Name failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } else if (qla2x00_rsnn_nn(vha)) { /* EMPTY */ ql_dbg(ql_dbg_disc, vha, 0x209b, "Register Symbolic Node Name failed.\n"); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } } /* Mark the time right before querying FW for connected ports. * This process is long, asynchronous and by the time it's done, * collected information might not be accurate anymore. E.g. * disconnected port might have re-connected and a brand new * session has been created. In this case session's generation * will be newer than discovery_gen. */ qlt_do_generation_tick(vha, &discovery_gen); if (USE_ASYNC_SCAN(ha)) { rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, NULL); if (rval) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } else { list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->scan_state = QLA_FCPORT_SCAN; rval = qla2x00_find_all_fabric_devs(vha); } if (rval != QLA_SUCCESS) break; } while (0); if (!vha->nvme_local_port && vha->flags.nvme_enabled) qla_nvme_register_hba(vha); if (rval) ql_dbg(ql_dbg_disc, vha, 0x2068, "Configure fabric error exit rval=%d.\n", rval); return (rval); } /* * qla2x00_find_all_fabric_devs * * Input: * ha = adapter block pointer. * dev = database device entry pointer. 
* * Returns: * 0 = success. * * Context: * Kernel context. */ static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) { int rval; uint16_t loop_id; fc_port_t *fcport, *new_fcport; int found; sw_info_t *swl; int swl_idx; int first_dev, last_dev; port_id_t wrap = {}, nxt_d_id; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); unsigned long flags; rval = QLA_SUCCESS; /* Try GID_PT to get device list, else GAN. */ if (!ha->swl) ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t), GFP_KERNEL); swl = ha->swl; if (!swl) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x209c, "GID_PT allocations failed, fallback on GA_NXT.\n"); } else { memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t)); if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { swl = NULL; if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) return rval; } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) return rval; } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) return rval; } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) { swl = NULL; if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) return rval; } /* If other queries succeeded probe for FC-4 type */ if (swl) { qla2x00_gff_id(vha, swl); if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) return rval; } } swl_idx = 0; /* Allocate temporary fcport for any new fcports discovered. */ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0x209d, "Failed to allocate memory for fcport.\n"); return (QLA_MEMORY_ALLOC_FAILED); } new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); /* Set start port ID scan at adapter ID. */ first_dev = 1; last_dev = 0; /* Starting free loop ID. */ loop_id = ha->min_external_loopid; for (; loop_id <= ha->max_loop_id; loop_id++) { if (qla2x00_is_reserved_id(vha, loop_id)) continue; if (ha->current_topology == ISP_CFG_FL && (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))) { atomic_set(&vha->loop_down_timer, 0); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; } if (swl != NULL) { if (last_dev) { wrap.b24 = new_fcport->d_id.b24; } else { new_fcport->d_id.b24 = swl[swl_idx].d_id.b24; memcpy(new_fcport->node_name, swl[swl_idx].node_name, WWN_SIZE); memcpy(new_fcport->port_name, swl[swl_idx].port_name, WWN_SIZE); memcpy(new_fcport->fabric_port_name, swl[swl_idx].fabric_port_name, WWN_SIZE); new_fcport->fp_speed = swl[swl_idx].fp_speed; new_fcport->fc4_type = swl[swl_idx].fc4_type; new_fcport->nvme_flag = 0; if (vha->flags.nvme_enabled && swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) { ql_log(ql_log_info, vha, 0x2131, "FOUND: NVME port %8phC as FC Type 28h\n", new_fcport->port_name); } if (swl[swl_idx].d_id.b.rsvd_1 != 0) { last_dev = 1; } swl_idx++; } } else { /* Send GA_NXT to the switch */ rval = qla2x00_ga_nxt(vha, new_fcport); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x209e, "SNS scan failed -- assuming " "zero-entry result.\n"); rval = QLA_SUCCESS; break; } } /* If wrap on switch device list, exit. */ if (first_dev) { wrap.b24 = new_fcport->d_id.b24; first_dev = 0; } else if (new_fcport->d_id.b24 == wrap.b24) { ql_dbg(ql_dbg_disc, vha, 0x209f, "Device wrap (%02x%02x%02x).\n", new_fcport->d_id.b.domain, new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa); break; } /* Bypass if same physical adapter. 
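 * From here the candidate entry is filtered before any further fabric
 * work: our own port ID, the IDs of our NPIV vports, ports sharing our
 * domain/area when attached through an FL_Port, reserved domains (0xFX)
 * and, when ql2xgffidenable is set, ports whose GFF_ID data shows no FCP
 * support are all skipped.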
*/ if (new_fcport->d_id.b24 == base_vha->d_id.b24) continue; /* Bypass virtual ports of the same host. */ if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24)) continue; /* Bypass if same domain and area of adapter. */ if (((new_fcport->d_id.b24 & 0xffff00) == (vha->d_id.b24 & 0xffff00)) && ha->current_topology == ISP_CFG_FL) continue; /* Bypass reserved domain fields. */ if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) continue; /* Bypass ports whose FCP-4 type is not FCP_SCSI */ if (ql2xgffidenable && (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) && new_fcport->fc4_type != 0)) continue; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* Locate matching device in database. */ found = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(new_fcport->port_name, fcport->port_name, WWN_SIZE)) continue; fcport->scan_state = QLA_FCPORT_FOUND; found++; /* Update port state. */ memcpy(fcport->fabric_port_name, new_fcport->fabric_port_name, WWN_SIZE); fcport->fp_speed = new_fcport->fp_speed; /* * If address the same and state FCS_ONLINE * (or in target mode), nothing changed. */ if (fcport->d_id.b24 == new_fcport->d_id.b24 && (atomic_read(&fcport->state) == FCS_ONLINE || (vha->host->active_mode == MODE_TARGET))) { break; } if (fcport->login_retry == 0) fcport->login_retry = vha->hw->login_retry_count; /* * If device was not a fabric device before. */ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { fcport->d_id.b24 = new_fcport->d_id.b24; qla2x00_clear_loop_id(fcport); fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); break; } /* * Port ID changed or device was marked to be updated; * Log it out if still logged in and mark it for * relogin later. */ if (qla_tgt_mode_enabled(base_vha)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, "port changed FC ID, %8phC" " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->loop_id, new_fcport->d_id.b.domain, new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa); fcport->d_id.b24 = new_fcport->d_id.b24; break; } fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; break; } if (found && NVME_TARGET(vha->hw, fcport)) { if (fcport->disc_state == DSC_DELETE_PEND) { qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; fcport->login_succ = 0; } } if (found) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); continue; } /* If device was not in our fcports list, then add it. */ new_fcport->scan_state = QLA_FCPORT_FOUND; list_add_tail(&new_fcport->list, &vha->vp_fcports); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); /* Allocate a new replacement fcport. */ nxt_d_id.b24 = new_fcport->d_id.b24; new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0xd032, "Memory allocation failed for fcport.\n"); return (QLA_MEMORY_ALLOC_FAILED); } new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); new_fcport->d_id.b24 = nxt_d_id.b24; } qla2x00_free_fcport(new_fcport); /* * Logout all previous fabric dev marked lost, except FCP2 devices. 
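 * Anything still flagged QLA_FCPORT_SCAN at this point was not seen in
 * the current name-server sweep.  Those ports are marked lost and, unless
 * they are FCP2, initiator or broadcast entries, their sessions are
 * scheduled for deletion; ports that were found and still need a login
 * are handed to qla24xx_fcport_handle_login().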
*/ list_for_each_entry(fcport, &vha->vp_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; if (fcport->scan_state == QLA_FCPORT_SCAN) { if ((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_BROADCAST) { ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion(fcport); continue; } } } if (fcport->scan_state == QLA_FCPORT_FOUND && (fcport->flags & FCF_LOGIN_NEEDED) != 0) qla24xx_fcport_handle_login(vha, fcport); } return (rval); } /* FW does not set aside Loop id for MGMT Server/FFFFFAh */ int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha) { int loop_id = FC_NO_LOOP_ID; int lid = NPH_MGMT_SERVER - vha->vp_idx; unsigned long flags; struct qla_hw_data *ha = vha->hw; if (vha->vp_idx == 0) { set_bit(NPH_MGMT_SERVER, ha->loop_id_map); return NPH_MGMT_SERVER; } /* pick id from high and work down to low */ spin_lock_irqsave(&ha->vport_slock, flags); for (; lid > 0; lid--) { if (!test_bit(lid, vha->hw->loop_id_map)) { set_bit(lid, vha->hw->loop_id_map); loop_id = lid; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return loop_id; } /* * qla2x00_fabric_login * Issue fabric login command. * * Input: * ha = adapter block pointer. * device = pointer to FC device type structure. * * Returns: * 0 - Login successfully * 1 - Login failed * 2 - Initiator device * 3 - Fatal error */ int qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t *next_loopid) { int rval; int retry; uint16_t tmp_loopid; uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; retry = 0; tmp_loopid = 0; for (;;) { ql_dbg(ql_dbg_disc, vha, 0x2000, "Trying Fabric Login w/loop id 0x%04x for port " "%02x%02x%02x.\n", fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); /* Login fcport on switch. */ rval = ha->isp_ops->fabric_login(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mb, BIT_0); if (rval != QLA_SUCCESS) { return rval; } if (mb[0] == MBS_PORT_ID_USED) { /* * Device has another loop ID. The firmware team * recommends the driver perform an implicit login with * the specified ID again. The ID we just used is save * here so we return with an ID that can be tried by * the next login. */ retry++; tmp_loopid = fcport->loop_id; fcport->loop_id = mb[1]; ql_dbg(ql_dbg_disc, vha, 0x2001, "Fabric Login: port in use - next loop " "id=0x%04x, port id= %02x%02x%02x.\n", fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } else if (mb[0] == MBS_COMMAND_COMPLETE) { /* * Login succeeded. */ if (retry) { /* A retry occurred before. */ *next_loopid = tmp_loopid; } else { /* * No retry occurred before. Just increment the * ID value for next login. 
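 * (That is, *next_loopid simply becomes fcport->loop_id + 1, handing the
 * caller a fresh candidate for its next fabric login attempt.)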
*/ *next_loopid = (fcport->loop_id + 1); } if (mb[1] & BIT_0) { fcport->port_type = FCT_INITIATOR; } else { fcport->port_type = FCT_TARGET; if (mb[1] & BIT_1) { fcport->flags |= FCF_FCP2_DEVICE; } } if (mb[10] & BIT_0) fcport->supported_classes |= FC_COS_CLASS2; if (mb[10] & BIT_1) fcport->supported_classes |= FC_COS_CLASS3; if (IS_FWI2_CAPABLE(ha)) { if (mb[10] & BIT_7) fcport->flags |= FCF_CONF_COMP_SUPPORTED; } rval = QLA_SUCCESS; break; } else if (mb[0] == MBS_LOOP_ID_USED) { /* * Loop ID already used, try next loop ID. */ fcport->loop_id++; rval = qla2x00_find_new_loop_id(vha, fcport); if (rval != QLA_SUCCESS) { /* Ran out of loop IDs to use */ break; } } else if (mb[0] == MBS_COMMAND_ERROR) { /* * Firmware possibly timed out during login. If NO * retries are left to do then the device is declared * dead. */ *next_loopid = fcport->loop_id; ha->isp_ops->fabric_logout(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); qla2x00_mark_device_lost(vha, fcport, 1); rval = 1; break; } else { /* * unrecoverable / not handled error */ ql_dbg(ql_dbg_disc, vha, 0x2002, "Failed=%x port_id=%02x%02x%02x loop_id=%x " "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->loop_id, jiffies); *next_loopid = fcport->loop_id; ha->isp_ops->fabric_logout(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); qla2x00_clear_loop_id(fcport); fcport->login_retry = 0; rval = 3; break; } } return (rval); } /* * qla2x00_local_device_login * Issue local device login command. * * Input: * ha = adapter block pointer. * loop_id = loop id of device to login to. * * Returns (Where's the #define!!!!): * 0 - Login successfully * 1 - Login failed * 3 - Fatal error */ int qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval; uint16_t mb[MAILBOX_REGISTER_COUNT]; memset(mb, 0, sizeof(mb)); rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0); if (rval == QLA_SUCCESS) { /* Interrogate mailbox registers for any errors */ if (mb[0] == MBS_COMMAND_ERROR) rval = 1; else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR) /* device not in PCB table */ rval = 3; } return (rval); } /* * qla2x00_loop_resync * Resync with fibre channel devices. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ int qla2x00_loop_resync(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t wait_time; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); if (vha->flags.online) { if (!(rval = qla2x00_fw_ready(vha))) { /* Wait at most MAX_TARGET RSCNs for a stable link. */ wait_time = 256; do { if (!IS_QLAFX00(vha->hw)) { /* * Issue a marker after FW becomes * ready. */ qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL); vha->marker_needed = 0; } /* Remap devices on Loop. */ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); if (IS_QLAFX00(vha->hw)) qlafx00_configure_devices(vha); else qla2x00_configure_loop(vha); wait_time--; } while (!atomic_read(&vha->loop_down_timer) && !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) && wait_time && (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))); } } if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) return (QLA_FUNCTION_FAILED); if (rval) ql_dbg(ql_dbg_disc, vha, 0x206c, "%s *** FAILED ***.\n", __func__); return (rval); } /* * qla2x00_perform_loop_resync * Description: This function will set the appropriate flags and call * qla2x00_loop_resync. 
 *		If successful, the loop will be resynced.
 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
 */
int
qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/* Configure the flags so that resync happens properly */
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}

	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 * - No other protocol drivers present.
	 * - This is the lowest among fcoe functions.
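	 *
	 * Worked example (illustrative port numbers only): if this function
	 * is port 2 and the partition info reports another FCoE function at
	 * port 5, drv_presence_mask clears bits 2 and 5.  Ownership is then
	 * claimed only when no other presence bits survive the mask and
	 * 2 < 5 holds, i.e. we are the lowest-numbered FCoE function.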
*/ if (!(drv_presence & drv_presence_mask) && (ha->portnum < fcoe_other_function)) { ql_dbg(ql_dbg_p3p, vha, 0xb07f, "This host is Reset owner.\n"); ha->flags.nic_core_reset_owner = 1; } } static int __qla83xx_set_drv_ack(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint32_t drv_ack; rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); if (rval == QLA_SUCCESS) { drv_ack |= (1 << ha->portnum); rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); } return rval; } static int __qla83xx_clear_drv_ack(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint32_t drv_ack; rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); if (rval == QLA_SUCCESS) { drv_ack &= ~(1 << ha->portnum); rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack); } return rval; } /* Assumes idc-lock always held on entry */ void qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type) { struct qla_hw_data *ha = vha->hw; uint32_t idc_audit_reg = 0, duration_secs = 0; switch (audit_type) { case IDC_AUDIT_TIMESTAMP: ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000); idc_audit_reg = (ha->portnum) | (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8); qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); break; case IDC_AUDIT_COMPLETION: duration_secs = ((jiffies_to_msecs(jiffies) - jiffies_to_msecs(ha->idc_audit_ts)) / 1000); idc_audit_reg = (ha->portnum) | (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8); qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg); break; default: ql_log(ql_log_warn, vha, 0xb078, "Invalid audit type specified.\n"); break; } } /* Assumes idc_lock always held on entry */ static int qla83xx_initiating_reset(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t idc_control, dev_state; __qla83xx_get_idc_control(vha, &idc_control); if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) { ql_log(ql_log_info, vha, 0xb080, "NIC Core reset has been disabled. idc-control=0x%x\n", idc_control); return QLA_FUNCTION_FAILED; } /* Set NEED-RESET iff in READY state and we are the reset-owner */ qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) { qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_NEED_RESET); ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n"); qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); } else { ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", qdev_state(dev_state)); /* SV: XXX: Is timeout required here? 
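 * As written, the wait below polls the IDC device state register
 * (QLA83XX_IDC_DEV_STATE) every 200 ms with the IDC lock dropped and has
 * no upper bound; it exits only once the state leaves READY, typically
 * because the reset owner has moved it to NEED_RESET.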
*/ /* Wait for IDC state change READY -> NEED_RESET */ while (dev_state == QLA8XXX_DEV_READY) { qla83xx_idc_unlock(vha, 0); msleep(200); qla83xx_idc_lock(vha, 0); qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state); } } /* Send IDC ack by writing to drv-ack register */ __qla83xx_set_drv_ack(vha); return QLA_SUCCESS; } int __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control) { return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control); } int __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control) { return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control); } static int qla83xx_check_driver_presence(scsi_qla_host_t *vha) { uint32_t drv_presence = 0; struct qla_hw_data *ha = vha->hw; qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); if (drv_presence & (1 << ha->portnum)) return QLA_SUCCESS; else return QLA_TEST_FAILED; } int qla83xx_nic_core_reset(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_p3p, vha, 0xb058, "Entered %s().\n", __func__); if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0xb059, "Device in unrecoverable FAILED state.\n"); return QLA_FUNCTION_FAILED; } qla83xx_idc_lock(vha, 0); if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb05a, "Function=0x%x has been removed from IDC participation.\n", ha->portnum); rval = QLA_FUNCTION_FAILED; goto exit; } qla83xx_reset_ownership(vha); rval = qla83xx_initiating_reset(vha); /* * Perform reset if we are the reset-owner, * else wait till IDC state changes to READY/FAILED. */ if (rval == QLA_SUCCESS) { rval = qla83xx_idc_state_handler(vha); if (rval == QLA_SUCCESS) ha->flags.nic_core_hung = 0; __qla83xx_clear_drv_ack(vha); } exit: qla83xx_idc_unlock(vha, 0); ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__); return rval; } int qla2xxx_mctp_dump(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval = QLA_FUNCTION_FAILED; if (!IS_MCTP_CAPABLE(ha)) { /* This message can be removed from the final version */ ql_log(ql_log_info, vha, 0x506d, "This board is not MCTP capable\n"); return rval; } if (!ha->mctp_dump) { ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL); if (!ha->mctp_dump) { ql_log(ql_log_warn, vha, 0x506e, "Failed to allocate memory for mctp dump\n"); return rval; } } #define MCTP_DUMP_STR_ADDR 0x00000000 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma, MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x506f, "Failed to capture mctp dump\n"); } else { ql_log(ql_log_info, vha, 0x5070, "Mctp dump capture for host (%ld/%p).\n", vha->host_no, ha->mctp_dump); ha->mctp_dumped = 1; } if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) { ha->flags.nic_core_reset_hdlr_active = 1; rval = qla83xx_restart_nic_firmware(vha); if (rval) /* NIC Core reset failed. 
*/ ql_log(ql_log_warn, vha, 0x5071, "Failed to restart nic firmware\n"); else ql_dbg(ql_dbg_p3p, vha, 0xb084, "Restarted NIC firmware successfully.\n"); ha->flags.nic_core_reset_hdlr_active = 0; } return rval; } /* * qla2x00_quiesce_io * Description: This function will block the new I/Os * Its not aborting any I/Os as context * is not destroyed during quiescence * Arguments: scsi_qla_host_t * return : void */ void qla2x00_quiesce_io(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *tvp; unsigned long flags; ql_dbg(ql_dbg_dpc, vha, 0x401d, "Quiescing I/O - ha=%p.\n", ha); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } spin_unlock_irqrestore(&ha->vport_slock, flags); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } /* Wait for pending cmds to complete */ WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != QLA_SUCCESS); } void qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *tvp; unsigned long flags; fc_port_t *fcport; u16 i; /* For ISP82XX, driver waits for completion of the commands. * online flag should be set. */ if (!(IS_P3P_TYPE(ha))) vha->flags.online = 0; ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); vha->qla_stats.total_isp_aborts++; ql_log(ql_log_info, vha, 0x00af, "Performing ISP error recovery - ha=%p.\n", ha); ha->flags.purge_mbox = 1; /* For ISP82XX, reset_chip is just disabling interrupts. * Driver waits for the completion of the commands. * the interrupts need to be enabled. 
*/ if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); ha->link_data_rate = PORT_SPEED_UNKNOWN; SAVE_TOPO(ha); ha->flags.rida_fmt2 = 0; ha->flags.n2n_ae = 0; ha->flags.lip_ae = 0; ha->current_topology = 0; QLA_FW_STOPPED(ha); ha->flags.fw_init_done = 0; ha->chip_reset++; ha->base_qpair->chip_reset = ha->chip_reset; ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; ha->base_qpair->prev_completion_cnt = 0; for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) { ha->queue_pair_map[i]->chip_reset = ha->base_qpair->chip_reset; ha->queue_pair_map[i]->cmd_cnt = ha->queue_pair_map[i]->cmd_completion_cnt = 0; ha->base_qpair->prev_completion_cnt = 0; } } /* purge MBox commands */ spin_lock_irqsave(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) { clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); complete(&ha->mbx_intr_comp); } spin_unlock_irqrestore(&ha->hardware_lock, flags); i = 0; while (atomic_read(&ha->num_pend_mbx_stage2) || atomic_read(&ha->num_pend_mbx_stage1)) { msleep(20); i++; if (i > 50) break; } ha->flags.purge_mbox = 0; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } spin_unlock_irqrestore(&ha->vport_slock, flags); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } /* Clear all async request states across all VPs. */ list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); fcport->scan_state = 0; } spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); list_for_each_entry(fcport, &vp->vp_fcports, list) fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } spin_unlock_irqrestore(&ha->vport_slock, flags); /* Make sure for ISP 82XX IO DMA is complete */ if (IS_P3P_TYPE(ha)) { qla82xx_chip_reset_cleanup(vha); ql_log(ql_log_info, vha, 0x00b4, "Done chip reset cleanup.\n"); /* Done waiting for pending commands. Reset online flag */ vha->flags.online = 0; } /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); /* memory barrier */ wmb(); } /* * qla2x00_abort_isp * Resets ISP and aborts all outstanding commands. * * Input: * ha = adapter block pointer. 
* * Returns: * 0 = success */ int qla2x00_abort_isp(scsi_qla_host_t *vha) { int rval; uint8_t status = 0; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *tvp; struct req_que *req = ha->req_q_map[0]; unsigned long flags; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS; vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; if (vha->hw->flags.port_isolated) return status; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803f, "ISP Abort - ISP reg disconnect, exiting.\n"); return status; } if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { ha->flags.chip_reset_done = 1; vha->flags.online = 1; status = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); return status; } if (IS_QLA8031(ha)) { ql_dbg(ql_dbg_p3p, vha, 0xb05c, "Clearing fcoe driver presence.\n"); if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS) ql_dbg(ql_dbg_p3p, vha, 0xb073, "Error while clearing DRV-Presence.\n"); } if (unlikely(pci_channel_offline(ha->pdev) && ha->flags.pci_channel_io_perm_failure)) { clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); status = 0; return status; } switch (vha->qlini_mode) { case QLA2XXX_INI_MODE_DISABLED: if (!qla_tgt_mode_enabled(vha)) return 0; break; case QLA2XXX_INI_MODE_DUAL: if (!qla_dual_mode_enabled(vha) && !qla_ini_mode_enabled(vha)) return 0; break; case QLA2XXX_INI_MODE_ENABLED: default: break; } ha->isp_ops->get_flash_version(vha, req->ring); if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803f, "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n"); return status; } ha->isp_ops->nvram_config(vha); if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803f, "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n"); return status; } if (!qla2x00_restart_isp(vha)) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); if (!atomic_read(&vha->loop_down_timer)) { /* * Issue marker command only when we are going * to start the I/O . */ vha->marker_needed = 1; } vha->flags.online = 1; ha->isp_ops->enable_intrs(ha); ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); if (IS_QLA81XX(ha) || IS_QLA8031(ha)) qla2x00_get_fw_version(vha); if (ha->fce) { ha->flags.fce_enabled = 1; memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { ql_log(ql_log_warn, vha, 0x8033, "Unable to reinitialize FCE " "(%d).\n", rval); ha->flags.fce_enabled = 0; } } if (ha->eft) { memset(ha->eft, 0, EFT_SIZE); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS); if (rval) { ql_log(ql_log_warn, vha, 0x8034, "Unable to reinitialize EFT " "(%d).\n", rval); } } } else { /* failed the ISP abort */ vha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (ha->isp_abort_cnt == 0) { ql_log(ql_log_fatal, vha, 0x8035, "ISP error recover failed - " "board disabled.\n"); /* * The next call disables the board * completely. 
*/ qla2x00_abort_isp_cleanup(vha); vha->flags.online = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); status = 0; } else { /* schedule another ISP abort */ ha->isp_abort_cnt--; ql_dbg(ql_dbg_taskm, vha, 0x8020, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); status = 1; } } else { ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; ql_dbg(ql_dbg_taskm, vha, 0x8021, "ISP error recovery - retrying (%d) " "more times.\n", ha->isp_abort_cnt); set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); status = 1; } } } if (vha->hw->flags.port_isolated) { qla2x00_abort_isp_cleanup(vha); return status; } if (!status) { ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); qla2x00_configure_hba(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } } spin_unlock_irqrestore(&ha->vport_slock, flags); if (IS_QLA8031(ha)) { ql_dbg(ql_dbg_p3p, vha, 0xb05d, "Setting back fcoe driver presence.\n"); if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS) ql_dbg(ql_dbg_p3p, vha, 0xb074, "Error while setting DRV-Presence.\n"); } } else { ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", __func__); } return(status); } /* * qla2x00_restart_isp * restarts the ISP after a reset * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ static int qla2x00_restart_isp(scsi_qla_host_t *vha) { int status; struct qla_hw_data *ha = vha->hw; /* If firmware needs to be loaded */ if (qla2x00_isp_firmware(vha)) { vha->flags.online = 0; status = ha->isp_ops->chip_diag(vha); if (status) return status; status = qla2x00_setup_chip(vha); if (status) return status; } status = qla2x00_init_rings(vha); if (status) return status; clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->flags.chip_reset_done = 1; /* Initialize the queues in use */ qla25xx_init_queues(ha); status = qla2x00_fw_ready(vha); if (status) { /* if no cable then assume it's good */ return vha->device_flags & DFLG_NO_CABLE ? 0 : status; } /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return 0; } static int qla25xx_init_queues(struct qla_hw_data *ha) { struct rsp_que *rsp = NULL; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int ret = -1; int i; for (i = 1; i < ha->max_rsp_queues; i++) { rsp = ha->rsp_q_map[i]; if (rsp && test_bit(i, ha->rsp_qid_map)) { rsp->options &= ~BIT_0; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) ql_dbg(ql_dbg_init, base_vha, 0x00ff, "%s Rsp que: %d init failed.\n", __func__, rsp->id); else ql_dbg(ql_dbg_init, base_vha, 0x0100, "%s Rsp que: %d inited.\n", __func__, rsp->id); } } for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; if (req && test_bit(i, ha->req_qid_map)) { /* Clear outstanding commands array. */ req->options &= ~BIT_0; ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) ql_dbg(ql_dbg_init, base_vha, 0x0101, "%s Req que: %d init failed.\n", __func__, req->id); else ql_dbg(ql_dbg_init, base_vha, 0x0102, "%s Req que: %d inited.\n", __func__, req->id); } } return ret; } /* * qla2x00_reset_adapter * Reset adapter. * * Input: * ha = adapter block pointer. 
*/ int qla2x00_reset_adapter(scsi_qla_host_t *vha) { unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; vha->flags.online = 0; ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); rd_reg_word(&reg->hccr); /* PCI Posting. */ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC); rd_reg_word(&reg->hccr); /* PCI Posting. */ spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } int qla24xx_reset_adapter(scsi_qla_host_t *vha) { unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; vha->flags.online = 0; ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET); rd_reg_dword(&reg->hccr); wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE); rd_reg_dword(&reg->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (IS_NOPOLLING_TYPE(ha)) ha->isp_ops->enable_intrs(ha); return QLA_SUCCESS; } /* On sparc systems, obtain port and node WWN from firmware * properties. */ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, struct nvram_24xx *nv) { #ifdef CONFIG_SPARC struct qla_hw_data *ha = vha->hw; struct pci_dev *pdev = ha->pdev; struct device_node *dp = pci_device_to_OF_node(pdev); const u8 *val; int len; val = of_get_property(dp, "port-wwn", &len); if (val && len >= WWN_SIZE) memcpy(nv->port_name, val, WWN_SIZE); val = of_get_property(dp, "node-wwn", &len); if (val && len >= WWN_SIZE) memcpy(nv->node_name, val, WWN_SIZE); #endif } int qla24xx_nvram_config(scsi_qla_host_t *vha) { int rval; struct init_cb_24xx *icb; struct nvram_24xx *nv; __le32 *dptr; uint8_t *dptr1, *dptr2; uint32_t chksum; uint16_t cnt; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; icb = (struct init_cb_24xx *)ha->init_cb; nv = ha->nvram; /* Determine NVRAM starting address. */ if (ha->port_no == 0) { ha->nvram_base = FA_NVRAM_FUNC0_ADDR; ha->vpd_base = FA_NVRAM_VPD0_ADDR; } else { ha->nvram_base = FA_NVRAM_FUNC1_ADDR; ha->vpd_base = FA_NVRAM_VPD1_ADDR; } ha->nvram_size = sizeof(*nv); ha->vpd_size = FA_NVRAM_VPD_SIZE; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; ha->isp_ops->read_nvram(vha, ha->vpd, ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); /* Get NVRAM data into cache and calculate checksum. */ dptr = (__force __le32 *)nv; ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, "Contents of NVRAM\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || le16_to_cpu(nv->nvram_version) < ICB_VERSION) { /* Reset NVRAM data. */ ql_log(ql_log_warn, vha, 0x006b, "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", chksum, nv->id, nv->nvram_version); ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv)); ql_log(ql_log_warn, vha, 0x006c, "Falling back to functioning (yet invalid -- WWPN) " "defaults.\n"); /* * Set default initialization control block. 
*/ memset(nv, 0, ha->nvram_size); nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION); nv->frame_payload_size = cpu_to_le16(2048); nv->execution_throttle = cpu_to_le16(0xFFFF); nv->exchange_count = cpu_to_le16(0); nv->hard_address = cpu_to_le16(124); nv->port_name[0] = 0x21; nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; nv->port_name[5] = 0x1c; nv->port_name[6] = 0x55; nv->port_name[7] = 0x86; nv->node_name[0] = 0x20; nv->node_name[1] = 0x00; nv->node_name[2] = 0x00; nv->node_name[3] = 0xe0; nv->node_name[4] = 0x8b; nv->node_name[5] = 0x1c; nv->node_name[6] = 0x55; nv->node_name[7] = 0x86; qla24xx_nvram_wwn_from_ofw(vha, nv); nv->login_retry_count = cpu_to_le16(8); nv->interrupt_delay_timer = cpu_to_le16(0); nv->login_timeout = cpu_to_le16(0); nv->firmware_options_1 = cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); nv->firmware_options_2 = cpu_to_le32(2 << 4); nv->firmware_options_2 |= cpu_to_le32(BIT_12); nv->firmware_options_3 = cpu_to_le32(2 << 13); nv->host_p = cpu_to_le32(BIT_11|BIT_10); nv->efi_parameters = cpu_to_le32(0); nv->reset_delay = 5; nv->max_luns_per_target = cpu_to_le16(128); nv->port_down_retry_count = cpu_to_le16(30); nv->link_down_timeout = cpu_to_le16(30); rval = 1; } if (qla_tgt_mode_enabled(vha)) { /* Don't enable full login after initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Don't enable LIP full login for initiator */ nv->host_p &= cpu_to_le32(~BIT_10); } qlt_24xx_config_nvram_stage1(vha, nv); /* Reset Initialization control block */ memset(icb, 0, ha->init_cb_size); /* Copy 1st segment. */ dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)&nv->version; cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; while (cnt--) *dptr1++ = *dptr2++; icb->login_retry_count = nv->login_retry_count; icb->link_down_on_nos = nv->link_down_on_nos; /* Copy 2nd segment. */ dptr1 = (uint8_t *)&icb->interrupt_delay_timer; dptr2 = (uint8_t *)&nv->interrupt_delay_timer; cnt = (uint8_t *)&icb->reserved_3 - (uint8_t *)&icb->interrupt_delay_timer; while (cnt--) *dptr1++ = *dptr2++; ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); /* * Setup driver NVRAM options. */ qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), "QLA2462"); qlt_24xx_config_nvram_stage2(vha, icb); if (nv->host_p & cpu_to_le32(BIT_15)) { /* Use alternate WWN? */ memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } /* Prepare nodename */ if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { /* * Firmware will apply the following mask if the nodename was * not provided. */ memcpy(icb->node_name, icb->port_name, WWN_SIZE); icb->node_name[0] &= 0xF0; } /* Set host adapter parameters. */ ha->flags.disable_risc_code_load = 0; ha->flags.enable_lip_reset = 0; ha->flags.enable_lip_full_login = le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; ha->flags.enable_target_reset = le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; ha->flags.enable_led_scheme = 0; ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 
1 : 0; ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & (BIT_6 | BIT_5 | BIT_4)) >> 4; memcpy(ha->fw_seriallink_options24, nv->seriallink_options, sizeof(ha->fw_seriallink_options24)); /* save HBA serial number */ ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = le16_to_cpu(nv->login_retry_count); /* Set minimum login_timeout to 4 seconds. */ if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) nv->login_timeout = cpu_to_le16(ql2xlogintimeout); if (le16_to_cpu(nv->login_timeout) < 4) nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); /* Set minimum RATOV to 100 tenths of a second. */ ha->r_a_tov = 100; ha->loop_reset_delay = nv->reset_delay; /* Link Down Timeout = 0: * * When Port Down timer expires we will start returning * I/O's to OS with "DID_NO_CONNECT". * * Link Down Timeout != 0: * * The driver waits for the link to come up after link down * before returning I/Os to OS with "DID_NO_CONNECT". */ if (le16_to_cpu(nv->link_down_timeout) == 0) { ha->loop_down_abort_time = (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); } else { ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); ha->loop_down_abort_time = (LOOP_DOWN_TIME - ha->link_down_timeout); } /* Need enough time to try and get the port back. */ ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); if (qlport_down_retry) ha->port_down_retry_count = qlport_down_retry; /* Set login_retry_count */ ha->login_retry_count = le16_to_cpu(nv->login_retry_count); if (ha->port_down_retry_count == le16_to_cpu(nv->port_down_retry_count) && ha->port_down_retry_count > 3) ha->login_retry_count = ha->port_down_retry_count; else if (ha->port_down_retry_count > (int)ha->login_retry_count) ha->login_retry_count = ha->port_down_retry_count; if (ql2xloginretrycount) ha->login_retry_count = ql2xloginretrycount; /* N2N: driver will initiate Login instead of FW */ icb->firmware_options_3 |= cpu_to_le32(BIT_8); /* Enable ZIO. */ if (!vha->flags.init_done) { ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & (BIT_3 | BIT_2 | BIT_1 | BIT_0); ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));

	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
	ql_dbg(ql_dbg_init, vha, 0x018b,
	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
	    name, "status",
	    image_status->image_status_mask,
	    le16_to_cpu(image_status->generation),
	    image_status->ver_major,
	    image_status->ver_minor,
	    image_status->bitmap,
	    le32_to_cpu(image_status->checksum),
	    le32_to_cpu(image_status->signature));
}

static bool
qla28xx_check_aux_image_status_signature(
    struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
}

static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return signature != QLA27XX_IMG_STATUS_SIGN &&
	    signature != QLA28XX_IMG_STATUS_SIGN;
}

static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
	__le32 *p = (__force __le32 *)image_status;
	uint n = sizeof(*image_status) / sizeof(*p);
	uint32_t sum = 0;

	for ( ; n--; p++)
		sum += le32_to_cpup(p);

	return sum;
}

static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
{
	return aux->bitmap & bitmask ?
	    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
}

static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);

	active_regions->aux.nvme_params =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}

static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
	/* calculate generation delta as uint16 (this accounts for wrap) */
	int16_t delta =
	    le16_to_cpu(pri_image_status->generation) -
	    le16_to_cpu(sec_image_status->generation);

	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

	return delta;
}

void
qla28xx_get_aux_images(
    struct scsi_qla_host *vha, struct active_regions *active_regions)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_aux_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Primary aux image not addressed\n");
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
	    ha->flt_region_aux_img_status_pri,
	    sizeof(pri_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary aux image signature (%#x) not valid\n",
		    le32_to_cpu(pri_aux_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary aux image checksum failed\n");
		goto check_sec_image;
	}

	valid_pri_image = true;

	if (pri_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Primary aux image is active\n");
		active_pri_image = true;
	}

check_sec_image:
	if (!ha->flt_region_aux_img_status_sec) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Secondary aux image not addressed\n");
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
	    ha->flt_region_aux_img_status_sec,
	    sizeof(sec_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Secondary aux image signature (%#x) not valid\n",
		    le32_to_cpu(sec_aux_image_status.signature));
		goto check_valid_image;
	}

	if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Secondary aux image checksum failed\n");
		goto check_valid_image;
	}

	valid_sec_image = true;

	if (sec_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary aux image is active\n");
		active_sec_image = true;
	}

check_valid_image:
	if (valid_pri_image && active_pri_image &&
	    valid_sec_image && active_sec_image) {
		if (qla27xx_compare_image_generation(&pri_aux_image_status,
		    &sec_aux_image_status) >= 0) {
			qla28xx_component_status(active_regions,
			    &pri_aux_image_status);
		} else {
			qla28xx_component_status(active_regions,
			    &sec_aux_image_status);
		}
	} else if (valid_pri_image &&
active_pri_image) { qla28xx_component_status(active_regions, &pri_aux_image_status); } else if (valid_sec_image && active_sec_image) { qla28xx_component_status(active_regions, &sec_aux_image_status); } ql_dbg(ql_dbg_init, vha, 0x018f, "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n", active_regions->aux.board_config, active_regions->aux.vpd_nvram, active_regions->aux.npiv_config_0_1, active_regions->aux.npiv_config_2_3, active_regions->aux.nvme_params); } void qla27xx_get_active_image(struct scsi_qla_host *vha, struct active_regions *active_regions) { struct qla_hw_data *ha = vha->hw; struct qla27xx_image_status pri_image_status, sec_image_status; bool valid_pri_image = false, valid_sec_image = false; bool active_pri_image = false, active_sec_image = false; if (!ha->flt_region_img_status_pri) { ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n"); goto check_sec_image; } if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status, ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != QLA_SUCCESS) { WARN_ON_ONCE(true); goto check_sec_image; } qla27xx_print_image(vha, "Primary image", &pri_image_status); if (qla27xx_check_image_status_signature(&pri_image_status)) { ql_dbg(ql_dbg_init, vha, 0x018b, "Primary image signature (%#x) not valid\n", le32_to_cpu(pri_image_status.signature)); goto check_sec_image; } if (qla27xx_image_status_checksum(&pri_image_status)) { ql_dbg(ql_dbg_init, vha, 0x018c, "Primary image checksum failed\n"); goto check_sec_image; } valid_pri_image = true; if (pri_image_status.image_status_mask & 1) { ql_dbg(ql_dbg_init, vha, 0x018d, "Primary image is active\n"); active_pri_image = true; } check_sec_image: if (!ha->flt_region_img_status_sec) { ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n"); goto check_valid_image; } qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); qla27xx_print_image(vha, "Secondary image", &sec_image_status); if (qla27xx_check_image_status_signature(&sec_image_status)) { ql_dbg(ql_dbg_init, vha, 0x018b, "Secondary image signature (%#x) not valid\n", le32_to_cpu(sec_image_status.signature)); goto check_valid_image; } if (qla27xx_image_status_checksum(&sec_image_status)) { ql_dbg(ql_dbg_init, vha, 0x018c, "Secondary image checksum failed\n"); goto check_valid_image; } valid_sec_image = true; if (sec_image_status.image_status_mask & 1) { ql_dbg(ql_dbg_init, vha, 0x018d, "Secondary image is active\n"); active_sec_image = true; } check_valid_image: if (valid_pri_image && active_pri_image) active_regions->global = QLA27XX_PRIMARY_IMAGE; if (valid_sec_image && active_sec_image) { if (!active_regions->global || qla27xx_compare_image_generation( &pri_image_status, &sec_image_status) < 0) { active_regions->global = QLA27XX_SECONDARY_IMAGE; } } ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n", active_regions->global == QLA27XX_DEFAULT_IMAGE ? "default (boot/fw)" : active_regions->global == QLA27XX_PRIMARY_IMAGE ? "primary" : active_regions->global == QLA27XX_SECONDARY_IMAGE ? 
"secondary" : "invalid", active_regions->global); } bool qla24xx_risc_firmware_invalid(uint32_t *dword) { return !(dword[4] | dword[5] | dword[6] | dword[7]) || !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]); } static int qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, uint32_t faddr) { int rval; uint templates, segments, fragment; ulong i; uint j; ulong dlen; uint32_t *dcode; uint32_t risc_addr, risc_size, risc_attr = 0; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct fwdt *fwdt = ha->fwdt; ql_dbg(ql_dbg_init, vha, 0x008b, "FW: Loading firmware from flash (%x).\n", faddr); dcode = (uint32_t *)req->ring; qla24xx_read_flash_data(vha, dcode, faddr, 8); if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x008c, "Unable to verify the integrity of flash firmware " "image.\n"); ql_log(ql_log_fatal, vha, 0x008d, "Firmware data: %08x %08x %08x %08x.\n", dcode[0], dcode[1], dcode[2], dcode[3]); return QLA_FUNCTION_FAILED; } dcode = (uint32_t *)req->ring; *srisc_addr = 0; segments = FA_RISC_CODE_SEGMENTS; for (j = 0; j < segments; j++) { ql_dbg(ql_dbg_init, vha, 0x008d, "-> Loading segment %u...\n", j); qla24xx_read_flash_data(vha, dcode, faddr, 10); risc_addr = be32_to_cpu((__force __be32)dcode[2]); risc_size = be32_to_cpu((__force __be32)dcode[3]); if (!*srisc_addr) { *srisc_addr = risc_addr; risc_attr = be32_to_cpu((__force __be32)dcode[9]); } dlen = ha->fw_transfer_size >> 2; for (fragment = 0; risc_size; fragment++) { if (dlen > risc_size) dlen = risc_size; ql_dbg(ql_dbg_init, vha, 0x008e, "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", fragment, risc_addr, faddr, dlen); qla24xx_read_flash_data(vha, dcode, faddr, dlen); for (i = 0; i < dlen; i++) dcode[i] = swab32(dcode[i]); rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { ql_log(ql_log_fatal, vha, 0x008f, "-> Failed load firmware fragment %u.\n", fragment); return QLA_FUNCTION_FAILED; } faddr += dlen; risc_addr += dlen; risc_size -= dlen; } } if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_SUCCESS; templates = (risc_attr & BIT_9) ? 
2 : 1; ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates); for (j = 0; j < templates; j++, fwdt++) { vfree(fwdt->template); fwdt->template = NULL; fwdt->length = 0; dcode = (uint32_t *)req->ring; qla24xx_read_flash_data(vha, dcode, faddr, 7); risc_size = be32_to_cpu((__force __be32)dcode[2]); ql_dbg(ql_dbg_init, vha, 0x0161, "-> fwdt%u template array at %#x (%#x dwords)\n", j, faddr, risc_size); if (!risc_size || !~risc_size) { ql_dbg(ql_dbg_init, vha, 0x0162, "-> fwdt%u failed to read array\n", j); goto failed; } /* skip header and ignore checksum */ faddr += 7; risc_size -= 8; ql_dbg(ql_dbg_init, vha, 0x0163, "-> fwdt%u template allocate template %#x words...\n", j, risc_size); fwdt->template = vmalloc_array(risc_size, sizeof(*dcode)); if (!fwdt->template) { ql_log(ql_log_warn, vha, 0x0164, "-> fwdt%u failed allocate template.\n", j); goto failed; } dcode = fwdt->template; qla24xx_read_flash_data(vha, dcode, faddr, risc_size); if (!qla27xx_fwdt_template_valid(dcode)) { ql_log(ql_log_warn, vha, 0x0165, "-> fwdt%u failed template validate\n", j); goto failed; } dlen = qla27xx_fwdt_template_size(dcode); ql_dbg(ql_dbg_init, vha, 0x0166, "-> fwdt%u template size %#lx bytes (%#lx words)\n", j, dlen, dlen / sizeof(*dcode)); if (dlen > risc_size * sizeof(*dcode)) { ql_log(ql_log_warn, vha, 0x0167, "-> fwdt%u template exceeds array (%-lu bytes)\n", j, dlen - risc_size * sizeof(*dcode)); goto failed; } fwdt->length = dlen; ql_dbg(ql_dbg_init, vha, 0x0168, "-> fwdt%u loaded template ok\n", j); faddr += risc_size + 1; } return QLA_SUCCESS; failed: vfree(fwdt->template); fwdt->template = NULL; fwdt->length = 0; return QLA_SUCCESS; } #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" int qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; int i, fragment; uint16_t *wcode; __be16 *fwcode; uint32_t risc_addr, risc_size, fwclen, wlen, *seg; struct fw_blob *blob; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; /* Load firmware blob. */ blob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_info, vha, 0x0083, "Firmware image unavailable.\n"); ql_log(ql_log_info, vha, 0x0084, "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); return QLA_FUNCTION_FAILED; } rval = QLA_SUCCESS; wcode = (uint16_t *)req->ring; *srisc_addr = 0; fwcode = (__force __be16 *)blob->fw->data; fwclen = 0; /* Validate firmware image by checking version. */ if (blob->fw->size < 8 * sizeof(uint16_t)) { ql_log(ql_log_fatal, vha, 0x0085, "Unable to verify integrity of firmware image (%zd).\n", blob->fw->size); goto fail_fw_integrity; } for (i = 0; i < 4; i++) wcode[i] = be16_to_cpu(fwcode[i + 4]); if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && wcode[2] == 0 && wcode[3] == 0)) { ql_log(ql_log_fatal, vha, 0x0086, "Unable to verify integrity of firmware image.\n"); ql_log(ql_log_fatal, vha, 0x0087, "Firmware data: %04x %04x %04x %04x.\n", wcode[0], wcode[1], wcode[2], wcode[3]); goto fail_fw_integrity; } seg = blob->segs; while (*seg && rval == QLA_SUCCESS) { risc_addr = *seg; *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; risc_size = be16_to_cpu(fwcode[3]); /* Validate firmware image size. 
*/ fwclen += risc_size * sizeof(uint16_t); if (blob->fw->size < fwclen) { ql_log(ql_log_fatal, vha, 0x0088, "Unable to verify integrity of firmware image " "(%zd).\n", blob->fw->size); goto fail_fw_integrity; } fragment = 0; while (risc_size > 0 && rval == QLA_SUCCESS) { wlen = (uint16_t)(ha->fw_transfer_size >> 1); if (wlen > risc_size) wlen = risc_size; ql_dbg(ql_dbg_init, vha, 0x0089, "Loading risc segment@ risc addr %x number of " "words 0x%x.\n", risc_addr, wlen); for (i = 0; i < wlen; i++) wcode[i] = swab16((__force u32)fwcode[i]); rval = qla2x00_load_ram(vha, req->dma, risc_addr, wlen); if (rval) { ql_log(ql_log_fatal, vha, 0x008a, "Failed to load segment %d of firmware.\n", fragment); break; } fwcode += wlen; risc_addr += wlen; risc_size -= wlen; fragment++; } /* Next segment. */ seg++; } return rval; fail_fw_integrity: return QLA_FUNCTION_FAILED; } static int qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; uint templates, segments, fragment; uint32_t *dcode; ulong dlen; uint32_t risc_addr, risc_size, risc_attr = 0; ulong i; uint j; struct fw_blob *blob; __be32 *fwcode; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct fwdt *fwdt = ha->fwdt; ql_dbg(ql_dbg_init, vha, 0x0090, "-> FW: Loading via request-firmware.\n"); blob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_warn, vha, 0x0092, "-> Firmware file not found.\n"); return QLA_FUNCTION_FAILED; } fwcode = (__force __be32 *)blob->fw->data; dcode = (__force uint32_t *)fwcode; if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x0093, "Unable to verify integrity of firmware image (%zd).\n", blob->fw->size); ql_log(ql_log_fatal, vha, 0x0095, "Firmware data: %08x %08x %08x %08x.\n", dcode[0], dcode[1], dcode[2], dcode[3]); return QLA_FUNCTION_FAILED; } dcode = (uint32_t *)req->ring; *srisc_addr = 0; segments = FA_RISC_CODE_SEGMENTS; for (j = 0; j < segments; j++) { ql_dbg(ql_dbg_init, vha, 0x0096, "-> Loading segment %u...\n", j); risc_addr = be32_to_cpu(fwcode[2]); risc_size = be32_to_cpu(fwcode[3]); if (!*srisc_addr) { *srisc_addr = risc_addr; risc_attr = be32_to_cpu(fwcode[9]); } dlen = ha->fw_transfer_size >> 2; for (fragment = 0; risc_size; fragment++) { if (dlen > risc_size) dlen = risc_size; ql_dbg(ql_dbg_init, vha, 0x0097, "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n", fragment, risc_addr, (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data), dlen); for (i = 0; i < dlen; i++) dcode[i] = swab32((__force u32)fwcode[i]); rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { ql_log(ql_log_fatal, vha, 0x0098, "-> Failed load firmware fragment %u.\n", fragment); return QLA_FUNCTION_FAILED; } fwcode += dlen; risc_addr += dlen; risc_size -= dlen; } } if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_SUCCESS; templates = (risc_attr & BIT_9) ? 
2 : 1; ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates); for (j = 0; j < templates; j++, fwdt++) { vfree(fwdt->template); fwdt->template = NULL; fwdt->length = 0; risc_size = be32_to_cpu(fwcode[2]); ql_dbg(ql_dbg_init, vha, 0x0171, "-> fwdt%u template array at %#x (%#x dwords)\n", j, (uint32_t)((void *)fwcode - (void *)blob->fw->data), risc_size); if (!risc_size || !~risc_size) { ql_dbg(ql_dbg_init, vha, 0x0172, "-> fwdt%u failed to read array\n", j); goto failed; } /* skip header and ignore checksum */ fwcode += 7; risc_size -= 8; ql_dbg(ql_dbg_init, vha, 0x0173, "-> fwdt%u template allocate template %#x words...\n", j, risc_size); fwdt->template = vmalloc_array(risc_size, sizeof(*dcode)); if (!fwdt->template) { ql_log(ql_log_warn, vha, 0x0174, "-> fwdt%u failed allocate template.\n", j); goto failed; } dcode = fwdt->template; for (i = 0; i < risc_size; i++) dcode[i] = (__force u32)fwcode[i]; if (!qla27xx_fwdt_template_valid(dcode)) { ql_log(ql_log_warn, vha, 0x0175, "-> fwdt%u failed template validate\n", j); goto failed; } dlen = qla27xx_fwdt_template_size(dcode); ql_dbg(ql_dbg_init, vha, 0x0176, "-> fwdt%u template size %#lx bytes (%#lx words)\n", j, dlen, dlen / sizeof(*dcode)); if (dlen > risc_size * sizeof(*dcode)) { ql_log(ql_log_warn, vha, 0x0177, "-> fwdt%u template exceeds array (%-lu bytes)\n", j, dlen - risc_size * sizeof(*dcode)); goto failed; } fwdt->length = dlen; ql_dbg(ql_dbg_init, vha, 0x0178, "-> fwdt%u loaded template ok\n", j); fwcode += risc_size + 1; } return QLA_SUCCESS; failed: vfree(fwdt->template); fwdt->template = NULL; fwdt->length = 0; return QLA_SUCCESS; } int qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; if (ql2xfwloadbin == 1) return qla81xx_load_risc(vha, srisc_addr); /* * FW Load priority: * 1) Firmware via request-firmware interface (.bin file). * 2) Firmware residing in flash. */ rval = qla24xx_load_risc_blob(vha, srisc_addr); if (rval == QLA_SUCCESS) return rval; return qla24xx_load_risc_flash(vha, srisc_addr, vha->hw->flt_region_fw); } int qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; struct qla_hw_data *ha = vha->hw; struct active_regions active_regions = { }; if (ql2xfwloadbin == 2) goto try_blob_fw; /* FW Load priority: * 1) Firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). * 3) Golden-Firmware residing in flash -- (limited operation). 
*/ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto try_primary_fw; qla27xx_get_active_image(vha, &active_regions); if (active_regions.global != QLA27XX_SECONDARY_IMAGE) goto try_primary_fw; ql_dbg(ql_dbg_init, vha, 0x008b, "Loading secondary firmware image.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec); if (!rval) return rval; try_primary_fw: ql_dbg(ql_dbg_init, vha, 0x008b, "Loading primary firmware image.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); if (!rval) return rval; try_blob_fw: rval = qla24xx_load_risc_blob(vha, srisc_addr); if (!rval || !ha->flt_region_gold_fw) return rval; ql_log(ql_log_info, vha, 0x0099, "Attempting to fallback to golden firmware.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); if (rval) return rval; ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n"); ha->flags.running_gold_fw = 1; return rval; } void qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) { int ret, retries; struct qla_hw_data *ha = vha->hw; if (ha->flags.pci_channel_io_perm_failure) return; if (!IS_FWI2_CAPABLE(ha)) return; if (!ha->fw_major_version) return; if (!ha->flags.fw_started) return; ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && ret != QLA_INVALID_COMMAND && retries ; retries--) { ha->isp_ops->reset_chip(vha); if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) continue; if (qla2x00_setup_chip(vha) != QLA_SUCCESS) continue; ql_log(ql_log_info, vha, 0x8015, "Attempting retry of stop-firmware command.\n"); ret = qla2x00_stop_firmware(vha); } QLA_FW_STOPPED(ha); ha->flags.fw_init_done = 0; } int qla24xx_configure_vhba(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; int rval2; uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); if (!vha->vp_idx) return -EINVAL; rval = qla2x00_fw_ready(base_vha); if (rval == QLA_SUCCESS) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); } vha->flags.management_server_logged_in = 0; /* Login to SNS first */ rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { if (rval2 == QLA_MEMORY_ALLOC_FAILED) ql_dbg(ql_dbg_init, vha, 0x0120, "Failed SNS login: loop_id=%x, rval2=%d\n", NPH_SNS, rval2); else ql_dbg(ql_dbg_init, vha, 0x0103, "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " "mb[2]=%x mb[6]=%x mb[7]=%x.\n", NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); return (QLA_FUNCTION_FAILED); } atomic_set(&vha->loop_down_timer, 0); atomic_set(&vha->loop_state, LOOP_UP); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); rval = qla2x00_loop_resync(base_vha); return rval; } /* 84XX Support **************************************************************/ static LIST_HEAD(qla_cs84xx_list); static DEFINE_MUTEX(qla_cs84xx_mutex); static struct qla_chip_state_84xx * qla84xx_get_chip(struct scsi_qla_host *vha) { struct qla_chip_state_84xx *cs84xx; struct qla_hw_data *ha = vha->hw; mutex_lock(&qla_cs84xx_mutex); /* Find any shared 84xx chip. 
*/ list_for_each_entry(cs84xx, &qla_cs84xx_list, list) { if (cs84xx->bus == ha->pdev->bus) { kref_get(&cs84xx->kref); goto done; } } cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL); if (!cs84xx) goto done; kref_init(&cs84xx->kref); spin_lock_init(&cs84xx->access_lock); mutex_init(&cs84xx->fw_update_mutex); cs84xx->bus = ha->pdev->bus; list_add_tail(&cs84xx->list, &qla_cs84xx_list); done: mutex_unlock(&qla_cs84xx_mutex); return cs84xx; } static void __qla84xx_chip_release(struct kref *kref) { struct qla_chip_state_84xx *cs84xx = container_of(kref, struct qla_chip_state_84xx, kref); mutex_lock(&qla_cs84xx_mutex); list_del(&cs84xx->list); mutex_unlock(&qla_cs84xx_mutex); kfree(cs84xx); } void qla84xx_put_chip(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; if (ha->cs84xx) kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); } static int qla84xx_init_chip(scsi_qla_host_t *vha) { int rval; uint16_t status[2]; struct qla_hw_data *ha = vha->hw; mutex_lock(&ha->cs84xx->fw_update_mutex); rval = qla84xx_verify_chip(vha, status); mutex_unlock(&ha->cs84xx->fw_update_mutex); return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED : QLA_SUCCESS; } /* 81XX Support **************************************************************/ int qla81xx_nvram_config(scsi_qla_host_t *vha) { int rval; struct init_cb_81xx *icb; struct nvram_81xx *nv; __le32 *dptr; uint8_t *dptr1, *dptr2; uint32_t chksum; uint16_t cnt; struct qla_hw_data *ha = vha->hw; uint32_t faddr; struct active_regions active_regions = { }; rval = QLA_SUCCESS; icb = (struct init_cb_81xx *)ha->init_cb; nv = ha->nvram; /* Determine NVRAM starting address. */ ha->nvram_size = sizeof(*nv); ha->vpd_size = FA_NVRAM_VPD_SIZE; if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) ha->vpd_size = FA_VPD_SIZE_82XX; if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) qla28xx_get_aux_images(vha, &active_regions); /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; faddr = ha->flt_region_vpd; if (IS_QLA28XX(ha)) { if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_vpd_sec; ql_dbg(ql_dbg_init, vha, 0x0110, "Loading %s nvram image.\n", active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? "primary" : "secondary"); } ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); /* Get NVRAM data into cache and calculate checksum. */ faddr = ha->flt_region_nvram; if (IS_QLA28XX(ha)) { if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_nvram_sec; } ql_dbg(ql_dbg_init, vha, 0x0110, "Loading %s nvram image.\n", active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? "primary" : "secondary"); ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); dptr = (__force __le32 *)nv; for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, "Contents of NVRAM:\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || le16_to_cpu(nv->nvram_version) < ICB_VERSION) { /* Reset NVRAM data. */ ql_log(ql_log_info, vha, 0x0073, "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", chksum, nv->id, le16_to_cpu(nv->nvram_version)); ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv)); ql_log(ql_log_info, vha, 0x0074, "Falling back to functioning (yet invalid -- WWPN) " "defaults.\n"); /* * Set default initialization control block. 
*/ memset(nv, 0, ha->nvram_size); nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION); nv->frame_payload_size = cpu_to_le16(2048); nv->execution_throttle = cpu_to_le16(0xFFFF); nv->exchange_count = cpu_to_le16(0); nv->port_name[0] = 0x21; nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; nv->port_name[5] = 0x1c; nv->port_name[6] = 0x55; nv->port_name[7] = 0x86; nv->node_name[0] = 0x20; nv->node_name[1] = 0x00; nv->node_name[2] = 0x00; nv->node_name[3] = 0xe0; nv->node_name[4] = 0x8b; nv->node_name[5] = 0x1c; nv->node_name[6] = 0x55; nv->node_name[7] = 0x86; nv->login_retry_count = cpu_to_le16(8); nv->interrupt_delay_timer = cpu_to_le16(0); nv->login_timeout = cpu_to_le16(0); nv->firmware_options_1 = cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); nv->firmware_options_2 = cpu_to_le32(2 << 4); nv->firmware_options_2 |= cpu_to_le32(BIT_12); nv->firmware_options_3 = cpu_to_le32(2 << 13); nv->host_p = cpu_to_le32(BIT_11|BIT_10); nv->efi_parameters = cpu_to_le32(0); nv->reset_delay = 5; nv->max_luns_per_target = cpu_to_le16(128); nv->port_down_retry_count = cpu_to_le16(30); nv->link_down_timeout = cpu_to_le16(180); nv->enode_mac[0] = 0x00; nv->enode_mac[1] = 0xC0; nv->enode_mac[2] = 0xDD; nv->enode_mac[3] = 0x04; nv->enode_mac[4] = 0x05; nv->enode_mac[5] = 0x06 + ha->port_no + 1; rval = 1; } if (IS_T10_PI_CAPABLE(ha)) nv->frame_payload_size &= cpu_to_le16(~7); qlt_81xx_config_nvram_stage1(vha, nv); /* Reset Initialization control block */ memset(icb, 0, ha->init_cb_size); /* Copy 1st segment. */ dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)&nv->version; cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; while (cnt--) *dptr1++ = *dptr2++; icb->login_retry_count = nv->login_retry_count; /* Copy 2nd segment. */ dptr1 = (uint8_t *)&icb->interrupt_delay_timer; dptr2 = (uint8_t *)&nv->interrupt_delay_timer; cnt = (uint8_t *)&icb->reserved_5 - (uint8_t *)&icb->interrupt_delay_timer; while (cnt--) *dptr1++ = *dptr2++; memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { icb->enode_mac[0] = 0x00; icb->enode_mac[1] = 0xC0; icb->enode_mac[2] = 0xDD; icb->enode_mac[3] = 0x04; icb->enode_mac[4] = 0x05; icb->enode_mac[5] = 0x06 + ha->port_no + 1; } /* Use extended-initialization control block. */ memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); /* * Setup driver NVRAM options. */ qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), "QLE8XXX"); qlt_81xx_config_nvram_stage2(vha, icb); /* Use alternate WWN? */ if (nv->host_p & cpu_to_le32(BIT_15)) { memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } /* Prepare nodename */ if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { /* * Firmware will apply the following mask if the nodename was * not provided. */ memcpy(icb->node_name, icb->port_name, WWN_SIZE); icb->node_name[0] &= 0xF0; } if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { if ((nv->enhanced_features & BIT_7) == 0) ha->flags.scm_supported_a = 1; } /* Set host adapter parameters. */ ha->flags.disable_risc_code_load = 0; ha->flags.enable_lip_reset = 0; ha->flags.enable_lip_full_login = le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; ha->flags.enable_target_reset = le32_to_cpu(nv->host_p) & BIT_11 ? 
1 : 0; ha->flags.enable_led_scheme = 0; ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0; ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & (BIT_6 | BIT_5 | BIT_4)) >> 4; /* save HBA serial number */ ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = le16_to_cpu(nv->login_retry_count); /* Set minimum login_timeout to 4 seconds. */ if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) nv->login_timeout = cpu_to_le16(ql2xlogintimeout); if (le16_to_cpu(nv->login_timeout) < 4) nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); /* Set minimum RATOV to 100 tenths of a second. */ ha->r_a_tov = 100; ha->loop_reset_delay = nv->reset_delay; /* Link Down Timeout = 0: * * When Port Down timer expires we will start returning * I/O's to OS with "DID_NO_CONNECT". * * Link Down Timeout != 0: * * The driver waits for the link to come up after link down * before returning I/Os to OS with "DID_NO_CONNECT". */ if (le16_to_cpu(nv->link_down_timeout) == 0) { ha->loop_down_abort_time = (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); } else { ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); ha->loop_down_abort_time = (LOOP_DOWN_TIME - ha->link_down_timeout); } /* Need enough time to try and get the port back. */ ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); if (qlport_down_retry) ha->port_down_retry_count = qlport_down_retry; /* Set login_retry_count */ ha->login_retry_count = le16_to_cpu(nv->login_retry_count); if (ha->port_down_retry_count == le16_to_cpu(nv->port_down_retry_count) && ha->port_down_retry_count > 3) ha->login_retry_count = ha->port_down_retry_count; else if (ha->port_down_retry_count > (int)ha->login_retry_count) ha->login_retry_count = ha->port_down_retry_count; if (ql2xloginretrycount) ha->login_retry_count = ql2xloginretrycount; /* if not running MSI-X we need handshaking on interrupts */ if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) icb->firmware_options_2 |= cpu_to_le32(BIT_22); /* Enable ZIO. */ if (!vha->flags.init_done) { ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & (BIT_3 | BIT_2 | BIT_1 | BIT_0); ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 
le16_to_cpu(icb->interrupt_delay_timer) : 2; } icb->firmware_options_2 &= cpu_to_le32( ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; ql_log(ql_log_info, vha, 0x0075, "ZIO mode %d enabled; timer delay (%d us).\n", ha->zio_mode, ha->zio_timer * 100); icb->firmware_options_2 |= cpu_to_le32( (uint32_t)ha->zio_mode); icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); vha->flags.process_response_queue = 1; } /* enable RIDA Format2 */ icb->firmware_options_3 |= cpu_to_le32(BIT_0); /* N2N: driver will initiate Login instead of FW */ icb->firmware_options_3 |= cpu_to_le32(BIT_8); /* Determine NVMe/FCP priority for target ports */ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); } return (rval); } int qla82xx_restart_isp(scsi_qla_host_t *vha) { int status, rval; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *tvp; unsigned long flags; status = qla2x00_init_rings(vha); if (!status) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->flags.chip_reset_done = 1; status = qla2x00_fw_ready(vha); if (!status) { /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); vha->flags.online = 1; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; } if (!status) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); if (!atomic_read(&vha->loop_down_timer)) { /* * Issue marker command only when we are going * to start the I/O . */ vha->marker_needed = 1; } ha->isp_ops->enable_intrs(ha); ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); /* Update the firmware version */ status = qla82xx_check_md_needed(vha); if (ha->fce) { ha->flags.fce_enabled = 1; memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { ql_log(ql_log_warn, vha, 0x8001, "Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0; } } if (ha->eft) { memset(ha->eft, 0, EFT_SIZE); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS); if (rval) { ql_log(ql_log_warn, vha, 0x8010, "Unable to reinitialize EFT (%d).\n", rval); } } } if (!status) { ql_dbg(ql_dbg_taskm, vha, 0x8011, "qla82xx_restart_isp succeeded.\n"); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } } spin_unlock_irqrestore(&ha->vport_slock, flags); } else { ql_log(ql_log_warn, vha, 0x8016, "qla82xx_restart_isp **** FAILED ****.\n"); } return status; } /* * qla24xx_get_fcp_prio * Gets the fcp cmd priority value for the logged in port. * Looks for a match of the port descriptors within * each of the fcp prio config entries. If a match is found, * the tag (priority) value is returned. * * Input: * vha = scsi host structure pointer. * fcport = port structure pointer. 
* * Return: * non-zero (if found) * -1 (if not found) * * Context: * Kernel context */ static int qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) { int i, entries; uint8_t pid_match, wwn_match; int priority; uint32_t pid1, pid2; uint64_t wwn1, wwn2; struct qla_fcp_prio_entry *pri_entry; struct qla_hw_data *ha = vha->hw; if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) return -1; priority = -1; entries = ha->fcp_prio_cfg->num_entries; pri_entry = &ha->fcp_prio_cfg->entry[0]; for (i = 0; i < entries; i++) { pid_match = wwn_match = 0; if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) { pri_entry++; continue; } /* check source pid for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) { pid1 = pri_entry->src_pid & INVALID_PORT_ID; pid2 = vha->d_id.b24 & INVALID_PORT_ID; if (pid1 == INVALID_PORT_ID) pid_match++; else if (pid1 == pid2) pid_match++; } /* check destination pid for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) { pid1 = pri_entry->dst_pid & INVALID_PORT_ID; pid2 = fcport->d_id.b24 & INVALID_PORT_ID; if (pid1 == INVALID_PORT_ID) pid_match++; else if (pid1 == pid2) pid_match++; } /* check source WWN for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) { wwn1 = wwn_to_u64(vha->port_name); wwn2 = wwn_to_u64(pri_entry->src_wwpn); if (wwn2 == (uint64_t)-1) wwn_match++; else if (wwn1 == wwn2) wwn_match++; } /* check destination WWN for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) { wwn1 = wwn_to_u64(fcport->port_name); wwn2 = wwn_to_u64(pri_entry->dst_wwpn); if (wwn2 == (uint64_t)-1) wwn_match++; else if (wwn1 == wwn2) wwn_match++; } if (pid_match == 2 || wwn_match == 2) { /* Found a matching entry */ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) priority = pri_entry->tag; break; } pri_entry++; } return priority; } /* * qla24xx_update_fcport_fcp_prio * Activates fcp priority for the logged in fc port * * Input: * vha = scsi host structure pointer. * fcp = port structure pointer. * * Return: * QLA_SUCCESS or QLA_FUNCTION_FAILED * * Context: * Kernel context. */ int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) { int ret; int priority; uint16_t mb[5]; if (fcport->port_type != FCT_TARGET || fcport->loop_id == FC_NO_LOOP_ID) return QLA_FUNCTION_FAILED; priority = qla24xx_get_fcp_prio(vha, fcport); if (priority < 0) return QLA_FUNCTION_FAILED; if (IS_P3P_TYPE(vha->hw)) { fcport->fcp_prio = priority & 0xf; return QLA_SUCCESS; } ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); if (ret == QLA_SUCCESS) { if (fcport->fcp_prio != priority) ql_dbg(ql_dbg_user, vha, 0x709e, "Updated FCP_CMND priority - value=%d loop_id=%d " "port_id=%02x%02x%02x.\n", priority, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); fcport->fcp_prio = priority & 0xf; } else ql_dbg(ql_dbg_user, vha, 0x704f, "Unable to update FCP_CMND priority - ret=0x%x for " "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); return ret; } /* * qla24xx_update_all_fcp_prio * Activates fcp priority for all the logged in ports * * Input: * ha = adapter block pointer. * * Return: * QLA_SUCCESS or QLA_FUNCTION_FAILED * * Context: * Kernel context. 
*/ int qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) { int ret; fc_port_t *fcport; ret = QLA_FUNCTION_FAILED; /* We need to set priority for all logged in ports */ list_for_each_entry(fcport, &vha->vp_fcports, list) ret = qla24xx_update_fcport_fcp_prio(vha, fcport); return ret; } struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx, bool startqp) { int rsp_id = 0; int req_id = 0; int i; struct qla_hw_data *ha = vha->hw; uint16_t qpair_id = 0; struct qla_qpair *qpair = NULL; struct qla_msix_entry *msix; if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { ql_log(ql_log_warn, vha, 0x00181, "FW/Driver is not multi-queue capable.\n"); return NULL; } if (ql2xmqsupport || ql2xnvmeenable) { qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); if (qpair == NULL) { ql_log(ql_log_warn, vha, 0x0182, "Failed to allocate memory for queue pair.\n"); return NULL; } qpair->hw = vha->hw; qpair->vha = vha; qpair->qp_lock_ptr = &qpair->qp_lock; spin_lock_init(&qpair->qp_lock); qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; /* Assign available que pair id */ mutex_lock(&ha->mq_lock); qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); if (ha->num_qpairs >= ha->max_qpairs) { mutex_unlock(&ha->mq_lock); ql_log(ql_log_warn, vha, 0x0183, "No resources to create additional q pair.\n"); goto fail_qid_map; } ha->num_qpairs++; set_bit(qpair_id, ha->qpair_qid_map); ha->queue_pair_map[qpair_id] = qpair; qpair->id = qpair_id; qpair->vp_idx = vp_idx; qpair->fw_started = ha->flags.fw_started; INIT_LIST_HEAD(&qpair->hints_list); INIT_LIST_HEAD(&qpair->dsd_list); qpair->chip_reset = ha->base_qpair->chip_reset; qpair->enable_class_2 = ha->base_qpair->enable_class_2; qpair->enable_explicit_conf = ha->base_qpair->enable_explicit_conf; for (i = 0; i < ha->msix_count; i++) { msix = &ha->msix_entries[i]; if (msix->in_use) continue; qpair->msix = msix; ql_dbg(ql_dbg_multiq, vha, 0xc00f, "Vector %x selected for qpair\n", msix->vector); break; } if (!qpair->msix) { ql_log(ql_log_warn, vha, 0x0184, "Out of MSI-X vectors!.\n"); goto fail_msix; } qpair->msix->in_use = 1; list_add_tail(&qpair->qp_list_elem, &vha->qp_list); qpair->pdev = ha->pdev; if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) qpair->reqq_start_iocbs = qla_83xx_start_iocbs; mutex_unlock(&ha->mq_lock); /* Create response queue first */ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); if (!rsp_id) { ql_log(ql_log_warn, vha, 0x0185, "Failed to create response queue.\n"); goto fail_rsp; } qpair->rsp = ha->rsp_q_map[rsp_id]; /* Create request queue */ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, startqp); if (!req_id) { ql_log(ql_log_warn, vha, 0x0186, "Failed to create request queue.\n"); goto fail_req; } qpair->req = ha->req_q_map[req_id]; qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; if (!qpair->cpu_mapped) qla_cpu_update(qpair, raw_smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) qpair->difdix_supported = 1; } qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); if (!qpair->srb_mempool) { ql_log(ql_log_warn, vha, 0xd036, "Failed to create srb mempool for qpair %d\n", qpair->id); goto fail_mempool; } if (qla_create_buf_pool(vha, qpair)) { ql_log(ql_log_warn, vha, 0xd036, "Failed to initialize buf pool for qpair %d\n", qpair->id); goto fail_bufpool; } /* Mark as online */ qpair->online = 1; if (!vha->flags.qpairs_available) vha->flags.qpairs_available = 1; ql_dbg(ql_dbg_multiq, vha, 
0xc00d, "Request/Response queue pair created, id %d\n", qpair->id); ql_dbg(ql_dbg_init, vha, 0x0187, "Request/Response queue pair created, id %d\n", qpair->id); } return qpair; fail_bufpool: mempool_destroy(qpair->srb_mempool); fail_mempool: qla25xx_delete_req_que(vha, qpair->req); fail_req: qla25xx_delete_rsp_que(vha, qpair->rsp); fail_rsp: mutex_lock(&ha->mq_lock); qpair->msix->in_use = 0; list_del(&qpair->qp_list_elem); if (list_empty(&vha->qp_list)) vha->flags.qpairs_available = 0; fail_msix: ha->queue_pair_map[qpair_id] = NULL; clear_bit(qpair_id, ha->qpair_qid_map); ha->num_qpairs--; mutex_unlock(&ha->mq_lock); fail_qid_map: kfree(qpair); return NULL; } int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) { int ret = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = qpair->hw; qpair->delete_in_progress = 1; qla_free_buf_pool(qpair); ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) goto fail; ret = qla25xx_delete_rsp_que(vha, qpair->rsp); if (ret != QLA_SUCCESS) goto fail; if (!list_empty(&qpair->dsd_list)) { struct dsd_dma *dsd_ptr, *tdsd_ptr; /* clean up allocated prev pool */ list_for_each_entry_safe(dsd_ptr, tdsd_ptr, &qpair->dsd_list, list) { dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(&dsd_ptr->list); kfree(dsd_ptr); } } mutex_lock(&ha->mq_lock); ha->queue_pair_map[qpair->id] = NULL; clear_bit(qpair->id, ha->qpair_qid_map); ha->num_qpairs--; list_del(&qpair->qp_list_elem); if (list_empty(&vha->qp_list)) { vha->flags.qpairs_available = 0; vha->flags.qpairs_req_created = 0; vha->flags.qpairs_rsp_created = 0; } mempool_destroy(qpair->srb_mempool); kfree(qpair); mutex_unlock(&ha->mq_lock); return QLA_SUCCESS; fail: return ret; } uint64_t qla2x00_count_set_bits(uint32_t num) { /* Brian Kernighan's Algorithm */ u64 count = 0; while (num) { num &= (num - 1); count++; } return count; } uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha) { fc_port_t *f, *tf; u64 count = 0; f = NULL; tf = NULL; list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { if (f->port_type != FCT_TARGET) continue; count++; } return count; } int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags) { scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport = NULL; unsigned long int_flags; if (flags & QLA2XX_HW_ERROR) vha->hw_err_cnt = 0; if (flags & QLA2XX_SHT_LNK_DWN) vha->short_link_down_cnt = 0; if (flags & QLA2XX_INT_ERR) vha->interface_err_cnt = 0; if (flags & QLA2XX_CMD_TIMEOUT) vha->cmd_timeout_cnt = 0; if (flags & QLA2XX_RESET_CMD_ERR) vha->reset_cmd_err_cnt = 0; if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->tgt_short_link_down_cnt = 0; fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); } vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; return 0; } int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags) { return qla2xxx_reset_stats(host, flags); } int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags) { return qla2xxx_reset_stats(host, flags); } int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags, void *data, u64 size) { scsi_qla_host_t *vha = shost_priv(host); struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data; struct ql_vnd_stats *rsp_data = &resp->stats; u64 ini_entry_count = 0; u64 i = 0; u64 entry_count = 0; u64 num_tgt = 0; u32 tmp_stat_type = 0; fc_port_t *fcport = NULL; unsigned long int_flags; /* 
Copy stat type to work on it */ tmp_stat_type = flags; if (tmp_stat_type & BIT_17) { num_tgt = qla2x00_get_num_tgts(vha); /* unset BIT_17 */ tmp_stat_type &= ~(1 << 17); } ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); entry_count = ini_entry_count + num_tgt; rsp_data->entry_count = entry_count; i = 0; if (flags & QLA2XX_HW_ERROR) { rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->hw_err_cnt; i++; } if (flags & QLA2XX_SHT_LNK_DWN) { rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->short_link_down_cnt; i++; } if (flags & QLA2XX_INT_ERR) { rsp_data->entry[i].stat_type = QLA2XX_INT_ERR; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->interface_err_cnt; i++; } if (flags & QLA2XX_CMD_TIMEOUT) { rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->cmd_timeout_cnt; i++; } if (flags & QLA2XX_RESET_CMD_ERR) { rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt; i++; } /* i will continue from previous loop, as target * entries are after initiator */ if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->port_type != FCT_TARGET) continue; if (!fcport->rport) continue; rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN; rsp_data->entry[i].tgt_num = fcport->rport->number; rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt; i++; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); } resp->status = EXT_STATUS_OK; return 0; } int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags, struct fc_rport *rport, void *data, u64 size) { struct ql_vnd_tgt_stats_resp *tgt_data = data; fc_port_t *fcport = *(fc_port_t **)rport->dd_data; tgt_data->status = 0; tgt_data->stats.entry_count = 1; tgt_data->stats.entry[0].stat_type = flags; tgt_data->stats.entry[0].tgt_num = rport->number; tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt; return 0; } int qla2xxx_disable_port(struct Scsi_Host *host) { scsi_qla_host_t *vha = shost_priv(host); vha->hw->flags.port_isolated = 1; if (qla2x00_isp_reg_stat(vha->hw)) { ql_log(ql_log_info, vha, 0x9006, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return FAILED; } if (qla2x00_chip_is_down(vha)) return 0; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); qla2x00_wait_for_sess_deletion(vha); } return 0; } int qla2xxx_enable_port(struct Scsi_Host *host) { scsi_qla_host_t *vha = shost_priv(host); if (qla2x00_isp_reg_stat(vha->hw)) { ql_log(ql_log_info, vha, 0x9001, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return FAILED; } vha->hw->flags.port_isolated = 0; /* Set the flag to 1, so that isp_abort can proceed */ vha->flags.online = 1; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return 0; }
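/*
 * Illustrative sketch, not part of the driver: qla2xxx_get_ini_stats()
 * above sizes its response by counting the set bits in the caller's flag
 * mask with the same loop used by qla2x00_count_set_bits() (Brian
 * Kernighan's algorithm), and handles the target short-link-down flag
 * separately because it expands into one entry per logged-in target.
 * The standalone program below mirrors only that sizing arithmetic; the
 * DEMO_* names and flag values are hypothetical stand-ins, not the
 * driver's QLA2XX_* definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_HW_ERROR		(1u << 0)
#define DEMO_SHT_LNK_DWN	(1u << 1)
#define DEMO_INT_ERR		(1u << 2)
#define DEMO_TGT_SHT_LNK_DWN	(1u << 17)

/* Brian Kernighan's algorithm: each iteration clears the lowest set bit. */
static unsigned int demo_count_set_bits(uint32_t num)
{
	unsigned int count = 0;

	while (num) {
		num &= (num - 1);
		count++;
	}
	return count;
}

int main(void)
{
	uint32_t flags = DEMO_HW_ERROR | DEMO_INT_ERR | DEMO_TGT_SHT_LNK_DWN;
	uint32_t ini_flags = flags;
	unsigned int num_tgt = 0;
	unsigned int entries;

	if (ini_flags & DEMO_TGT_SHT_LNK_DWN) {
		/* Per-target statistic: one entry per target port. */
		num_tgt = 3;	/* pretend three targets are logged in */
		ini_flags &= ~DEMO_TGT_SHT_LNK_DWN;
	}

	entries = demo_count_set_bits(ini_flags) + num_tgt;
	printf("initiator entries: %u, total entries: %u\n",
	       demo_count_set_bits(ini_flags), entries);
	return 0;
}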
linux-master
drivers/scsi/qla2xxx/qla_init.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_target.h" #include <linux/utsname.h> static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *); static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *); static int qla2x00_sns_rft_id(scsi_qla_host_t *); static int qla2x00_sns_rnn_id(scsi_qla_host_t *); static int qla_async_rftid(scsi_qla_host_t *, port_id_t *); static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8); static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*); static int qla_async_rsnn_nn(scsi_qla_host_t *); /** * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query. * @vha: HA context * @arg: CT arguments * * Returns a pointer to the @vha's ms_iocb. */ void * qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; ms_pkt = (ms_iocb_entry_t *)arg->iocb; memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); ms_pkt->entry_type = MS_IOCB_TYPE; ms_pkt->entry_count = 1; SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ms_pkt->cmd_dsd_count = cpu_to_le16(1); ms_pkt->total_dsd_count = cpu_to_le16(2); ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address); ms_pkt->req_dsd.length = ms_pkt->req_bytecount; put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address); ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount; vha->qla_stats.control_requests++; return (ms_pkt); } /** * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query. * @vha: HA context * @arg: CT arguments * * Returns a pointer to the @ha's ms_iocb. */ void * qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg) { struct qla_hw_data *ha = vha->hw; struct ct_entry_24xx *ct_pkt; ct_pkt = (struct ct_entry_24xx *)arg->iocb; memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); ct_pkt->entry_type = CT_IOCB_TYPE; ct_pkt->entry_count = 1; ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ct_pkt->cmd_dsd_count = cpu_to_le16(1); ct_pkt->rsp_dsd_count = cpu_to_le16(1); ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address); ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address); ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; vha->qla_stats.control_requests++; return (ct_pkt); } /** * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query. * @p: CT request buffer * @cmd: GS command * @rsp_size: response size in bytes * * Returns a pointer to the intitialized @ct_req. 
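* The CT max_rsp_size field is expressed in 4-byte words and excludes the 16-byte CT_IU preamble, hence the (rsp_size - 16) / 4 conversion below.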
*/ static inline struct ct_sns_req * qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) { memset(p, 0, sizeof(struct ct_sns_pkt)); p->p.req.header.revision = 0x01; p->p.req.header.gs_type = 0xFC; p->p.req.header.gs_subtype = 0x02; p->p.req.command = cpu_to_be16(cmd); p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); return &p->p.req; } int qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, struct ct_sns_rsp *ct_rsp, const char *routine) { int rval; uint16_t comp_status; struct qla_hw_data *ha = vha->hw; bool lid_is_sns = false; rval = QLA_FUNCTION_FAILED; if (ms_pkt->entry_status != 0) { ql_dbg(ql_dbg_disc, vha, 0x2031, "%s failed, error status (%x) on port_id: %02x%02x%02x.\n", routine, ms_pkt->entry_status, vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa); } else { if (IS_FWI2_CAPABLE(ha)) comp_status = le16_to_cpu( ((struct ct_entry_24xx *)ms_pkt)->comp_status); else comp_status = le16_to_cpu(ms_pkt->status); switch (comp_status) { case CS_COMPLETE: case CS_DATA_UNDERRUN: case CS_DATA_OVERRUN: /* Overrun? */ if (ct_rsp->header.response != cpu_to_be16(CT_ACCEPT_RESPONSE)) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n", routine, vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa, comp_status, ct_rsp->header.response); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2078, ct_rsp, offsetof(typeof(*ct_rsp), rsp)); rval = QLA_INVALID_COMMAND; } else rval = QLA_SUCCESS; break; case CS_PORT_LOGGED_OUT: if (IS_FWI2_CAPABLE(ha)) { if (le16_to_cpu(ms_pkt->loop_id.extended) == NPH_SNS) lid_is_sns = true; } else { if (le16_to_cpu(ms_pkt->loop_id.extended) == SIMPLE_NAME_SERVER) lid_is_sns = true; } if (lid_is_sns) { ql_dbg(ql_dbg_async, vha, 0x502b, "%s failed, Name server has logged out", routine); rval = QLA_NOT_LOGGED_IN; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); } break; case CS_TIMEOUT: rval = QLA_FUNCTION_TIMEOUT; fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x2033, "%s failed, completion status (%x) on port_id: " "%02x%02x%02x.\n", routine, comp_status, vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa); break; } } return rval; } /** * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command. * @vha: HA context * @fcport: fcport entry to updated * * Returns 0 on success. 
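* GA_NXT returns the name-server object for the port whose ID follows the supplied port_id, so repeated queries can walk the fabric one port at a time.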
*/ int qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_ga_nxt(vha, fcport); arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GA_NXT_REQ_SIZE; arg.rsp_size = GA_NXT_RSP_SIZE; arg.nport_handle = NPH_SNS; /* Issue GA_NXT */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, GA_NXT_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2062, "GA_NXT issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { /* Populate fc_port_t entry. */ fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id); memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, WWN_SIZE); memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, WWN_SIZE); fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? FS_FC4TYPE_FCP : FC4_TYPE_OTHER; if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2063, "GA_NXT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", fcport->node_name, fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } return (rval); } static inline int qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha) { return vha->hw->max_fibre_devices * 4 + 16; } /** * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. * @vha: HA context * @list: switch info entries to populate * * NOTE: Non-Nx_Ports are not requested. * * Returns 0 on success. */ int qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) { int rval; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct ct_sns_gid_pt_data *gid_data; struct qla_hw_data *ha = vha->hw; uint16_t gid_pt_rsp_size; struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gid_pt(vha, list); gid_data = NULL; gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GID_PT_REQ_SIZE; arg.rsp_size = gid_pt_rsp_size; arg.nport_handle = NPH_SNS; /* Issue GID_PT */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_type */ ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2055, "GID_PT issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { /* Set port IDs in switch info list. 
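The switch marks the final GID_PT entry by setting BIT_7 in its control byte; the loop stops there and stashes the control byte in rsvd_1 so callers can detect the end of the list.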
*/ for (i = 0; i < ha->max_fibre_devices; i++) { gid_data = &ct_rsp->rsp.gid_pt.entries[i]; list[i].d_id = be_to_port_id(gid_data->port_id); memset(list[i].fabric_port_name, 0, WWN_SIZE); list[i].fp_speed = PORT_SPEED_UNKNOWN; /* Last one exit. */ if (gid_data->control_byte & BIT_7) { list[i].d_id.b.rsvd_1 = gid_data->control_byte; break; } } /* * If we've used all available slots, then the switch is * reporting back more devices than we can handle with this * single call. Return a failed status, and let GA_NXT handle * the overload. */ if (i == ha->max_fibre_devices) rval = QLA_FUNCTION_FAILED; } return (rval); } /** * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. * @vha: HA context * @list: switch info entries to populate * * Returns 0 on success. */ int qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gpn_id(vha, list); arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GPN_ID_REQ_SIZE; arg.rsp_size = GPN_ID_RSP_SIZE; arg.nport_handle = NPH_SNS; for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2056, "GPN_ID issue IOCB failed (%d).\n", rval); break; } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; break; } else { /* Save portname */ memcpy(list[i].port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); } /** * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query. * @vha: HA context * @list: switch info entries to populate * * Returns 0 on success. 
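* GNN_ID resolves each port ID previously gathered by GID_PT to its node name, issuing one query per populated entry in @list.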
*/ int qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; uint16_t i; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct ct_arg arg; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gnn_id(vha, list); arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GNN_ID_REQ_SIZE; arg.rsp_size = GNN_ID_RSP_SIZE; arg.nport_handle = NPH_SNS; for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GNN_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, GNN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2057, "GNN_ID issue IOCB failed (%d).\n", rval); break; } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GNN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; break; } else { /* Save nodename */ memcpy(list[i].node_name, ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2058, "GID_PT entry - nn %8phN pn %8phN " "portid=%02x%02x%02x.\n", list[i].node_name, list[i].port_name, list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); } static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) { struct scsi_qla_host *vha = sp->vha; struct ct_sns_pkt *ct_sns; struct qla_work_evt *e; sp->rc = rc; if (rc == QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s exiting normally.\n", sp->name); } else if (rc == QLA_FUNCTION_TIMEOUT) { ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s timeout\n", sp->name); } else { ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; memset(ct_sns, 0, sizeof(*ct_sns)); sp->retry_count++; if (sp->retry_count > 3) goto err; ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s fail rc %x. Retry count %d\n", sp->name, rc, sp->retry_count); e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY); if (!e) goto err2; e->u.iosb.sp = sp; qla2x00_post_work(vha, e); return; } err: e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); err2: if (!e) { /* please ignore kernel warning. otherwise, we have mem leak. */ if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } e->u.iosb.sp = sp; qla2x00_post_work(vha, e); } /** * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. * @vha: HA context * * Returns 0 on success. 
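* RFT_ID registers this port's supported FC-4 types with the fabric name server; the async path below always registers SCSI-FCP and adds the NVMe type when NVMe is enabled in initiator mode.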
*/ int qla2x00_rft_id(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rft_id(vha); return qla_async_rftid(vha, &vha->d_id); } static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) { int rval = QLA_MEMORY_ALLOC_FAILED; struct ct_sns_req *ct_req; srb_t *sp; struct ct_sns_pkt *ct_sns; if (!vha->flags.online) goto done; /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rft_id"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; memset(ct_sns, 0, sizeof(*ct_sns)); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); /* Prepare CT arguments -- port_id, FC-4 types */ ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id); ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha)) ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x.\n", sp->name, sp->handle, d_id->b24); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2043, "RFT_ID issue IOCB failed (%d).\n", rval); goto done_free_sp; } return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } /** * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. * @vha: HA context * @type: not used * * Returns 0 on success. 
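* RFF_ID supplements RFT_ID by registering the FC-4 features (initiator/target role bits) for a single FC-4 type per call.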
*/ int qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) { struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2046, "RFF_ID call not supported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); } static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, u8 fc4feature, u8 fc4type) { int rval = QLA_MEMORY_ALLOC_FAILED; struct ct_sns_req *ct_req; srb_t *sp; struct ct_sns_pkt *ct_sns; /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rff_id"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; memset(ct_sns, 0, sizeof(*ct_sns)); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); ct_req->req.rff_id.fc4_feature = fc4feature; ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x feature %x type %x.\n", sp->name, sp->handle, d_id->b24, fc4feature, fc4type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2047, "RFF_ID issue IOCB failed (%d).\n", rval); goto done_free_sp; } return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } /** * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. * @vha: HA context * * Returns 0 on success. 
*/ int qla2x00_rnn_id(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rnn_id(vha); return qla_async_rnnid(vha, &vha->d_id, vha->node_name); } static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, u8 *node_name) { int rval = QLA_MEMORY_ALLOC_FAILED; struct ct_sns_req *ct_req; srb_t *sp; struct ct_sns_pkt *ct_sns; /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rnid"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; memset(ct_sns, 0, sizeof(*ct_sns)); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); /* Prepare CT arguments -- port_id, node_name */ ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id); memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x\n", sp->name, sp->handle, d_id->b24); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x204d, "RNN_ID issue IOCB failed (%d).\n", rval); goto done_free_sp; } return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } size_t qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size) { struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(ha)) return scnprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number, ha->mr.fw_version, qla2x00_version_str); return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number, ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version, qla2x00_version_str); } /** * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. * @vha: HA context * * Returns 0 on success. 
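* The request size used below is the 16-byte CT preamble plus the 8-byte node name, a one-byte name length, and the symbolic name string itself (24 + 1 + name_len).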
*/ int qla2x00_rsnn_nn(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2050, "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } return qla_async_rsnn_nn(vha); } static int qla_async_rsnn_nn(scsi_qla_host_t *vha) { int rval = QLA_MEMORY_ALLOC_FAILED; struct ct_sns_req *ct_req; srb_t *sp; struct ct_sns_pkt *ct_sns; /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rsnn_nn"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd042, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; memset(ct_sns, 0, sizeof(*ct_sns)); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); /* Prepare CT arguments -- node_name, symbolic node_name, size */ memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); /* Prepare the Symbolic Node Name */ qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name, sizeof(ct_req->req.rsnn_nn.sym_node_name)); ct_req->req.rsnn_nn.name_len = (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len; sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x.\n", sp->name, sp->handle); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2043, "RFT_ID issue IOCB failed (%d).\n", rval); goto done_free_sp; } return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } /** * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query. * @vha: HA context * @cmd: GS command * @scmd_len: Subcommand length * @data_size: response size in bytes * * Returns a pointer to the @ha's sns_cmd. */ static inline struct sns_cmd_pkt * qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, uint16_t data_size) { uint16_t wc; struct sns_cmd_pkt *sns_cmd; struct qla_hw_data *ha = vha->hw; sns_cmd = ha->sns_cmd; memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt)); wc = data_size / 2; /* Size in 16bit words. */ sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc); put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address); sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len); sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd); wc = (data_size - 16) / 4; /* Size in 32bit words. */ sns_cmd->p.cmd.size = cpu_to_le16(wc); vha->qla_stats.control_requests++; return (sns_cmd); } /** * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command. 
* @vha: HA context * @fcport: fcport entry to updated * * This command uses the old Exectute SNS Command mailbox routine. * * Returns 0 on success. */ static int qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; struct sns_cmd_pkt *sns_cmd; /* Issue GA_NXT. */ /* Prepare SNS command request. */ sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN, GA_NXT_SNS_DATA_SIZE); /* Prepare SNS command arguments -- port_id. */ sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = fcport->d_id.b.area; sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; /* Execute SNS command. */ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x205f, "GA_NXT Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gan_data[8] != 0x80 || sns_cmd->p.gan_data[9] != 0x02) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084, "GA_NXT failed, rejected request ga_nxt_rsp:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074, sns_cmd->p.gan_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Populate fc_port_t entry. */ fcport->d_id.b.domain = sns_cmd->p.gan_data[17]; fcport->d_id.b.area = sns_cmd->p.gan_data[18]; fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19]; memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE); memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE); if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE && sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2061, "GA_NXT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", fcport->node_name, fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } return (rval); } /** * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command. * @vha: HA context * @list: switch info entries to populate * * This command uses the old Exectute SNS Command mailbox routine. * * NOTE: Non-Nx_Ports are not requested. * * Returns 0 on success. */ static int qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) { int rval; struct qla_hw_data *ha = vha->hw; uint16_t i; uint8_t *entry; struct sns_cmd_pkt *sns_cmd; uint16_t gid_pt_sns_data_size; gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha); /* Issue GID_PT. */ /* Prepare SNS command request. */ sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, gid_pt_sns_data_size); /* Prepare SNS command arguments -- port_type. */ sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; /* Execute SNS command. */ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x206d, "GID_PT Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gid_data[8] != 0x80 || sns_cmd->p.gid_data[9] != 0x02) { ql_dbg(ql_dbg_disc, vha, 0x202f, "GID_PT failed, rejected request, gid_rsp:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081, sns_cmd->p.gid_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Set port IDs in switch info list. */ for (i = 0; i < ha->max_fibre_devices; i++) { entry = &sns_cmd->p.gid_data[(i * 4) + 16]; list[i].d_id.b.domain = entry[1]; list[i].d_id.b.area = entry[2]; list[i].d_id.b.al_pa = entry[3]; /* Last one exit. 
*/ if (entry[0] & BIT_7) { list[i].d_id.b.rsvd_1 = entry[0]; break; } } /* * If we've used all available slots, then the switch is * reporting back more devices that we can handle with this * single call. Return a failed status, and let GA_NXT handle * the overload. */ if (i == ha->max_fibre_devices) rval = QLA_FUNCTION_FAILED; } return (rval); } /** * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query. * @vha: HA context * @list: switch info entries to populate * * This command uses the old Exectute SNS Command mailbox routine. * * Returns 0 on success. */ static int qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint16_t i; struct sns_cmd_pkt *sns_cmd; for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GPN_ID */ /* Prepare SNS command request. */ sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD, GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE); /* Prepare SNS command arguments -- port_id. */ sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa; sns_cmd->p.cmd.param[1] = list[i].d_id.b.area; sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; /* Execute SNS command. */ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2032, "GPN_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gpn_data[8] != 0x80 || sns_cmd->p.gpn_data[9] != 0x02) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e, "GPN_ID failed, rejected request, gpn_rsp:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f, sns_cmd->p.gpn_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Save portname */ memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16], WWN_SIZE); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); } /** * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query. * @vha: HA context * @list: switch info entries to populate * * This command uses the old Exectute SNS Command mailbox routine. * * Returns 0 on success. */ static int qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint16_t i; struct sns_cmd_pkt *sns_cmd; for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GNN_ID */ /* Prepare SNS command request. */ sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD, GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE); /* Prepare SNS command arguments -- port_id. */ sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa; sns_cmd->p.cmd.param[1] = list[i].d_id.b.area; sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; /* Execute SNS command. */ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x203f, "GNN_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.gnn_data[8] != 0x80 || sns_cmd->p.gnn_data[9] != 0x02) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082, "GNN_ID failed, rejected request, gnn_rsp:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a, sns_cmd->p.gnn_data, 16); rval = QLA_FUNCTION_FAILED; } else { /* Save nodename */ memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x206e, "GID_PT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", list[i].node_name, list[i].port_name, list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } /* Last device exit. 
*/ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); } /** * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. * @vha: HA context * * This command uses the old Exectute SNS Command mailbox routine. * * Returns 0 on success. */ static int qla2x00_sns_rft_id(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; struct sns_cmd_pkt *sns_cmd; /* Issue RFT_ID. */ /* Prepare SNS command request. */ sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN, RFT_ID_SNS_DATA_SIZE); /* Prepare SNS command arguments -- port_id, FC-4 types */ sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = vha->d_id.b.area; sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */ /* Execute SNS command. */ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2060, "RFT_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.rft_data[8] != 0x80 || sns_cmd->p.rft_data[9] != 0x02) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083, "RFT_ID failed, rejected request rft_rsp:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080, sns_cmd->p.rft_data, 16); rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x2073, "RFT_ID exiting normally.\n"); } return (rval); } /** * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. * @vha: HA context * * This command uses the old Exectute SNS Command mailbox routine. * * Returns 0 on success. */ static int qla2x00_sns_rnn_id(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; struct sns_cmd_pkt *sns_cmd; /* Issue RNN_ID. */ /* Prepare SNS command request. */ sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN, RNN_ID_SNS_DATA_SIZE); /* Prepare SNS command arguments -- port_id, nodename. */ sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa; sns_cmd->p.cmd.param[1] = vha->d_id.b.area; sns_cmd->p.cmd.param[2] = vha->d_id.b.domain; sns_cmd->p.cmd.param[4] = vha->node_name[7]; sns_cmd->p.cmd.param[5] = vha->node_name[6]; sns_cmd->p.cmd.param[6] = vha->node_name[5]; sns_cmd->p.cmd.param[7] = vha->node_name[4]; sns_cmd->p.cmd.param[8] = vha->node_name[3]; sns_cmd->p.cmd.param[9] = vha->node_name[2]; sns_cmd->p.cmd.param[10] = vha->node_name[1]; sns_cmd->p.cmd.param[11] = vha->node_name[0]; /* Execute SNS command. */ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x204a, "RNN_ID Send SNS failed (%d).\n", rval); } else if (sns_cmd->p.rnn_data[8] != 0x80 || sns_cmd->p.rnn_data[9] != 0x02) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b, "RNN_ID failed, rejected request, rnn_rsp:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c, sns_cmd->p.rnn_data, 16); rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x204c, "RNN_ID exiting normally.\n"); } return (rval); } /** * qla2x00_mgmt_svr_login() - Login to fabric Management Service. * @vha: HA context * * Returns 0 on success. 
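* FDMI traffic is carried in CT frames addressed to the fabric management server at well-known address FF-FF-FAh, so the driver performs a fabric login to that address (loop id mgmt_svr_loop_id) before issuing the FDMI registrations that follow.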
*/ int qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) { int ret, rval; uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; ret = QLA_SUCCESS; if (vha->flags.management_server_logged_in) return ret; rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, mb, BIT_1); if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { if (rval == QLA_MEMORY_ALLOC_FAILED) ql_dbg(ql_dbg_disc, vha, 0x2085, "Failed management_server login: loopid=%x " "rval=%d\n", vha->mgmt_svr_loop_id, rval); else ql_dbg(ql_dbg_disc, vha, 0x2024, "Failed management_server login: loopid=%x " "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]); ret = QLA_FUNCTION_FAILED; } else vha->flags.management_server_logged_in = 1; return ret; } /** * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. * @vha: HA context * @req_size: request size in bytes * @rsp_size: response size in bytes * * Returns a pointer to the @ha's ms_iocb. */ void * qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) { ms_iocb_entry_t *ms_pkt; struct qla_hw_data *ha = vha->hw; ms_pkt = ha->ms_iocb; memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); ms_pkt->entry_type = MS_IOCB_TYPE; ms_pkt->entry_count = 1; SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id); ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ms_pkt->cmd_dsd_count = cpu_to_le16(1); ms_pkt->total_dsd_count = cpu_to_le16(2); ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); ms_pkt->req_bytecount = cpu_to_le32(req_size); put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address); ms_pkt->req_dsd.length = ms_pkt->req_bytecount; put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address); ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount; return ms_pkt; } /** * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query. * @vha: HA context * @req_size: request size in bytes * @rsp_size: response size in bytes * * Returns a pointer to the @ha's ms_iocb. */ void * qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) { struct ct_entry_24xx *ct_pkt; struct qla_hw_data *ha = vha->hw; ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); ct_pkt->entry_type = CT_IOCB_TYPE; ct_pkt->entry_count = 1; ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); ct_pkt->cmd_dsd_count = cpu_to_le16(1); ct_pkt->rsp_dsd_count = cpu_to_le16(1); ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); ct_pkt->cmd_byte_count = cpu_to_le32(req_size); put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address); ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address); ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; return ct_pkt; } static void qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) { struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt = ha->ms_iocb; struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; if (IS_FWI2_CAPABLE(ha)) { ct_pkt->cmd_byte_count = cpu_to_le32(req_size); ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count; } else { ms_pkt->req_bytecount = cpu_to_le32(req_size); ms_pkt->req_dsd.length = ms_pkt->req_bytecount; } } /** * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query. 
* @p: CT request buffer * @cmd: GS command * @rsp_size: response size in bytes * * Returns a pointer to the intitialized @ct_req. */ static inline struct ct_sns_req * qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) { memset(p, 0, sizeof(struct ct_sns_pkt)); p->p.req.header.revision = 0x01; p->p.req.header.gs_type = 0xFA; p->p.req.header.gs_subtype = 0x10; p->p.req.command = cpu_to_be16(cmd); p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); return &p->p.req; } uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) { uint speeds = 0; if (IS_CNA_CAPABLE(ha)) return FDMI_PORT_SPEED_10GB; if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { if (ha->max_supported_speed == 2) { if (ha->min_supported_speed <= 6) speeds |= FDMI_PORT_SPEED_64GB; } if (ha->max_supported_speed == 2 || ha->max_supported_speed == 1) { if (ha->min_supported_speed <= 5) speeds |= FDMI_PORT_SPEED_32GB; } if (ha->max_supported_speed == 2 || ha->max_supported_speed == 1 || ha->max_supported_speed == 0) { if (ha->min_supported_speed <= 4) speeds |= FDMI_PORT_SPEED_16GB; } if (ha->max_supported_speed == 1 || ha->max_supported_speed == 0) { if (ha->min_supported_speed <= 3) speeds |= FDMI_PORT_SPEED_8GB; } if (ha->max_supported_speed == 0) { if (ha->min_supported_speed <= 2) speeds |= FDMI_PORT_SPEED_4GB; } return speeds; } if (IS_QLA2031(ha)) { if ((ha->pdev->subsystem_vendor == 0x103C) && ((ha->pdev->subsystem_device == 0x8002) || (ha->pdev->subsystem_device == 0x8086))) { speeds = FDMI_PORT_SPEED_16GB; } else { speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| FDMI_PORT_SPEED_4GB; } return speeds; } if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; if (IS_QLA24XX_TYPE(ha)) return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB| FDMI_PORT_SPEED_1GB; if (IS_QLA23XX(ha)) return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; return FDMI_PORT_SPEED_1GB; } uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha) { switch (ha->link_data_rate) { case PORT_SPEED_1GB: return FDMI_PORT_SPEED_1GB; case PORT_SPEED_2GB: return FDMI_PORT_SPEED_2GB; case PORT_SPEED_4GB: return FDMI_PORT_SPEED_4GB; case PORT_SPEED_8GB: return FDMI_PORT_SPEED_8GB; case PORT_SPEED_10GB: return FDMI_PORT_SPEED_10GB; case PORT_SPEED_16GB: return FDMI_PORT_SPEED_16GB; case PORT_SPEED_32GB: return FDMI_PORT_SPEED_32GB; case PORT_SPEED_64GB: return FDMI_PORT_SPEED_64GB; default: return FDMI_PORT_SPEED_UNKNOWN; } } /** * qla2x00_hba_attributes() - perform HBA attributes registration * @vha: HA context * @entries: number of entries to use * @callopt: Option to issue extended or standard FDMI * command parameter * * Returns 0 on success. */ static unsigned long qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, unsigned int callopt) { struct qla_hw_data *ha = vha->hw; struct new_utsname *p_sysid = utsname(); struct ct_fdmi_hba_attr *eiter; uint16_t alen; unsigned long size = 0; /* Nodename. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); alen = sizeof(eiter->a.node_name); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a0, "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); /* Manufacturer. 
*/ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); alen = scnprintf( eiter->a.manufacturer, sizeof(eiter->a.manufacturer), "%s", QLA2XXX_MANUFACTURER); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a1, "MANUFACTURER = %s.\n", eiter->a.manufacturer); /* Serial number. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); alen = 0; if (IS_FWI2_CAPABLE(ha)) { alen = qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num, sizeof(eiter->a.serial_num)); } if (!alen) { uint32_t sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; alen = scnprintf( eiter->a.serial_num, sizeof(eiter->a.serial_num), "%c%05d", 'A' + sn / 100000, sn % 100000); } alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a2, "SERIAL NUMBER = %s.\n", eiter->a.serial_num); /* Model name. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_MODEL); alen = scnprintf( eiter->a.model, sizeof(eiter->a.model), "%s", ha->model_number); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a3, "MODEL NAME = %s.\n", eiter->a.model); /* Model description. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); alen = scnprintf( eiter->a.model_desc, sizeof(eiter->a.model_desc), "%s", ha->model_desc); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a4, "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc); /* Hardware version. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); alen = 0; if (IS_FWI2_CAPABLE(ha)) { if (!alen) { alen = qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version, sizeof(eiter->a.hw_version)); } if (!alen) { alen = qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version, sizeof(eiter->a.hw_version)); } } if (!alen) { alen = scnprintf( eiter->a.hw_version, sizeof(eiter->a.hw_version), "HW:%s", ha->adapter_id); } alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a5, "HARDWARE VERSION = %s.\n", eiter->a.hw_version); /* Driver version. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION); alen = scnprintf( eiter->a.driver_version, sizeof(eiter->a.driver_version), "%s", qla2x00_version_str); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a6, "DRIVER VERSION = %s.\n", eiter->a.driver_version); /* Option ROM version. 
*/ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); alen = scnprintf( eiter->a.orom_version, sizeof(eiter->a.orom_version), "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a7, "OPTROM VERSION = %d.%02d.\n", eiter->a.orom_version[1], eiter->a.orom_version[0]); /* Firmware version */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, sizeof(eiter->a.fw_version)); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a8, "FIRMWARE VERSION = %s.\n", eiter->a.fw_version); /* OS Name and Version */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION); alen = 0; if (p_sysid) { alen = scnprintf( eiter->a.os_version, sizeof(eiter->a.os_version), "%s %s %s", p_sysid->sysname, p_sysid->release, p_sysid->machine); } if (!alen) { alen = scnprintf( eiter->a.os_version, sizeof(eiter->a.os_version), "%s %s", "Linux", fc_host_system_hostname(vha->host)); } alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a9, "OS VERSION = %s.\n", eiter->a.os_version); if (callopt == CALLOPT_FDMI1) goto done; /* MAX CT Payload Length */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2); alen = sizeof(eiter->a.max_ct_len); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20aa, "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len)); /* Node Symbolic Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME); alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name, sizeof(eiter->a.sym_name)); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ab, "SYMBOLIC NAME = %s.\n", eiter->a.sym_name); /* Vendor Specific information */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO); eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC); alen = sizeof(eiter->a.vendor_specific_info); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ac, "VENDOR SPECIFIC INFO = 0x%x.\n", be32_to_cpu(eiter->a.vendor_specific_info)); /* Num Ports */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS); eiter->a.num_ports = cpu_to_be32(1); alen = sizeof(eiter->a.num_ports); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ad, "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); /* Fabric Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME); memcpy(eiter->a.fabric_name, vha->fabric_node_name, sizeof(eiter->a.fabric_name)); alen = sizeof(eiter->a.fabric_name); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ae, "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); /* BIOS Version */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME); alen = scnprintf( eiter->a.bios_name, sizeof(eiter->a.bios_name), "BIOS 
%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20af, "BIOS NAME = %s\n", eiter->a.bios_name); /* Vendor Identifier */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER); alen = scnprintf( eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier), "%s", "QLGC"); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20b0, "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier); done: return size; } /** * qla2x00_port_attributes() - perform Port attributes registration * @vha: HA context * @entries: number of entries to use * @callopt: Option to issue extended or standard FDMI * command parameter * * Returns 0 on success. */ static unsigned long qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, unsigned int callopt) { struct qla_hw_data *ha = vha->hw; struct new_utsname *p_sysid = utsname(); char *hostname = p_sysid ? p_sysid->nodename : fc_host_system_hostname(vha->host); struct ct_fdmi_port_attr *eiter; uint16_t alen; unsigned long size = 0; /* FC4 types. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); eiter->a.fc4_types[0] = 0x00; eiter->a.fc4_types[1] = 0x00; eiter->a.fc4_types[2] = 0x01; eiter->a.fc4_types[3] = 0x00; alen = sizeof(eiter->a.fc4_types); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c0, "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types); if (vha->flags.nvme_enabled) { eiter->a.fc4_types[6] = 1; /* NVMe type 28h */ ql_dbg(ql_dbg_disc, vha, 0x211f, "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n", eiter->a.fc4_types[6]); } /* Supported speed. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); eiter->a.sup_speed = cpu_to_be32( qla25xx_fdmi_port_speed_capability(ha)); alen = sizeof(eiter->a.sup_speed); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c1, "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed)); /* Current speed. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED); eiter->a.cur_speed = cpu_to_be32( qla25xx_fdmi_port_speed_currently(ha)); alen = sizeof(eiter->a.cur_speed); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c2, "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed)); /* Max frame size. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size); alen = sizeof(eiter->a.max_frame_size); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c3, "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size)); /* OS device name. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); alen = scnprintf( eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name), "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c4, "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name); /* Hostname. 
*/ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME); if (!*hostname || !strncmp(hostname, "(none)", 6)) hostname = "Linux-default"; alen = scnprintf( eiter->a.host_name, sizeof(eiter->a.host_name), "%s", hostname); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c5, "HOSTNAME = %s.\n", eiter->a.host_name); if (callopt == CALLOPT_FDMI1) goto done; /* Node Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name)); alen = sizeof(eiter->a.node_name); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c6, "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); /* Port Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_NAME); memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name)); alen = sizeof(eiter->a.port_name); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c7, "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name)); /* Port Symbolic Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name, sizeof(eiter->a.port_sym_name)); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c8, "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name); /* Port Type */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_TYPE); eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE); alen = sizeof(eiter->a.port_type); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c9, "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type)); /* Supported Class of Service */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3); alen = sizeof(eiter->a.port_supported_cos); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ca, "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos)); /* Port Fabric Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); memcpy(eiter->a.fabric_name, vha->fabric_node_name, sizeof(eiter->a.fabric_name)); alen = sizeof(eiter->a.fabric_name); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cb, "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); /* FC4_type */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); eiter->a.port_fc4_type[0] = 0x00; eiter->a.port_fc4_type[1] = 0x00; eiter->a.port_fc4_type[2] = 0x01; eiter->a.port_fc4_type[3] = 0x00; alen = sizeof(eiter->a.port_fc4_type); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cc, "PORT ACTIVE FC4 TYPE = %016llx.\n", *(uint64_t *)eiter->a.port_fc4_type); /* Port State */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_STATE); eiter->a.port_state = cpu_to_be32(2); alen = sizeof(eiter->a.port_state); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cd, "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state)); /* Number of Ports */ eiter = entries + size; eiter->type = 
cpu_to_be16(FDMI_PORT_COUNT); eiter->a.num_ports = cpu_to_be32(1); alen = sizeof(eiter->a.num_ports); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ce, "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); /* Port Identifier */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER); eiter->a.port_id = cpu_to_be32(vha->d_id.b24); alen = sizeof(eiter->a.port_id); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cf, "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id)); if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan) goto done; /* Smart SAN Service Category (Populate Smart SAN Initiator)*/ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE); alen = scnprintf( eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service), "%s", "Smart SAN Initiator"); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d0, "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service); /* Smart SAN GUID (NWWN+PWWN) */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID); memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE); memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE); alen = sizeof(eiter->a.smartsan_guid); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d1, "Smart SAN GUID = %016llx-%016llx\n", wwn_to_u64(eiter->a.smartsan_guid), wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE)); /* Smart SAN Version (populate "Smart SAN Version 1.0") */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION); alen = scnprintf( eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version), "%s", "Smart SAN Version 2.0"); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d2, "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version); /* Smart SAN Product Name (Specify Adapter Model No) */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME); alen = scnprintf(eiter->a.smartsan_prod_name, sizeof(eiter->a.smartsan_prod_name), "ISP%04x", ha->pdev->device); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d3, "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name); /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO); eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1); alen = sizeof(eiter->a.smartsan_port_info); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d4, "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info); /* Smart SAN Security Support */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT); eiter->a.smartsan_security_support = cpu_to_be32(1); alen = sizeof(eiter->a.smartsan_security_support); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d6, "SMARTSAN SECURITY SUPPORT = %d\n", be32_to_cpu(eiter->a.smartsan_security_support)); done: return size; } /** * qla2x00_fdmi_rhba() - perform RHBA FDMI registration * @vha: HA context * @callopt: Option to issue FDMI registration * * Returns 0 on success. 
*/ static int qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) { struct qla_hw_data *ha = vha->hw; unsigned long size = 0; unsigned int rval, count; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; void *entries; count = callopt != CALLOPT_FDMI1 ? FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; size = RHBA_RSP_SIZE; ql_dbg(ql_dbg_disc, vha, 0x20e0, "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size); /* Request size adjusted after CT preparation */ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); /* Prepare CT request */ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command entries */ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, sizeof(ct_req->req.rhba.hba_identifier)); size += sizeof(ct_req->req.rhba.hba_identifier); ct_req->req.rhba.entry_count = cpu_to_be32(1); size += sizeof(ct_req->req.rhba.entry_count); memcpy(ct_req->req.rhba.port_name, vha->port_name, sizeof(ct_req->req.rhba.port_name)); size += sizeof(ct_req->req.rhba.port_name); /* Attribute count */ ct_req->req.rhba.attrs.count = cpu_to_be32(count); size += sizeof(ct_req->req.rhba.attrs.count); /* Attribute block */ entries = &ct_req->req.rhba.attrs.entry; size += qla2x00_hba_attributes(vha, entries, callopt); /* Update MS request size. */ qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x20e1, "RHBA %016llx %016llx.\n", wwn_to_u64(ct_req->req.rhba.hba_identifier), wwn_to_u64(ct_req->req.rhba.port_name)); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2, entries, size); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(*ha->ms_iocb)); if (rval) { ql_dbg(ql_dbg_disc, vha, 0x20e3, "RHBA iocb failed (%d).\n", rval); return rval; } rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); if (rval) { if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { ql_dbg(ql_dbg_disc, vha, 0x20e4, "RHBA already registered.\n"); return QLA_ALREADY_REGISTERED; } ql_dbg(ql_dbg_disc, vha, 0x20e5, "RHBA failed, CT Reason %#x, CT Explanation %#x\n", ct_rsp->header.reason_code, ct_rsp->header.explanation_code); return rval; } ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n"); return rval; } static int qla2x00_fdmi_dhba(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; /* Issue RPA */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE, DHBA_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command arguments -- portname. 
*/ memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2036, "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2037, "DHBA issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x2038, "DHBA exiting normally.\n"); } return rval; } /** * qla2x00_fdmi_rprt() - perform RPRT registration * @vha: HA context * @callopt: Option to issue extended or standard FDMI * command parameter * * Returns 0 on success. */ static int qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) { struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); struct qla_hw_data *ha = vha->hw; ulong size = 0; uint rval, count; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; void *entries; count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? FDMI2_SMARTSAN_PORT_ATTR_COUNT : callopt != CALLOPT_FDMI1 ? FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; size = RPRT_RSP_SIZE; ql_dbg(ql_dbg_disc, vha, 0x20e8, "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size); /* Request size adjusted after CT preparation */ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); /* Prepare CT request */ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command entries */ memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name, sizeof(ct_req->req.rprt.hba_identifier)); size += sizeof(ct_req->req.rprt.hba_identifier); memcpy(ct_req->req.rprt.port_name, vha->port_name, sizeof(ct_req->req.rprt.port_name)); size += sizeof(ct_req->req.rprt.port_name); /* Attribute count */ ct_req->req.rprt.attrs.count = cpu_to_be32(count); size += sizeof(ct_req->req.rprt.attrs.count); /* Attribute block */ entries = ct_req->req.rprt.attrs.entry; size += qla2x00_port_attributes(vha, entries, callopt); /* Update MS request size. */ qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x20e9, "RPRT %016llx %016llx.\n", wwn_to_u64(ct_req->req.rprt.port_name), wwn_to_u64(ct_req->req.rprt.port_name)); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea, entries, size); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(*ha->ms_iocb)); if (rval) { ql_dbg(ql_dbg_disc, vha, 0x20eb, "RPRT iocb failed (%d).\n", rval); return rval; } rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT"); if (rval) { if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { ql_dbg(ql_dbg_disc, vha, 0x20ec, "RPRT already registered.\n"); return QLA_ALREADY_REGISTERED; } ql_dbg(ql_dbg_disc, vha, 0x20ed, "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n", ct_rsp->header.reason_code, ct_rsp->header.explanation_code); return rval; } ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n"); return rval; } /** * qla2x00_fdmi_rpa() - perform RPA registration * @vha: HA context * @callopt: Option to issue FDMI registration * * Returns 0 on success. 
*/ static int qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt) { struct qla_hw_data *ha = vha->hw; ulong size = 0; uint rval, count; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; void *entries; count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ? FDMI2_SMARTSAN_PORT_ATTR_COUNT : callopt != CALLOPT_FDMI1 ? FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT; size = callopt != CALLOPT_FDMI1 ? SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE; ql_dbg(ql_dbg_disc, vha, 0x20f0, "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size); /* Request size adjusted after CT preparation */ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size); /* Prepare CT request */ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command entries. */ memcpy(ct_req->req.rpa.port_name, vha->port_name, sizeof(ct_req->req.rpa.port_name)); size += sizeof(ct_req->req.rpa.port_name); /* Attribute count */ ct_req->req.rpa.attrs.count = cpu_to_be32(count); size += sizeof(ct_req->req.rpa.attrs.count); /* Attribute block */ entries = ct_req->req.rpa.attrs.entry; size += qla2x00_port_attributes(vha, entries, callopt); /* Update MS request size. */ qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x20f1, "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name)); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2, entries, size); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(*ha->ms_iocb)); if (rval) { ql_dbg(ql_dbg_disc, vha, 0x20f3, "RPA iocb failed (%d).\n", rval); return rval; } rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); if (rval) { if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { ql_dbg(ql_dbg_disc, vha, 0x20f4, "RPA already registered.\n"); return QLA_ALREADY_REGISTERED; } ql_dbg(ql_dbg_disc, vha, 0x20f5, "RPA failed, CT Reason code: %#x, CT Explanation %#x\n", ct_rsp->header.reason_code, ct_rsp->header.explanation_code); return rval; } ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n"); return rval; } /** * qla2x00_fdmi_register() - * @vha: HA context * * Returns 0 on success. 
*/ int qla2x00_fdmi_register(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLAFX00(ha)) return rval; rval = qla2x00_mgmt_svr_login(vha); if (rval) return rval; /* For npiv/vport send rprt only */ if (vha->vp_idx) { if (ql2xsmartsan) rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN); if (rval || !ql2xsmartsan) rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2); if (rval) rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1); return rval; } /* Try fdmi2 first, if fails then try fdmi1 */ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); if (rval) { if (rval != QLA_ALREADY_REGISTERED) goto try_fdmi; rval = qla2x00_fdmi_dhba(vha); if (rval) goto try_fdmi; rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2); if (rval) goto try_fdmi; } if (ql2xsmartsan) rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN); if (rval || !ql2xsmartsan) rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2); if (rval) goto try_fdmi; return rval; try_fdmi: rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); if (rval) { if (rval != QLA_ALREADY_REGISTERED) return rval; rval = qla2x00_fdmi_dhba(vha); if (rval) return rval; rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1); if (rval) return rval; } rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1); return rval; } /** * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query. * @vha: HA context * @list: switch info entries to populate * * Returns 0 on success. */ int qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; uint16_t i; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GFPN_ID_REQ_SIZE; arg.rsp_size = GFPN_ID_RSP_SIZE; arg.nport_handle = NPH_SNS; for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, GFPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2023, "GFPN_ID issue IOCB failed (%d).\n", rval); break; } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFPN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; break; } else { /* Save fabric portname */ memcpy(list[i].fabric_port_name, ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE); } /* Last device exit. 
*/ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); } static inline struct ct_sns_req * qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) { memset(p, 0, sizeof(struct ct_sns_pkt)); p->p.req.header.revision = 0x01; p->p.req.header.gs_type = 0xFA; p->p.req.header.gs_subtype = 0x01; p->p.req.command = cpu_to_be16(cmd); p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); return &p->p.req; } static uint16_t qla2x00_port_speed_capability(uint16_t speed) { switch (speed) { case BIT_15: return PORT_SPEED_1GB; case BIT_14: return PORT_SPEED_2GB; case BIT_13: return PORT_SPEED_4GB; case BIT_12: return PORT_SPEED_10GB; case BIT_11: return PORT_SPEED_8GB; case BIT_10: return PORT_SPEED_16GB; case BIT_8: return PORT_SPEED_32GB; case BIT_7: return PORT_SPEED_64GB; default: return PORT_SPEED_UNKNOWN; } } /** * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. * @vha: HA context * @list: switch info entries to populate * * Returns 0 on success. */ int qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) { int rval; uint16_t i; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct ct_arg arg; if (!IS_IIDMA_CAPABLE(ha)) return QLA_FUNCTION_FAILED; if (!ha->flags.gpsc_supported) return QLA_FUNCTION_FAILED; rval = qla2x00_mgmt_svr_login(vha); if (rval) return rval; arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GPSC_REQ_SIZE; arg.rsp_size = GPSC_RSP_SIZE; arg.nport_handle = vha->mgmt_svr_loop_id; for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GFPN_ID */ /* Prepare common MS IOCB */ ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, GPSC_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_name */ memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name, WWN_SIZE); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2059, "GPSC issue IOCB failed (%d).\n", rval); } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPSC")) != QLA_SUCCESS) { /* FM command unsupported? */ if (rval == QLA_INVALID_COMMAND && (ct_rsp->header.reason_code == CT_REASON_INVALID_COMMAND_CODE || ct_rsp->header.reason_code == CT_REASON_COMMAND_UNSUPPORTED)) { ql_dbg(ql_dbg_disc, vha, 0x205a, "GPSC command unsupported, disabling " "query.\n"); ha->flags.gpsc_supported = 0; rval = QLA_FUNCTION_FAILED; break; } rval = QLA_FUNCTION_FAILED; } else { list->fp_speed = qla2x00_port_speed_capability( be16_to_cpu(ct_rsp->rsp.gpsc.speed)); ql_dbg(ql_dbg_disc, vha, 0x205b, "GPSC ext entry - fpn " "%8phN speeds=%04x speed=%04x.\n", list[i].fabric_port_name, be16_to_cpu(ct_rsp->rsp.gpsc.speeds), be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); } /** * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query. 
* * @vha: HA context * @list: switch info entries to populate * */ void qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; uint8_t fcp_scsi_features = 0, nvme_features = 0; struct ct_arg arg; for (i = 0; i < ha->max_fibre_devices; i++) { /* Set default FC4 Type as UNKNOWN so the default is to * Process this port */ list[i].fc4_type = 0; /* Do not attempt GFF_ID if we are not FWI_2 capable */ if (!IS_FWI2_CAPABLE(ha)) continue; arg.iocb = ha->ms_iocb; arg.req_dma = ha->ct_sns_dma; arg.rsp_dma = ha->ct_sns_dma; arg.req_size = GFF_ID_REQ_SIZE; arg.rsp_size = GFF_ID_RSP_SIZE; arg.nport_handle = NPH_SNS; /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, GFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x205c, "GFF_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GFF_ID") != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x205d, "GFF_ID IOCB status had a failure status code.\n"); } else { fcp_scsi_features = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; fcp_scsi_features &= 0x0f; if (fcp_scsi_features) { list[i].fc4_type = FS_FC4TYPE_FCP; list[i].fc4_features = fcp_scsi_features; } nvme_features = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; nvme_features &= 0xf; if (nvme_features) { list[i].fc4_type |= FS_FC4TYPE_NVME; list[i].fc4_features = nvme_features; } } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } } int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_GPSC); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; return qla2x00_post_work(vha, e); } void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) { struct fc_port *fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id); if (fcport->disc_state == DSC_DELETE_PEND) return; /* We will figure-out what happen after AUTH completes */ if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) return; if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. 
*/ ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed\n", __func__, fcport->port_name); return; } else if (ea->sp->gen1 != fcport->rscn_gen) { return; } qla_post_iidma_work(vha, fcport); } static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; fc_port_t *fcport = sp->fcport; struct ct_sns_rsp *ct_rsp; struct event_arg ea; ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; ql_dbg(ql_dbg_disc, vha, 0x2053, "Async done-%s res %x, WWPN %8phC \n", sp->name, res, fcport->port_name); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (res == QLA_FUNCTION_TIMEOUT) goto done; if (res == (DID_ERROR << 16)) { /* entry status error */ goto done; } else if (res) { if ((ct_rsp->header.reason_code == CT_REASON_INVALID_COMMAND_CODE) || (ct_rsp->header.reason_code == CT_REASON_COMMAND_UNSUPPORTED)) { ql_dbg(ql_dbg_disc, vha, 0x2019, "GPSC command unsupported, disabling query.\n"); ha->flags.gpsc_supported = 0; goto done; } } else { fcport->fp_speed = qla2x00_port_speed_capability( be16_to_cpu(ct_rsp->rsp.gpsc.speed)); ql_dbg(ql_dbg_disc, vha, 0x2054, "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", sp->name, fcport->fabric_port_name, be16_to_cpu(ct_rsp->rsp.gpsc.speeds), be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } memset(&ea, 0, sizeof(ea)); ea.rc = res; ea.fcport = fcport; ea.sp = sp; qla24xx_handle_gpsc_event(vha, &ea); done: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; srb_t *sp; if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "gpsc"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla24xx_async_gpsc_sp_done); /* CT_IU preamble */ ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, GPSC_RSP_SIZE); /* GPSC req */ memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name, WWN_SIZE); sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; ql_dbg(ql_dbg_disc, vha, 0x205e, "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", sp->name, fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) { struct srb_iocb *c = &sp->u.iocb_cmd; switch (sp->type) { case SRB_ELS_DCMD: qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); break; case SRB_CT_PTHRU_CMD: default: if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); 
sp->u.iocb_cmd.u.ctarg.rsp = NULL; } break; } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } void qla24xx_async_gffid_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; fc_port_t *fcport = sp->fcport; struct ct_sns_rsp *ct_rsp; uint8_t fc4_scsi_feat; uint8_t fc4_nvme_feat; ql_dbg(ql_dbg_disc, vha, 0x2133, "Async done-%s res %x ID %x. %8phC\n", sp->name, res, fcport->d_id.b24, fcport->port_name); ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp; fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; sp->rc = res; /* * FC-GS-7, 5.2.3.12 FC-4 Features - format * The format of the FC-4 Features object, as defined by the FC-4, * Shall be an array of 4-bit values, one for each type code value */ if (!res) { if (fc4_scsi_feat & 0xf) { /* w1 b00:03 */ fcport->fc4_type = FS_FC4TYPE_FCP; fcport->fc4_features = fc4_scsi_feat & 0xf; } if (fc4_nvme_feat & 0xf) { /* w5 [00:03]/28h */ fcport->fc4_type |= FS_FC4TYPE_NVME; fcport->fc4_features = fc4_nvme_feat & 0xf; } } if (sp->flags & SRB_WAKEUP_ON_COMP) { complete(sp->comp); } else { if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); /* we should not be here */ dump_stack(); } } /* Get FC4 Feature with Nport ID. */ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; srb_t *sp; DECLARE_COMPLETION_ONSTACK(comp); /* this routine does not have handling for no wait */ if (!vha->flags.online || !wait) return rval; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) return rval; sp->type = SRB_CT_PTHRU_CMD; sp->name = "gffid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla24xx_async_gffid_sp_done); sp->comp = &comp; sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns request.\n", __func__); goto done_free_sp; } sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xd041, "%s: Failed to allocate ct_sns response.\n", __func__); goto done_free_sp; } /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE); ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain; ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area; ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa; sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = 
NPH_SNS; rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; goto done_free_sp; } else { ql_dbg(ql_dbg_disc, vha, 0x3074, "Async-%s hdl=%x portid %06x\n", sp->name, sp->handle, fcport->d_id.b24); } wait_for_completion(sp->comp); rval = sp->rc; done_free_sp: if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } /* GPN_FT + GNN_FT*/ static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; unsigned long flags; u64 twwn; int rc = 0; if (!ha->num_vhosts) return 0; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { twwn = wwn_to_u64(vp->port_name); if (wwn == twwn) { rc = 1; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return rc; } void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) { fc_port_t *fcport; u32 i, rc; bool found; struct fab_scan_rp *rp, *trp; unsigned long flags; u8 recheck = 0; u16 dup = 0, dup_cnt = 0; ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s enter\n", __func__); if (sp->gen1 != vha->hw->base_qpair->chip_reset) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s scan stop due to chip reset %x/%x\n", sp->name, sp->gen1, vha->hw->base_qpair->chip_reset); goto out; } rc = sp->rc; if (rc) { vha->scan.scan_retry++; if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); goto out; } else { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: Fabric scan failed for %d retries.\n", __func__, vha->scan.scan_retry); /* * Unable to scan any rports. logout loop below * will unregister all sessions. */ list_for_each_entry(fcport, &vha->vp_fcports, list) { if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { fcport->scan_state = QLA_FCPORT_SCAN; if (fcport->loop_id == FC_NO_LOOP_ID) fcport->logout_on_delete = 0; else fcport->logout_on_delete = 1; } } goto login_logout; } } vha->scan.scan_retry = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->scan_state = QLA_FCPORT_SCAN; for (i = 0; i < vha->hw->max_fibre_devices; i++) { u64 wwn; int k; rp = &vha->scan.l[i]; found = false; wwn = wwn_to_u64(rp->port_name); if (wwn == 0) continue; /* Remove duplicate NPORT ID entries from switch data base */ for (k = i + 1; k < vha->hw->max_fibre_devices; k++) { trp = &vha->scan.l[k]; if (rp->id.b24 == trp->id.b24) { dup = 1; dup_cnt++; ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n", rp->id.b24, rp->port_name, trp->port_name); memset(trp, 0, sizeof(*trp)); } } if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) continue; /* Bypass reserved domain fields. */ if ((rp->id.b.domain & 0xf0) == 0xf0) continue; /* Bypass virtual ports of the same host. 
*/ if (qla2x00_is_a_vp(vha, wwn)) continue; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) continue; fcport->scan_state = QLA_FCPORT_FOUND; fcport->last_rscn_gen = fcport->rscn_gen; fcport->fc4_type = rp->fc4type; found = true; if (fcport->scan_needed) { if (NVME_PRIORITY(vha->hw, fcport)) fcport->do_prli_nvme = 1; else fcport->do_prli_nvme = 0; } /* * If device was not a fabric device before. */ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; } else if (fcport->d_id.b24 != rp->id.b24 || (fcport->scan_needed && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_NVME_INITIATOR)) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; fcport->scan_needed = 0; break; } if (!found) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post new sess\n", __func__, __LINE__, rp->port_name); qla24xx_post_newsess_work(vha, &rp->id, rp->port_name, rp->node_name, NULL, rp->fc4type); } } if (dup) { ql_log(ql_log_warn, vha, 0xffff, "Detected %d duplicate NPORT ID(s) from switch data base\n", dup_cnt); } login_logout: /* * Logout all previous fabric dev marked lost, except FCP2 devices. */ list_for_each_entry(fcport, &vha->vp_fcports, list) { if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { fcport->scan_needed = 0; continue; } if (fcport->scan_state != QLA_FCPORT_FOUND) { bool do_delete = false; if (fcport->scan_needed && fcport->disc_state == DSC_LOGIN_PEND) { /* Cable got disconnected after we sent * a login. Do delete to prevent timeout. */ fcport->logout_on_delete = 1; do_delete = true; } fcport->scan_needed = 0; if (((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) || do_delete) { if (fcport->loop_id != FC_NO_LOOP_ID) { if (fcport->flags & FCF_FCP2_DEVICE) continue; ql_log(ql_log_warn, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); fcport->tgt_link_down_time = 0; qlt_schedule_sess_for_deletion(fcport); continue; } } } else { if (fcport->scan_needed || fcport->disc_state != DSC_LOGIN_COMPLETE) { if (fcport->login_retry == 0) { fcport->login_retry = vha->hw->login_retry_count; ql_dbg(ql_dbg_disc, vha, 0x20a3, "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", fcport->port_name, fcport->loop_id, fcport->login_retry); } fcport->scan_needed = 0; qla24xx_fcport_handle_login(vha, fcport); } } } recheck = 1; out: qla24xx_sp_unmap(vha, sp); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); if (recheck) { list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->scan_needed) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); break; } } } } static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, srb_t *sp, int cmd) { struct qla_work_evt *e; if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) return QLA_PARAMETER_ERROR; e = qla2x00_alloc_work(vha, cmd); if (!e) return QLA_FUNCTION_FAILED; e->u.iosb.sp = sp; return qla2x00_post_work(vha, e); } static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, srb_t *sp, int cmd) { struct qla_work_evt *e; if (cmd != QLA_EVT_GPNFT) return QLA_PARAMETER_ERROR; e = qla2x00_alloc_work(vha, cmd); if (!e) return QLA_FUNCTION_FAILED; e->u.gpnft.fc4_type = FC4_TYPE_NVME; e->u.gpnft.sp = sp; return qla2x00_post_work(vha, e); } static void 
qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, struct srb *sp) { struct qla_hw_data *ha = vha->hw; int num_fibre_dev = ha->max_fibre_devices; struct ct_sns_req *ct_req = (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; struct ct_sns_gpnft_rsp *ct_rsp = (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; struct ct_sns_gpn_ft_data *d; struct fab_scan_rp *rp; u16 cmd = be16_to_cpu(ct_req->command); u8 fc4_type = sp->gen2; int i, j, k; port_id_t id; u8 found; u64 wwn; j = 0; for (i = 0; i < num_fibre_dev; i++) { d = &ct_rsp->entries[i]; id.b.rsvd_1 = 0; id.b.domain = d->port_id[0]; id.b.area = d->port_id[1]; id.b.al_pa = d->port_id[2]; wwn = wwn_to_u64(d->port_name); if (id.b24 == 0 || wwn == 0) continue; if (fc4_type == FC4_TYPE_FCP_SCSI) { if (cmd == GPN_FT_CMD) { rp = &vha->scan.l[j]; rp->id = id; memcpy(rp->port_name, d->port_name, 8); j++; rp->fc4type = FS_FC4TYPE_FCP; } else { for (k = 0; k < num_fibre_dev; k++) { rp = &vha->scan.l[k]; if (id.b24 == rp->id.b24) { memcpy(rp->node_name, d->port_name, 8); break; } } } } else { /* Search if the fibre device supports FC4_TYPE_NVME */ if (cmd == GPN_FT_CMD) { found = 0; for (k = 0; k < num_fibre_dev; k++) { rp = &vha->scan.l[k]; if (!memcmp(rp->port_name, d->port_name, 8)) { /* * Supports FC-NVMe & FCP */ rp->fc4type |= FS_FC4TYPE_NVME; found = 1; break; } } /* We found new FC-NVMe only port */ if (!found) { for (k = 0; k < num_fibre_dev; k++) { rp = &vha->scan.l[k]; if (wwn_to_u64(rp->port_name)) { continue; } else { rp->id = id; memcpy(rp->port_name, d->port_name, 8); rp->fc4type = FS_FC4TYPE_NVME; break; } } } } else { for (k = 0; k < num_fibre_dev; k++) { rp = &vha->scan.l[k]; if (id.b24 == rp->id.b24) { memcpy(rp->node_name, d->port_name, 8); break; } } } } } } static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct ct_sns_req *ct_req = (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; u16 cmd = be16_to_cpu(ct_req->command); u8 fc4_type = sp->gen2; unsigned long flags; int rc; /* gen2 field is holding the fc4type */ ql_dbg(ql_dbg_disc, vha, 0xffff, "Async done-%s res %x FC4Type %x\n", sp->name, res, sp->gen2); sp->rc = res; if (res) { unsigned long flags; const char *name = sp->name; if (res == QLA_OS_TIMER_EXPIRED) { /* switch is ignoring all commands. * This might be a zone disable behavior. * This means we hit 64s timeout. * 22s GPNFT + 44s Abort = 64s */ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: Switch Zone check please .\n", name); qla2x00_mark_all_devices_lost(vha); } /* * We are in an Interrupt context, queue up this * sp for GNNFT_DONE work. This will allow all * the resource to get freed up. 
*/ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, QLA_EVT_GNNFT_DONE); if (rc) { /* Cleanup here to prevent memory leak */ qla24xx_sp_unmap(vha, sp); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; vha->scan.scan_retry++; spin_unlock_irqrestore(&vha->work_lock, flags); if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { ql_dbg(ql_dbg_disc, vha, 0xffff, "Async done-%s rescan failed on all retries.\n", name); } } return; } qla2x00_find_free_fcp_nvme_slot(vha, sp); if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && cmd == GNN_FT_CMD) { spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); sp->rc = res; rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT); if (rc) { qla24xx_sp_unmap(vha, sp); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } return; } if (cmd == GPN_FT_CMD) { rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, QLA_EVT_GPNFT_DONE); } else { rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, QLA_EVT_GNNFT_DONE); } if (rc) { qla24xx_sp_unmap(vha, sp); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return; } } /* * Get WWNN list for fc4_type * * It is assumed the same SRB is re-used from GPNFT to avoid * mem free & re-alloc */ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, u8 fc4_type) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; struct ct_sns_pkt *ct_sns; unsigned long flags; if (!vha->flags.online) { spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); goto done_free_sp; } if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xffff, "%s: req %p rsp %p are not setup\n", __func__, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.rsp); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); WARN_ON(1); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); goto done_free_sp; } ql_dbg(ql_dbg_disc, vha, 0xfffff, "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n", __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, sp->u.iocb_cmd.u.ctarg.req_size); sp->type = SRB_CT_PTHRU_CMD; sp->name = "gnnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; sp->gen2 = fc4_type; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_gpnft_gnnft_sp_done); memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, sp->u.iocb_cmd.u.ctarg.rsp_size); /* GPN_FT req */ ct_req->req.gpn_ft.port_type = fc4_type; sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, sp->handle, ct_req->req.gpn_ft.port_type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { goto done_free_sp; } return rval; done_free_sp: if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, 
sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; if (vha->scan.scan_flags == 0) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); vha->scan.scan_flags |= SF_QUEUED; schedule_delayed_work(&vha->scan.scan_work, 5); } spin_unlock_irqrestore(&vha->work_lock, flags); return rval; } /* GNNFT */ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s enter\n", __func__); qla24xx_async_gnnft(vha, sp, sp->gen2); } /* Get WWPN list for certain fc4_type */ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; struct ct_sns_pkt *ct_sns; u32 rspsz; unsigned long flags; ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s enter\n", __func__); if (!vha->flags.online) return rval; spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags & SF_SCANNING) { spin_unlock_irqrestore(&vha->work_lock, flags); ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s: scan active\n", __func__); return rval; } vha->scan.scan_flags |= SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); if (fc4_type == FC4_TYPE_FCP_SCSI) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s: Performing FCP Scan\n", __func__); if (sp) { /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) { spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); return rval; } sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { ql_log(ql_log_warn, vha, 0xffff, "Failed to allocate ct_sns request.\n"); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); qla2x00_rel_sp(sp); return rval; } sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; rspsz = sizeof(struct ct_sns_gpnft_rsp) + vha->hw->max_fibre_devices * sizeof(struct ct_sns_gpn_ft_data); sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xffff, "Failed to allocate ct_sns request.\n"); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; /* ref: INIT */ qla2x00_rel_sp(sp); return rval; } sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz; ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s scan list size %d\n", __func__, vha->scan.size); memset(vha->scan.l, 0, vha->scan.size); } else if (!sp) { ql_dbg(ql_dbg_disc, vha, 0xffff, "NVME scan did not provide SP\n"); return rval; } sp->type = 
SRB_CT_PTHRU_CMD; sp->name = "gpnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; sp->gen2 = fc4_type; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_gpnft_gnnft_sp_done); rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); /* GPN_FT req */ ct_req->req.gpn_ft.port_type = fc4_type; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, sp->handle, ct_req->req.gpn_ft.port_type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { goto done_free_sp; } return rval; done_free_sp: if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.req_allocated_size, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; } if (sp->u.iocb_cmd.u.ctarg.rsp) { dma_free_coherent(&vha->hw->pdev->dev, sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, sp->u.iocb_cmd.u.ctarg.rsp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; if (vha->scan.scan_flags == 0) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s: Scan scheduled.\n", __func__); vha->scan.scan_flags |= SF_QUEUED; schedule_delayed_work(&vha->scan.scan_work, 5); } spin_unlock_irqrestore(&vha->work_lock, flags); return rval; } void qla_scan_work_fn(struct work_struct *work) { struct fab_scan *s = container_of(to_delayed_work(work), struct fab_scan, scan_work); struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host, scan); unsigned long flags; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule loop resync\n", __func__); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_QUEUED; spin_unlock_irqrestore(&vha->work_lock, flags); } /* GPFN_ID */ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, fcport->rscn_gen, ea->sp->gen1, vha->fcport_count); if (fcport->disc_state == DSC_DELETE_PEND) return; if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. 
*/ ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed\n", __func__, fcport->port_name); return; } else if (ea->sp->gen1 != fcport->rscn_gen) { return; } qla24xx_post_gpsc_work(vha, fcport); } static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; fc_port_t *fcport = sp->fcport; u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name; struct event_arg ea; u64 wwn; wwn = wwn_to_u64(fpn); if (wwn) memcpy(fcport->fabric_port_name, fpn, WWN_SIZE); memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; ea.sp = sp; ea.rc = res; ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s res %x, WWPN %8phC %8phC\n", sp->name, res, fcport->port_name, fcport->fabric_port_name); qla24xx_handle_gfpnid_event(vha, &ea); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; srb_t *sp; if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "gfpnid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_gfpnid_sp_done); /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD, GFPN_ID_RSP_SIZE); /* GFPN_ID req */ ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); /* req & rsp use the same buffer */ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", sp->name, fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; int ls; ls = atomic_read(&vha->loop_state); if (((ls != LOOP_READY) && (ls != LOOP_UP)) || test_bit(UNLOADING, &vha->dpc_flags)) return 0; e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; return qla2x00_post_work(vha, e); }
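/*
 * Illustrative sketch (not part of the driver): the FDMI attribute-encoding
 * pattern used by qla2x00_hba_attributes()/qla2x00_port_attributes() above.
 * Each attribute carries a big-endian type and length header followed by a
 * value padded to a 4-byte boundary; "alen" accumulates value length + pad +
 * header and "size" advances by that amount so the next attribute starts on
 * an aligned offset.  The 4-byte header and 4-byte alignment used here are
 * assumptions inferred from the alen arithmetic, not taken from the qla2xxx
 * headers; struct layout, names, and the attribute type code are made up for
 * the demo.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons()/ntohs() */

struct demo_fdmi_attr {
	uint16_t type;		/* big-endian attribute type */
	uint16_t len;		/* big-endian total length (header + value) */
	char     value[64];	/* zero-padded ASCII value */
};

/* Pad a value length up to the next 4-byte boundary (assumed alignment). */
static size_t demo_pad4(size_t len)
{
	return (4 - (len % 4)) % 4;
}

/* Build one string attribute; returns the number of bytes it occupies. */
static size_t demo_build_attr(struct demo_fdmi_attr *a, uint16_t type,
			      const char *str)
{
	size_t alen;

	memset(a, 0, sizeof(*a));
	a->type = htons(type);
	alen = (size_t)snprintf(a->value, sizeof(a->value), "%s", str);
	alen += demo_pad4(alen);	/* FDMI_ATTR_ALIGNMENT analogue */
	alen += 4;			/* FDMI_ATTR_TYPELEN analogue   */
	a->len = htons((uint16_t)alen);
	return alen;
}

int main(void)
{
	struct demo_fdmi_attr attr;
	size_t size = 0;

	/* Type code 0x0001 and the value string are arbitrary demo inputs. */
	size += demo_build_attr(&attr, 0x0001, "Example Manufacturer");
	printf("attr type %#x len %u, running size %zu\n",
	       (unsigned int)ntohs(attr.type), (unsigned int)ntohs(attr.len),
	       size);
	return 0;
}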
linux-master
drivers/scsi/qla2xxx/qla_gs.c
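/*
 * Standalone sketch (not from the qla2xxx sources) of the duplicate
 * N_Port ID clean-up that qla24xx_async_gnnft_done() above performs on
 * the fabric scan list: for every populated entry, any later entry that
 * reports the same 24-bit port ID is zeroed so only the first copy
 * survives and the duplicates are counted.  The structure, field names,
 * and sizes below are simplified stand-ins for the driver's
 * struct fab_scan_rp.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct demo_scan_rp {
	uint32_t id_b24;	/* 24-bit N_Port ID, 0 = empty slot */
	uint8_t  port_name[8];	/* WWPN */
};

static unsigned int demo_dedup_scan_list(struct demo_scan_rp *l, unsigned int n)
{
	unsigned int i, k, dup_cnt = 0;

	for (i = 0; i < n; i++) {
		if (!l[i].id_b24)
			continue;
		for (k = i + 1; k < n; k++) {
			if (l[k].id_b24 == l[i].id_b24) {
				dup_cnt++;
				memset(&l[k], 0, sizeof(l[k]));
			}
		}
	}
	return dup_cnt;
}

int main(void)
{
	struct demo_scan_rp list[4] = {
		{ 0x010200, { 0x21, 0, 0, 0, 0, 0, 0, 1 } },
		{ 0x010300, { 0x21, 0, 0, 0, 0, 0, 0, 2 } },
		{ 0x010200, { 0x21, 0, 0, 0, 0, 0, 0, 3 } },	/* duplicate ID */
		{ 0 },
	};

	printf("%u duplicate N_Port ID entries removed\n",
	       demo_dedup_scan_list(list, 4));
	return 0;
}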
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_gbl.h" #include "qla_target.h" #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/list.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <linux/delay.h> void qla2x00_vp_stop_timer(scsi_qla_host_t *vha) { if (vha->vp_idx && vha->timer_active) { del_timer_sync(&vha->timer); vha->timer_active = 0; } } static uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *vha) { uint32_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags; /* Find an empty slot and assign an vp_id */ mutex_lock(&ha->vport_lock); vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); if (vp_id > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa000, "vp_id %d is bigger than max-supported %d.\n", vp_id, ha->max_npiv_vports); mutex_unlock(&ha->vport_lock); return vp_id; } set_bit(vp_id, ha->vp_idx_map); ha->num_vhosts++; vha->vp_idx = vp_id; spin_lock_irqsave(&ha->vport_slock, flags); list_add_tail(&vha->list, &ha->vp_list); spin_unlock_irqrestore(&ha->vport_slock, flags); spin_lock_irqsave(&ha->hardware_lock, flags); qla_update_vp_map(vha, SET_VP_IDX); spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_unlock(&ha->vport_lock); return vp_id; } void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) { uint16_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; u32 i, bailout; mutex_lock(&ha->vport_lock); /* * Wait for all pending activities to finish before removing vport from * the list. * Lock needs to be held for safe removal from the list (it * ensures no active vp_list traversal while the vport is removed * from the queue) */ bailout = 0; for (i = 0; i < 500; i++) { spin_lock_irqsave(&ha->vport_slock, flags); if (atomic_read(&vha->vref_count) == 0) { list_del(&vha->list); qla_update_vp_map(vha, RESET_VP_IDX); bailout = 1; } spin_unlock_irqrestore(&ha->vport_slock, flags); if (bailout) break; else msleep(20); } if (!bailout) { ql_log(ql_log_info, vha, 0xfffa, "vha->vref_count=%u timeout\n", vha->vref_count.counter); spin_lock_irqsave(&ha->vport_slock, flags); list_del(&vha->list); qla_update_vp_map(vha, RESET_VP_IDX); spin_unlock_irqrestore(&ha->vport_slock, flags); } vp_id = vha->vp_idx; ha->num_vhosts--; clear_bit(vp_id, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); } static scsi_qla_host_t * qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) { scsi_qla_host_t *vha; struct scsi_qla_host *tvha; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); /* Locate matching device in database. */ list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { spin_unlock_irqrestore(&ha->vport_slock, flags); return vha; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return NULL; } /* * qla2x00_mark_vp_devices_dead * Updates fcport state when device goes offline. * * Input: * ha = adapter block pointer. * fcport = port structure pointer. * * Return: * None. * * Context: */ static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) { /* * !!! NOTE !!! * This function, if called in contexts other than vp create, disable * or delete, please make sure this is synchronized with the * delete thread. 
*/ fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { ql_dbg(ql_dbg_vport, vha, 0xa001, "Marking port dead, loop_id=0x%04x : %x.\n", fcport->loop_id, fcport->vha->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); } } int qla24xx_disable_vp(scsi_qla_host_t *vha) { unsigned long flags; int ret = QLA_SUCCESS; fc_port_t *fcport; if (vha->hw->flags.edif_enabled) { if (DBELL_ACTIVE(vha)) qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE, FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN); /* delete sessions and flush sa_indexes */ qla2x00_wait_for_sess_deletion(vha); } if (vha->hw->flags.fw_started) ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->logout_on_delete = 0; if (!vha->hw->flags.edif_enabled) qla2x00_wait_for_sess_deletion(vha); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); qla_update_vp_map(vha, RESET_AL_PA); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); qla2x00_mark_vp_devices_dead(vha); atomic_set(&vha->vp_state, VP_FAILED); vha->flags.management_server_logged_in = 0; if (ret == QLA_SUCCESS) { fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED); } else { fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); return -1; } return 0; } int qla24xx_enable_vp(scsi_qla_host_t *vha) { int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); /* Check if physical ha port is Up */ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || atomic_read(&base_vha->loop_state) == LOOP_DEAD || !(ha->current_topology & ISP_CFG_F)) { vha->vp_err_state = VP_ERR_PORTDWN; fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); ql_dbg(ql_dbg_taskm, vha, 0x800b, "%s skip enable. 
loop_state %x topo %x\n", __func__, base_vha->loop_state.counter, ha->current_topology); goto enable_failed; } /* Initialize the new vport unless it is a persistent port */ mutex_lock(&ha->vport_lock); ret = qla24xx_modify_vp_config(vha); mutex_unlock(&ha->vport_lock); if (ret != QLA_SUCCESS) { fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); goto enable_failed; } ql_dbg(ql_dbg_taskm, vha, 0x801a, "Virtual port with id: %d - Enabled.\n", vha->vp_idx); return 0; enable_failed: ql_dbg(ql_dbg_taskm, vha, 0x801b, "Virtual port with id: %d - Disabled.\n", vha->vp_idx); return 1; } static void qla24xx_configure_vp(scsi_qla_host_t *vha) { struct fc_vport *fc_vport; int ret; fc_vport = vha->fc_vport; ql_dbg(ql_dbg_vport, vha, 0xa002, "%s: change request #3.\n", __func__); ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable " "receiving of RSCN requests: 0x%x.\n", ret); return; } else { /* Corresponds to SCR enabled */ clear_bit(VP_SCR_NEEDED, &vha->vp_flags); } vha->flags.online = 1; if (qla24xx_configure_vhba(vha)) return; atomic_set(&vha->vp_state, VP_ACTIVE); fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); } void qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) { scsi_qla_host_t *vha, *tvp; struct qla_hw_data *ha = rsp->hw; int i = 0; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) { if (vha->vp_idx) { if (test_bit(VPORT_DELETE, &vha->dpc_flags)) continue; atomic_inc(&vha->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); switch (mb[0]) { case MBA_LIP_OCCURRED: case MBA_LOOP_UP: case MBA_LOOP_DOWN: case MBA_LIP_RESET: case MBA_POINT_TO_POINT: case MBA_CHG_IN_CONNECTION: ql_dbg(ql_dbg_async, vha, 0x5024, "Async_event for VP[%d], mb=0x%x vha=%p.\n", i, *mb, vha); qla2x00_async_event(vha, rsp, mb); break; case MBA_PORT_UPDATE: case MBA_RSCN_UPDATE: if ((mb[3] & 0xff) == vha->vp_idx) { ql_dbg(ql_dbg_async, vha, 0x5024, "Async_event for VP[%d], mb=0x%x vha=%p\n", i, *mb, vha); qla2x00_async_event(vha, rsp, mb); } break; } spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vha->vref_count); wake_up(&vha->vref_waitq); } i++; } spin_unlock_irqrestore(&ha->vport_slock, flags); } int qla2x00_vp_abort_isp(scsi_qla_host_t *vha) { fc_port_t *fcport; /* * To exclusively reset vport, we need to log it out first. * Note: This control_vp can fail if ISP reset is already * issued, this is expected, as the vp would be already * logged out due to ISP reset. */ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->logout_on_delete = 0; } /* * Physical port will do most of the abort and recovery work. 
We can * just treat it as a loop down */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } ql_dbg(ql_dbg_taskm, vha, 0x801d, "Scheduling enable of Vport %d.\n", vha->vp_idx); return qla24xx_enable_vp(vha); } static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); /* Check if Fw is ready to configure VP first */ if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) { if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { /* VP acquired. complete port configuration */ ql_dbg(ql_dbg_dpc, vha, 0x4014, "Configure VP scheduled.\n"); qla24xx_configure_vp(vha); ql_dbg(ql_dbg_dpc, vha, 0x4015, "Configure VP end.\n"); return 0; } } if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) { if (atomic_read(&vha->loop_state) == LOOP_READY) { qla24xx_process_purex_list(&vha->purex_list); clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); } } if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && atomic_read(&vha->loop_state) != LOOP_DOWN) { if (!vha->relogin_jif || time_after_eq(jiffies, vha->relogin_jif)) { vha->relogin_jif = jiffies + HZ; clear_bit(RELOGIN_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x4018, "Relogin needed scheduled.\n"); qla24xx_post_relogin_work(vha); } } if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { clear_bit(RESET_ACTIVE, &vha->dpc_flags); } if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { ql_dbg(ql_dbg_dpc, vha, 0x401a, "Loop resync scheduled.\n"); qla2x00_loop_resync(vha); clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x401b, "Loop resync end.\n"); } } ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c, "Exiting %s.\n", __func__); return 0; } void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp, *tvp; unsigned long flags = 0; if (vha->vp_idx) return; if (list_empty(&ha->vp_list)) return; clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); if (!(ha->current_topology & ISP_CFG_F)) return; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_do_dpc_vp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } } spin_unlock_irqrestore(&ha->vport_slock, flags); } int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) { scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); struct qla_hw_data *ha = base_vha->hw; scsi_qla_host_t *vha; uint8_t port_name[WWN_SIZE]; if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR) return VPCERR_UNSUPPORTED; /* Check up the F/W and H/W support NPIV */ if (!ha->flags.npiv_supported) return VPCERR_UNSUPPORTED; /* Check up whether npiv supported switch presented */ if (!(ha->switch_cap & FLOGI_MID_SUPPORT)) return VPCERR_NO_FABRIC_SUPP; /* Check up unique WWPN */ u64_to_wwn(fc_vport->port_name, port_name); if (!memcmp(port_name, base_vha->port_name, WWN_SIZE)) return VPCERR_BAD_WWN; vha = qla24xx_find_vhost_by_name(ha, port_name); if (vha) return 
VPCERR_BAD_WWN; /* Check up max-npiv-supports */ if (ha->num_vhosts > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa004, "num_vhosts %ud is bigger " "than max_npiv_vports %ud.\n", ha->num_vhosts, ha->max_npiv_vports); return VPCERR_UNSUPPORTED; } return 0; } scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *fc_vport) { scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); struct qla_hw_data *ha = base_vha->hw; scsi_qla_host_t *vha; const struct scsi_host_template *sht = &qla2xxx_driver_template; struct Scsi_Host *host; vha = qla2x00_create_host(sht, ha); if (!vha) { ql_log(ql_log_warn, vha, 0xa005, "scsi_host_alloc() failed for vport.\n"); return(NULL); } host = vha->host; fc_vport->dd_data = vha; /* New host info */ u64_to_wwn(fc_vport->node_name, vha->node_name); u64_to_wwn(fc_vport->port_name, vha->port_name); vha->fc_vport = fc_vport; vha->device_flags = 0; vha->vp_idx = qla24xx_allocate_vp_id(vha); if (vha->vp_idx > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa006, "Couldn't allocate vp_id.\n"); goto create_vhost_failed; } vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha); vha->dpc_flags = 0L; ha->dpc_active = 0; set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); /* * To fix the issue of processing a parent's RSCN for the vport before * its SCR is complete. */ set_bit(VP_SCR_NEEDED, &vha->vp_flags); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_start_timer(vha, WATCH_INTERVAL); vha->req = base_vha->req; vha->flags.nvme_enabled = base_vha->flags.nvme_enabled; host->can_queue = base_vha->req->length + 128; host->cmd_per_lun = 3; if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; host->max_channel = MAX_BUSES - 1; host->max_lun = ql2xmaxlun; host->unique_id = host->host_no; host->max_id = ha->max_fibre_devices; host->transportt = qla2xxx_transport_vport_template; ql_dbg(ql_dbg_vport, vha, 0xa007, "Detect vport hba %ld at address = %p.\n", vha->host_no, vha); vha->flags.init_done = 1; mutex_lock(&ha->vport_lock); set_bit(vha->vp_idx, ha->vp_idx_map); ha->cur_vport_count++; mutex_unlock(&ha->vport_lock); return vha; create_vhost_failed: return NULL; } static void qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) { struct qla_hw_data *ha = vha->hw; uint16_t que_id = req->id; dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), req->ring, req->dma); req->ring = NULL; req->dma = 0; if (que_id) { ha->req_q_map[que_id] = NULL; mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->vport_lock); } kfree(req->outstanding_cmds); kfree(req); } static void qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { struct qla_hw_data *ha = vha->hw; uint16_t que_id = rsp->id; if (rsp->msix && rsp->msix->have_irq) { free_irq(rsp->msix->vector, rsp->msix->handle); rsp->msix->have_irq = 0; rsp->msix->in_use = 0; rsp->msix->handle = NULL; } dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), rsp->ring, rsp->dma); rsp->ring = NULL; rsp->dma = 0; if (que_id) { ha->rsp_q_map[que_id] = NULL; mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->rsp_qid_map); mutex_unlock(&ha->vport_lock); } kfree(rsp); } int qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) { int ret = QLA_SUCCESS; if (req && vha->flags.qpairs_req_created) { req->options |= BIT_0; ret = qla25xx_init_req_que(vha, req); if (ret != QLA_SUCCESS) 
return QLA_FUNCTION_FAILED; qla25xx_free_req_que(vha, req); } return ret; } int qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { int ret = QLA_SUCCESS; if (rsp && vha->flags.qpairs_rsp_created) { rsp->options |= BIT_0; ret = qla25xx_init_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) return QLA_FUNCTION_FAILED; qla25xx_free_rsp_que(vha, rsp); } return ret; } /* Delete all queues for a given vhost */ int qla25xx_delete_queues(struct scsi_qla_host *vha) { int cnt, ret = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair, *tqpair; if (ql2xmqsupport || ql2xnvmeenable) { list_for_each_entry_safe(qpair, tqpair, &vha->qp_list, qp_list_elem) qla2xxx_delete_qpair(vha, qpair); } else { /* Delete request queues */ for (cnt = 1; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; if (req && test_bit(cnt, ha->req_qid_map)) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00ea, "Couldn't delete req que %d.\n", req->id); return ret; } } } /* Delete response queues */ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; if (rsp && test_bit(cnt, ha->rsp_qid_map)) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00eb, "Couldn't delete rsp que %d.\n", rsp->id); return ret; } } } } return ret; } int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp) { int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t *reg; uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { ql_log(ql_log_fatal, base_vha, 0x00d9, "Failed to allocate memory for request queue.\n"); goto failed; } req->length = REQUEST_ENTRY_CNT_24XX; req->ring = dma_alloc_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), &req->dma, GFP_KERNEL); if (req->ring == NULL) { ql_log(ql_log_fatal, base_vha, 0x00da, "Failed to allocate memory for request_ring.\n"); goto que_failed; } ret = qla2x00_alloc_outstanding_cmds(ha, req); if (ret != QLA_SUCCESS) goto que_failed; mutex_lock(&ha->mq_lock); que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); if (que_id >= ha->max_req_queues) { mutex_unlock(&ha->mq_lock); ql_log(ql_log_warn, base_vha, 0x00db, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->req_qid_map); ha->req_q_map[que_id] = req; req->rid = rid; req->vp_idx = vp_idx; req->qos = qos; ql_dbg(ql_dbg_multiq, base_vha, 0xc002, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", que_id, req->rid, req->vp_idx, req->qos); ql_dbg(ql_dbg_init, base_vha, 0x00dc, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", que_id, req->rid, req->vp_idx, req->qos); if (rsp_que < 0) req->rsp = NULL; else req->rsp = ha->rsp_q_map[rsp_que]; /* Use alternate PCI bus number */ if (MSB(req->rid)) options |= BIT_4; /* Use alternate PCI devfn */ if (LSB(req->rid)) options |= BIT_5; req->options = options; ql_dbg(ql_dbg_multiq, base_vha, 0xc003, "options=0x%x.\n", req->options); ql_dbg(ql_dbg_init, base_vha, 0x00dd, "options=0x%x.\n", req->options); for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; req->current_outstanding_cmd = 1; req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, 
que_id); req->req_q_in = &reg->isp25mq.req_q_in; req->req_q_out = &reg->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; req->out_ptr = (uint16_t *)(req->ring + req->length); mutex_unlock(&ha->mq_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, "ring_ptr=%p ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); ql_dbg(ql_dbg_init, base_vha, 0x00de, "ring_ptr=%p ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); if (startqp) { ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00df, "%s failed.\n", __func__); mutex_lock(&ha->mq_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->mq_lock); goto que_failed; } vha->flags.qpairs_req_created = 1; } return req->id; que_failed: qla25xx_free_req_que(base_vha, req); failed: return 0; } static void qla_do_work(struct work_struct *work) { unsigned long flags; struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work); struct scsi_qla_host *vha = qpair->vha; spin_lock_irqsave(&qpair->qp_lock, flags); qla24xx_process_response_queue(vha, qpair->rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); } /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp) { int ret = 0; struct rsp_que *rsp = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t *reg; rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (rsp == NULL) { ql_log(ql_log_warn, base_vha, 0x0066, "Failed to allocate memory for response queue.\n"); goto failed; } rsp->length = RESPONSE_ENTRY_CNT_MQ; rsp->ring = dma_alloc_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); if (rsp->ring == NULL) { ql_log(ql_log_warn, base_vha, 0x00e1, "Failed to allocate memory for response ring.\n"); goto que_failed; } mutex_lock(&ha->mq_lock); que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); if (que_id >= ha->max_rsp_queues) { mutex_unlock(&ha->mq_lock); ql_log(ql_log_warn, base_vha, 0x00e2, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->rsp_qid_map); rsp->msix = qpair->msix; ha->rsp_q_map[que_id] = rsp; rsp->rid = rid; rsp->vp_idx = vp_idx; rsp->hw = ha; ql_dbg(ql_dbg_init, base_vha, 0x00e4, "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n", que_id, rsp->rid, rsp->vp_idx, rsp->hw); /* Use alternate PCI bus number */ if (MSB(rsp->rid)) options |= BIT_4; /* Use alternate PCI devfn */ if (LSB(rsp->rid)) options |= BIT_5; /* Enable MSIX handshake mode on for uncapable adapters */ if (!IS_MSIX_NACK_CAPABLE(ha)) options |= BIT_6; /* Set option to indicate response queue creation */ options |= BIT_1; rsp->options = options; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); mutex_unlock(&ha->mq_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x00e5, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ret = qla25xx_request_irq(ha, qpair, qpair->msix, ha->flags.disable_msix_handshake ? 
QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS); if (ret) goto que_failed; if (startqp) { ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00e7, "%s failed.\n", __func__); mutex_lock(&ha->mq_lock); clear_bit(que_id, ha->rsp_qid_map); mutex_unlock(&ha->mq_lock); goto que_failed; } vha->flags.qpairs_rsp_created = 1; } rsp->req = NULL; qla2x00_init_response_q_entries(rsp); if (qpair->hw->wq) INIT_WORK(&qpair->q_work, qla_do_work); return rsp->id; que_failed: qla25xx_free_rsp_que(base_vha, rsp); failed: return 0; } static void qla_ctrlvp_sp_done(srb_t *sp, int res) { if (sp->comp) complete(sp->comp); /* don't free sp here. Let the caller do the free */ } /** * qla24xx_control_vp() - Enable a virtual port for given host * @vha: adapter block pointer * @cmd: command type to be sent for enable virtual port * * Return: qla2xxx local function return status code. */ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) { int rval = QLA_MEMORY_ALLOC_FAILED; struct qla_hw_data *ha = vha->hw; int vp_index = vha->vp_idx; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); DECLARE_COMPLETION_ONSTACK(comp); srb_t *sp; ql_dbg(ql_dbg_vport, vha, 0x10c1, "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index); if (vp_index == 0 || vp_index >= ha->max_npiv_vports) return QLA_PARAMETER_ERROR; /* ref: INIT */ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); if (!sp) return rval; sp->type = SRB_CTRL_VP; sp->name = "ctrl_vp"; sp->comp = &comp; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla_ctrlvp_sp_done); sp->u.iocb_cmd.u.ctrlvp.cmd = cmd; sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index; rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_async, vha, 0xffff, "%s: %s Failed submission. %x.\n", __func__, sp->name, rval); goto done; } ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n", sp->name, sp->handle); wait_for_completion(&comp); sp->comp = NULL; rval = sp->rc; switch (rval) { case QLA_FUNCTION_TIMEOUT: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n", __func__, sp->name, rval); break; case QLA_SUCCESS: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n", __func__, sp->name); break; default: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. 
%x.\n", __func__, sp->name, rval); break; } done: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx) { struct qla_hw_data *ha = vha->hw; if (vha->vp_idx == vp_idx) return vha; BUG_ON(ha->vp_map == NULL); if (likely(test_bit(vp_idx, ha->vp_idx_map))) return ha->vp_map[vp_idx].vha; return NULL; } /* vport_slock to be held by the caller */ void qla_update_vp_map(struct scsi_qla_host *vha, int cmd) { void *slot; u32 key; int rc; if (!vha->hw->vp_map) return; key = vha->d_id.b24; switch (cmd) { case SET_VP_IDX: vha->hw->vp_map[vha->vp_idx].vha = vha; break; case SET_AL_PA: slot = btree_lookup32(&vha->hw->host_map, key); if (!slot) { ql_dbg(ql_dbg_disc, vha, 0xf018, "Save vha in host_map %p %06x\n", vha, key); rc = btree_insert32(&vha->hw->host_map, key, vha, GFP_ATOMIC); if (rc) ql_log(ql_log_info, vha, 0xd03e, "Unable to insert s_id into host_map: %06x\n", key); return; } ql_dbg(ql_dbg_disc, vha, 0xf019, "replace existing vha in host_map %p %06x\n", vha, key); btree_update32(&vha->hw->host_map, key, vha); break; case RESET_VP_IDX: vha->hw->vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: ql_dbg(ql_dbg_disc, vha, 0xf01a, "clear vha in host_map %p %06x\n", vha, key); slot = btree_lookup32(&vha->hw->host_map, key); if (slot) btree_remove32(&vha->hw->host_map, key); vha->d_id.b24 = 0; break; } } void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id) { if (!vha->d_id.b24) { vha->d_id = id; qla_update_vp_map(vha, SET_AL_PA); } else if (vha->d_id.b24 != id.b24) { qla_update_vp_map(vha, RESET_AL_PA); vha->d_id = id; qla_update_vp_map(vha, SET_AL_PA); } } int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp) { int sz; qp->buf_pool.num_bufs = qp->req->length; sz = BITS_TO_LONGS(qp->req->length); qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL); if (!qp->buf_pool.buf_map) { ql_log(ql_log_warn, vha, 0x0186, "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long)); return -ENOMEM; } sz = qp->req->length * sizeof(void *); qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL); if (!qp->buf_pool.buf_array) { ql_log(ql_log_warn, vha, 0x0186, "Failed to allocate buf_array(%d).\n", sz); kfree(qp->buf_pool.buf_map); return -ENOMEM; } sz = qp->req->length * sizeof(dma_addr_t); qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL); if (!qp->buf_pool.dma_array) { ql_log(ql_log_warn, vha, 0x0186, "Failed to allocate dma_array(%d).\n", sz); kfree(qp->buf_pool.buf_map); kfree(qp->buf_pool.buf_array); return -ENOMEM; } set_bit(0, qp->buf_pool.buf_map); return 0; } void qla_free_buf_pool(struct qla_qpair *qp) { int i; struct qla_hw_data *ha = qp->vha->hw; for (i = 0; i < qp->buf_pool.num_bufs; i++) { if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i]) dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i], qp->buf_pool.dma_array[i]); qp->buf_pool.buf_array[i] = NULL; qp->buf_pool.dma_array[i] = 0; } kfree(qp->buf_pool.dma_array); kfree(qp->buf_pool.buf_array); kfree(qp->buf_pool.buf_map); } /* it is assume qp->qp_lock is held at this point */ int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc) { u16 tag, i = 0; void *buf; dma_addr_t buf_dma; struct qla_hw_data *ha = vha->hw; dsc->tag = TAG_FREED; again: tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs); if (tag >= qp->buf_pool.num_bufs) { ql_dbg(ql_dbg_io, vha, 0x00e2, 
"qp(%d) ran out of buf resource.\n", qp->id); return -EIO; } if (tag == 0) { set_bit(0, qp->buf_pool.buf_map); i++; if (i == 5) { ql_dbg(ql_dbg_io, vha, 0x00e3, "qp(%d) unable to get tag.\n", qp->id); return -EIO; } goto again; } if (!qp->buf_pool.buf_array[tag]) { buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma); if (!buf) { ql_log(ql_log_fatal, vha, 0x13b1, "Failed to allocate buf.\n"); return -ENOMEM; } dsc->buf = qp->buf_pool.buf_array[tag] = buf; dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma; qp->buf_pool.num_alloc++; } else { dsc->buf = qp->buf_pool.buf_array[tag]; dsc->buf_dma = qp->buf_pool.dma_array[tag]; memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE); } qp->buf_pool.num_active++; if (qp->buf_pool.num_active > qp->buf_pool.max_used) qp->buf_pool.max_used = qp->buf_pool.num_active; dsc->tag = tag; set_bit(tag, qp->buf_pool.buf_map); return 0; } static void qla_trim_buf(struct qla_qpair *qp, u16 trim) { int i, j; struct qla_hw_data *ha = qp->vha->hw; if (!trim) return; for (i = 0; i < trim; i++) { j = qp->buf_pool.num_alloc - 1; if (test_bit(j, qp->buf_pool.buf_map)) { ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b, "QP id(%d): trim active buf[%d]. Remain %d bufs\n", qp->id, j, qp->buf_pool.num_alloc); return; } if (qp->buf_pool.buf_array[j]) { dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j], qp->buf_pool.dma_array[j]); qp->buf_pool.buf_array[j] = NULL; qp->buf_pool.dma_array[j] = 0; } qp->buf_pool.num_alloc--; if (!qp->buf_pool.num_alloc) break; } ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010, "QP id(%d): trimmed %d bufs. Remain %d bufs\n", qp->id, trim, qp->buf_pool.num_alloc); } static void __qla_adjust_buf(struct qla_qpair *qp) { u32 trim; qp->buf_pool.take_snapshot = 0; qp->buf_pool.prev_max = qp->buf_pool.max_used; qp->buf_pool.max_used = qp->buf_pool.num_active; if (qp->buf_pool.prev_max > qp->buf_pool.max_used && qp->buf_pool.num_alloc > qp->buf_pool.max_used) { /* down trend */ trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used; trim = (trim * 10) / 100; trim = trim ? trim : 1; qla_trim_buf(qp, trim); } else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) { /* 2 periods of no io */ qla_trim_buf(qp, qp->buf_pool.num_alloc); } } /* it is assume qp->qp_lock is held at this point */ void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc) { if (dsc->tag == TAG_FREED) return; lockdep_assert_held(qp->qp_lock_ptr); clear_bit(dsc->tag, qp->buf_pool.buf_map); qp->buf_pool.num_active--; dsc->tag = TAG_FREED; if (qp->buf_pool.take_snapshot) __qla_adjust_buf(qp); } #define EXPIRE (60 * HZ) void qla_adjust_buf(struct scsi_qla_host *vha) { unsigned long flags; int i; struct qla_qpair *qp; if (vha->vp_idx) return; if (!vha->buf_expired) { vha->buf_expired = jiffies + EXPIRE; return; } if (time_before(jiffies, vha->buf_expired)) return; vha->buf_expired = jiffies + EXPIRE; for (i = 0; i < vha->hw->num_qpairs; i++) { qp = vha->hw->queue_pair_map[i]; if (!qp) continue; if (!qp->buf_pool.num_alloc) continue; if (qp->buf_pool.take_snapshot) { /* no io has gone through in the last EXPIRE period */ spin_lock_irqsave(qp->qp_lock_ptr, flags); __qla_adjust_buf(qp); spin_unlock_irqrestore(qp->qp_lock_ptr, flags); } else { qp->buf_pool.take_snapshot = 1; } } }
linux-master
drivers/scsi/qla2xxx/qla_mid.c
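qla_get_buf()/qla_put_buf() in the qla_mid.c record above hand out per-queue-pair DMA buffers by scanning a bitmap of tags: a set bit means the slot is busy, bit 0 stays permanently reserved, and freeing a buffer simply clears its bit. Below is a hedged, self-contained user-space illustration of only that tag bookkeeping (no DMA, hypothetical names, not driver code).

/*
 * Minimal sketch of tag allocation against a small fixed pool:
 * find the first clear slot, mark it busy, and clear it on release.
 */
#include <stdio.h>

#define POOL_SIZE 8

static unsigned char buf_map[POOL_SIZE];     /* 0 = free, 1 = in use */

static int get_tag(void)
{
	int tag;

	for (tag = 1; tag < POOL_SIZE; tag++) {  /* tag 0 stays reserved */
		if (!buf_map[tag]) {
			buf_map[tag] = 1;
			return tag;
		}
	}
	return -1;                               /* pool exhausted */
}

static void put_tag(int tag)
{
	if (tag > 0 && tag < POOL_SIZE)
		buf_map[tag] = 0;
}

int main(void)
{
	int a = get_tag();
	int b = get_tag();

	printf("got tags %d and %d\n", a, b);
	put_tag(a);
	printf("after free, next tag is %d\n", get_tag());
	return 0;
}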
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include <linux/debugfs.h> #include <linux/seq_file.h> static struct dentry *qla2x00_dfs_root; static atomic_t qla2x00_dfs_root_count; #define QLA_DFS_RPORT_DEVLOSS_TMO 1 static int qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val) { switch (attr_id) { case QLA_DFS_RPORT_DEVLOSS_TMO: /* Only supported for FC-NVMe devices that are registered. */ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED)) return -EIO; *val = fp->nvme_remote_port->dev_loss_tmo; break; default: return -EINVAL; } return 0; } static int qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val) { switch (attr_id) { case QLA_DFS_RPORT_DEVLOSS_TMO: /* Only supported for FC-NVMe devices that are registered. */ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED)) return -EIO; #if (IS_ENABLED(CONFIG_NVME_FC)) return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port, val); #else /* CONFIG_NVME_FC */ return -EINVAL; #endif /* CONFIG_NVME_FC */ default: return -EINVAL; } return 0; } #define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \ static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \ { \ struct fc_port *fp = data; \ return qla_dfs_rport_get(fp, _attr_id, val); \ } \ static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \ { \ struct fc_port *fp = data; \ return qla_dfs_rport_set(fp, _attr_id, val); \ } \ DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \ qla_dfs_rport_##_attr##_get, \ qla_dfs_rport_##_attr##_set, "%llu\n") /* * Wrapper for getting fc_port fields. * * _attr : Attribute name. * _get_val : Accessor macro to retrieve the value. */ #define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \ static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \ { \ struct fc_port *fp = data; \ *val = _get_val; \ return 0; \ } \ DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \ qla_dfs_rport_field_##_attr##_get, \ NULL, "%llu\n") #define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) #define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr) DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo); DEFINE_QLA_DFS_RPORT_FIELD(disc_state); DEFINE_QLA_DFS_RPORT_FIELD(scan_state); DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state); DEFINE_QLA_DFS_RPORT_FIELD(login_pause); DEFINE_QLA_DFS_RPORT_FIELD(flags); DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag); DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen); DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen); DEFINE_QLA_DFS_RPORT_FIELD(login_gen); DEFINE_QLA_DFS_RPORT_FIELD(loop_id); DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24); DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref)); void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) { char wwn[32]; #define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \ debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \ fp, &qla_dfs_rport_field_##_attr##_fops) if (!vha->dfs_rport_root || fp->dfs_rport_dir) return; sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name)); fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root); if (IS_ERR(fp->dfs_rport_dir)) return; if (NVME_TARGET(vha->hw, fp)) debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir, fp, &qla_dfs_rport_dev_loss_tmo_fops); QLA_CREATE_RPORT_FIELD_ATTR(disc_state); QLA_CREATE_RPORT_FIELD_ATTR(scan_state); QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state); QLA_CREATE_RPORT_FIELD_ATTR(login_pause); 
QLA_CREATE_RPORT_FIELD_ATTR(flags); QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag); QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen); QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen); QLA_CREATE_RPORT_FIELD_ATTR(login_gen); QLA_CREATE_RPORT_FIELD_ATTR(loop_id); QLA_CREATE_RPORT_FIELD_ATTR(port_id); QLA_CREATE_RPORT_FIELD_ATTR(sess_kref); } void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp) { if (!vha->dfs_rport_root || !fp->dfs_rport_dir) return; debugfs_remove_recursive(fp->dfs_rport_dir); fp->dfs_rport_dir = NULL; } static int qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) { scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; unsigned long flags; struct fc_port *sess = NULL; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; seq_printf(s, "%s\n", vha->host_str); if (tgt) { seq_puts(s, "Port ID Port Name Handle\n"); spin_lock_irqsave(&ha->tgt.sess_lock, flags); list_for_each_entry(sess, &vha->vp_fcports, list) seq_printf(s, "%02x:%02x:%02x %8phC %d\n", sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, sess->port_name, sess->loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } return 0; } DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess); static int qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) { scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; struct gid_list_info *gid_list; dma_addr_t gid_list_dma; fc_port_t fc_port; char *id_iter; int rc, i; uint16_t entries, loop_id; seq_printf(s, "%s\n", vha->host_str); gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &gid_list_dma, GFP_KERNEL); if (!gid_list) { ql_dbg(ql_dbg_user, vha, 0x7018, "DMA allocation failed for %u\n", qla2x00_gid_list_size(ha)); return 0; } rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); if (rc != QLA_SUCCESS) goto out_free_id_list; id_iter = (char *)gid_list; seq_puts(s, "Port Name Port ID Loop ID\n"); for (i = 0; i < entries; i++) { struct gid_list_info *gid = (struct gid_list_info *)id_iter; loop_id = le16_to_cpu(gid->loop_id); memset(&fc_port, 0, sizeof(fc_port_t)); fc_port.loop_id = loop_id; rc = qla24xx_gpdb_wait(vha, &fc_port, 0); seq_printf(s, "%8phC %02x%02x%02x %d\n", fc_port.port_name, fc_port.d_id.b.domain, fc_port.d_id.b.area, fc_port.d_id.b.al_pa, fc_port.loop_id); id_iter += ha->gid_list_info_size; } out_free_id_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), gid_list, gid_list_dma); return 0; } DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database); static int qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) { struct scsi_qla_host *vha = s->private; uint16_t mb[MAX_IOCB_MB_REG]; int rc; struct qla_hw_data *ha = vha->hw; u16 iocbs_used, i, exch_used; rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG); if (rc != QLA_SUCCESS) { seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]); } else { seq_puts(s, "FW Resource count\n\n"); seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]); seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]); seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]); seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]); seq_printf(s, "Current IOCB count[%d]\n", mb[7]); seq_printf(s, "Original IOCB count[%d]\n", mb[10]); seq_printf(s, "MAX VP count[%d]\n", mb[11]); seq_printf(s, "MAX FCF count[%d]\n", mb[12]); seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n", mb[20]); seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n", mb[21]); seq_printf(s, "Current free Initiator fast XCB buffer 
cnt[%d]\n", mb[22]); seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n", mb[23]); } if (ql2xenforce_iocb_limit) { /* lock is not require. It's an estimate. */ iocbs_used = ha->base_qpair->fwres.iocbs_used; exch_used = ha->base_qpair->fwres.exch_used; for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) { iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used; exch_used += ha->queue_pair_map[i]->fwres.exch_used; } } seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n", iocbs_used, ha->base_qpair->fwres.iocbs_limit); seq_printf(s, "estimate exchange used[%d] high water limit [%d] n", exch_used, ha->base_qpair->fwres.exch_limit); if (ql2xenforce_iocb_limit == 2) { iocbs_used = atomic_read(&ha->fwres.iocb_used); exch_used = atomic_read(&ha->fwres.exch_used); seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n", iocbs_used, ha->fwres.iocb_limit); seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n", exch_used, ha->fwres.exch_limit); } } return 0; } DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt); static int qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) { struct scsi_qla_host *vha = s->private; struct qla_qpair *qpair = vha->hw->base_qpair; uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio, core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd, num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent; u16 i; fc_port_t *fcport = NULL; if (qla2x00_chip_is_down(vha)) return 0; qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd; core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf; qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio; core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status; qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio; core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd; num_q_full_sent = qpair->tgt_counters.num_q_full_sent; num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed; num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent; for (i = 0; i < vha->hw->max_qpairs; i++) { qpair = vha->hw->queue_pair_map[i]; if (!qpair) continue; qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd; core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf; qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio; core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status; qla_core_ret_sta_ctio += qpair->tgt_counters.qla_core_ret_sta_ctio; core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd; num_q_full_sent += qpair->tgt_counters.num_q_full_sent; num_alloc_iocb_failed += qpair->tgt_counters.num_alloc_iocb_failed; num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent; } seq_puts(s, "Target Counters\n"); seq_printf(s, "qla_core_sbt_cmd = %lld\n", qla_core_sbt_cmd); seq_printf(s, "qla_core_ret_sta_ctio = %lld\n", qla_core_ret_sta_ctio); seq_printf(s, "qla_core_ret_ctio = %lld\n", qla_core_ret_ctio); seq_printf(s, "core_qla_que_buf = %lld\n", core_qla_que_buf); seq_printf(s, "core_qla_snd_status = %lld\n", core_qla_snd_status); seq_printf(s, "core_qla_free_cmd = %lld\n", core_qla_free_cmd); seq_printf(s, "num alloc iocb failed = %lld\n", num_alloc_iocb_failed); seq_printf(s, "num term exchange sent = %lld\n", num_term_xchg_sent); seq_printf(s, "num Q full sent = %lld\n", num_q_full_sent); /* DIF stats */ seq_printf(s, "DIF Inp Bytes = %lld\n", vha->qla_stats.qla_dif_stats.dif_input_bytes); seq_printf(s, "DIF Outp Bytes = %lld\n", vha->qla_stats.qla_dif_stats.dif_output_bytes); seq_printf(s, "DIF Inp Req = 
%lld\n", vha->qla_stats.qla_dif_stats.dif_input_requests); seq_printf(s, "DIF Outp Req = %lld\n", vha->qla_stats.qla_dif_stats.dif_output_requests); seq_printf(s, "DIF Guard err = %d\n", vha->qla_stats.qla_dif_stats.dif_guard_err); seq_printf(s, "DIF Ref tag err = %d\n", vha->qla_stats.qla_dif_stats.dif_ref_tag_err); seq_printf(s, "DIF App tag err = %d\n", vha->qla_stats.qla_dif_stats.dif_app_tag_err); seq_puts(s, "\n"); seq_puts(s, "Initiator Error Counters\n"); seq_printf(s, "HW Error Count = %14lld\n", vha->hw_err_cnt); seq_printf(s, "Link Down Count = %14lld\n", vha->short_link_down_cnt); seq_printf(s, "Interface Err Count = %14lld\n", vha->interface_err_cnt); seq_printf(s, "Cmd Timeout Count = %14lld\n", vha->cmd_timeout_cnt); seq_printf(s, "Reset Count = %14lld\n", vha->reset_cmd_err_cnt); seq_puts(s, "\n"); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (!fcport->rport) continue; seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n", fcport->rport->number, fcport->tgt_short_link_down_cnt); } seq_puts(s, "\n"); return 0; } DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters); static int qla2x00_dfs_fce_show(struct seq_file *s, void *unused) { scsi_qla_host_t *vha = s->private; uint32_t cnt; uint32_t *fce; uint64_t fce_start; struct qla_hw_data *ha = vha->hw; mutex_lock(&ha->fce_mutex); seq_puts(s, "FCE Trace Buffer\n"); seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); seq_puts(s, "FCE Enable Registers\n"); seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], ha->fce_mb[5], ha->fce_mb[6]); fce = (uint32_t *) ha->fce; fce_start = (unsigned long long) ha->fce_dma; for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { if (cnt % 8 == 0) seq_printf(s, "\n%llx: ", (unsigned long long)((cnt * 4) + fce_start)); else seq_putc(s, ' '); seq_printf(s, "%08x", *fce++); } seq_puts(s, "\nEnd\n"); mutex_unlock(&ha->fce_mutex); return 0; } static int qla2x00_dfs_fce_open(struct inode *inode, struct file *file) { scsi_qla_host_t *vha = inode->i_private; struct qla_hw_data *ha = vha->hw; int rval; if (!ha->flags.fce_enabled) goto out; mutex_lock(&ha->fce_mutex); /* Pause tracing to flush FCE buffers. */ rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); if (rval) ql_dbg(ql_dbg_user, vha, 0x705c, "DebugFS: Unable to disable FCE (%d).\n", rval); ha->flags.fce_enabled = 0; mutex_unlock(&ha->fce_mutex); out: return single_open(file, qla2x00_dfs_fce_show, vha); } static int qla2x00_dfs_fce_release(struct inode *inode, struct file *file) { scsi_qla_host_t *vha = inode->i_private; struct qla_hw_data *ha = vha->hw; int rval; if (ha->flags.fce_enabled) goto out; mutex_lock(&ha->fce_mutex); /* Re-enable FCE tracing. 
*/ ha->flags.fce_enabled = 1; memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { ql_dbg(ql_dbg_user, vha, 0x700d, "DebugFS: Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0; } mutex_unlock(&ha->fce_mutex); out: return single_release(inode, file); } static const struct file_operations dfs_fce_ops = { .open = qla2x00_dfs_fce_open, .read = seq_read, .llseek = seq_lseek, .release = qla2x00_dfs_fce_release, }; static int qla_dfs_naqp_show(struct seq_file *s, void *unused) { struct scsi_qla_host *vha = s->private; struct qla_hw_data *ha = vha->hw; seq_printf(s, "%d\n", ha->tgt.num_act_qpairs); return 0; } /* * Helper macros for setting up debugfs entries. * _name: The name of the debugfs entry * _ctx_struct: The context that was passed when creating the debugfs file * * QLA_DFS_SETUP_RD could be used when there is only a show function. * - show function take the name qla_dfs_<sysfs-name>_show * * QLA_DFS_SETUP_RW could be used when there are both show and write functions. * - show function take the name qla_dfs_<sysfs-name>_show * - write function take the name qla_dfs_<sysfs-name>_write * * To have a new debugfs entry, do: * 1. Create a "struct dentry *" in the appropriate structure in the format * dfs_<sysfs-name> * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE * or QLA_DFS_ROOT_CREATE_FILE * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE * or QLA_DFS_ROOT_REMOVE_FILE * * Example for creating "TEST" sysfs file: * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; } * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t); * 3. In qla2x00_dfs_setup(): * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha); * 4. 
In qla2x00_dfs_remove(): * QLA_DFS_REMOVE_FILE(ha, TEST); */ #define QLA_DFS_SETUP_RD(_name, _ctx_struct) \ static int \ qla_dfs_##_name##_open(struct inode *inode, struct file *file) \ { \ _ctx_struct *__ctx = inode->i_private; \ \ return single_open(file, qla_dfs_##_name##_show, __ctx); \ } \ \ static const struct file_operations qla_dfs_##_name##_ops = { \ .open = qla_dfs_##_name##_open, \ .read = seq_read, \ .llseek = seq_lseek, \ .release = single_release, \ }; #define QLA_DFS_SETUP_RW(_name, _ctx_struct) \ static int \ qla_dfs_##_name##_open(struct inode *inode, struct file *file) \ { \ _ctx_struct *__ctx = inode->i_private; \ \ return single_open(file, qla_dfs_##_name##_show, __ctx); \ } \ \ static const struct file_operations qla_dfs_##_name##_ops = { \ .open = qla_dfs_##_name##_open, \ .read = seq_read, \ .llseek = seq_lseek, \ .release = single_release, \ .write = qla_dfs_##_name##_write, \ }; #define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \ do { \ if (!qla_dfs_##_name) \ qla_dfs_##_name = debugfs_create_file(#_name, \ _perm, qla2x00_dfs_root, _ctx, \ &qla_dfs_##_name##_ops); \ } while (0) #define QLA_DFS_ROOT_REMOVE_FILE(_name) \ do { \ if (qla_dfs_##_name) { \ debugfs_remove(qla_dfs_##_name); \ qla_dfs_##_name = NULL; \ } \ } while (0) #define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \ do { \ (_struct)->dfs_##_name = debugfs_create_file(#_name, \ _perm, _parent, _ctx, \ &qla_dfs_##_name##_ops) \ } while (0) #define QLA_DFS_REMOVE_FILE(_struct, _name) \ do { \ if ((_struct)->dfs_##_name) { \ debugfs_remove((_struct)->dfs_##_name); \ (_struct)->dfs_##_name = NULL; \ } \ } while (0) static int qla_dfs_naqp_open(struct inode *inode, struct file *file) { struct scsi_qla_host *vha = inode->i_private; return single_open(file, qla_dfs_naqp_show, vha); } static ssize_t qla_dfs_naqp_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { struct seq_file *s = file->private_data; struct scsi_qla_host *vha = s->private; struct qla_hw_data *ha = vha->hw; char *buf; int rc = 0; unsigned long num_act_qp; if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) { pr_err("host%ld: this adapter does not support Multi Q.", vha->host_no); return -EINVAL; } if (!vha->flags.qpairs_available) { pr_err("host%ld: Driver is not setup with Multi Q.", vha->host_no); return -EINVAL; } buf = memdup_user_nul(buffer, count); if (IS_ERR(buf)) { pr_err("host%ld: fail to copy user buffer.", vha->host_no); return PTR_ERR(buf); } num_act_qp = simple_strtoul(buf, NULL, 0); if (num_act_qp >= vha->hw->max_qpairs) { pr_err("User set invalid number of qpairs %lu. 
Max = %d", num_act_qp, vha->hw->max_qpairs); rc = -EINVAL; goto out_free; } if (num_act_qp != ha->tgt.num_act_qpairs) { ha->tgt.num_act_qpairs = num_act_qp; qlt_clr_qp_table(vha); } rc = count; out_free: kfree(buf); return rc; } static const struct file_operations dfs_naqp_ops = { .open = qla_dfs_naqp_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = qla_dfs_naqp_write, }; int qla2x00_dfs_setup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; if (!ha->fce) goto out; if (qla2x00_dfs_root) goto create_dir; atomic_set(&qla2x00_dfs_root_count, 0); qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); create_dir: if (ha->dfs_dir) goto create_nodes; mutex_init(&ha->fce_mutex); ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); atomic_inc(&qla2x00_dfs_root_count); create_nodes: ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops); ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops); ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops); ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops); if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) { ha->tgt.dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir, vha, &dfs_naqp_ops); if (IS_ERR(ha->tgt.dfs_naqp)) { ql_log(ql_log_warn, vha, 0xd011, "Unable to create debugFS naqp node.\n"); goto out; } } vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir); if (IS_ERR(vha->dfs_rport_root)) { ql_log(ql_log_warn, vha, 0xd012, "Unable to create debugFS rports node.\n"); goto out; } out: return 0; } int qla2x00_dfs_remove(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (ha->tgt.dfs_naqp) { debugfs_remove(ha->tgt.dfs_naqp); ha->tgt.dfs_naqp = NULL; } if (ha->tgt.dfs_tgt_sess) { debugfs_remove(ha->tgt.dfs_tgt_sess); ha->tgt.dfs_tgt_sess = NULL; } if (ha->tgt.dfs_tgt_port_database) { debugfs_remove(ha->tgt.dfs_tgt_port_database); ha->tgt.dfs_tgt_port_database = NULL; } if (ha->dfs_fw_resource_cnt) { debugfs_remove(ha->dfs_fw_resource_cnt); ha->dfs_fw_resource_cnt = NULL; } if (ha->dfs_tgt_counters) { debugfs_remove(ha->dfs_tgt_counters); ha->dfs_tgt_counters = NULL; } if (ha->dfs_fce) { debugfs_remove(ha->dfs_fce); ha->dfs_fce = NULL; } if (vha->dfs_rport_root) { debugfs_remove_recursive(vha->dfs_rport_root); vha->dfs_rport_root = NULL; } if (ha->dfs_dir) { debugfs_remove(ha->dfs_dir); ha->dfs_dir = NULL; atomic_dec(&qla2x00_dfs_root_count); } if (atomic_read(&qla2x00_dfs_root_count) == 0 && qla2x00_dfs_root) { debugfs_remove(qla2x00_dfs_root); qla2x00_dfs_root = NULL; } return 0; }
linux-master
drivers/scsi/qla2xxx/qla_dfs.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_target.h" #include <linux/delay.h> #include <linux/gfp.h> #ifdef CONFIG_PPC #define IS_PPCARCH true #else #define IS_PPCARCH false #endif static struct mb_cmd_name { uint16_t cmd; const char *str; } mb_str[] = { {MBC_GET_PORT_DATABASE, "GPDB"}, {MBC_GET_ID_LIST, "GIDList"}, {MBC_GET_LINK_PRIV_STATS, "Stats"}, {MBC_GET_RESOURCE_COUNTS, "ResCnt"}, }; static const char *mb_to_str(uint16_t cmd) { int i; struct mb_cmd_name *e; for (i = 0; i < ARRAY_SIZE(mb_str); i++) { e = mb_str + i; if (cmd == e->cmd) return e->str; } return "unknown"; } static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { { MBC_LOAD_RAM }, { MBC_EXECUTE_FIRMWARE }, { MBC_READ_RAM_WORD }, { MBC_MAILBOX_REGISTER_TEST }, { MBC_VERIFY_CHECKSUM }, { MBC_GET_FIRMWARE_VERSION }, { MBC_LOAD_RISC_RAM }, { MBC_DUMP_RISC_RAM }, { MBC_LOAD_RISC_RAM_EXTENDED }, { MBC_DUMP_RISC_RAM_EXTENDED }, { MBC_WRITE_RAM_WORD_EXTENDED }, { MBC_READ_RAM_EXTENDED }, { MBC_GET_RESOURCE_COUNTS }, { MBC_SET_FIRMWARE_OPTION }, { MBC_MID_INITIALIZE_FIRMWARE }, { MBC_GET_FIRMWARE_STATE }, { MBC_GET_MEM_OFFLOAD_CNTRL_STAT }, { MBC_GET_RETRY_COUNT }, { MBC_TRACE_CONTROL }, { MBC_INITIALIZE_MULTIQ }, { MBC_IOCB_COMMAND_A64 }, { MBC_GET_ADAPTER_LOOP_ID }, { MBC_READ_SFP }, { MBC_SET_RNID_PARAMS }, { MBC_GET_RNID_PARAMS }, { MBC_GET_SET_ZIO_THRESHOLD }, }; static int is_rom_cmd(uint16_t cmd) { int i; struct rom_cmd *wc; for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) { wc = rom_cmds + i; if (wc->cmd == cmd) return 1; } return 0; } /* * qla2x00_mailbox_command * Issue mailbox command and waits for completion. * * Input: * ha = adapter block pointer. * mcp = driver internal mbx struct pointer. * * Output: * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. * * Returns: * 0 : QLA_SUCCESS = cmd performed success * 1 : QLA_FUNCTION_FAILED (error encountered) * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) * * Context: * Kernel context. 
*/ static int qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) { int rval, i; unsigned long flags = 0; device_reg_t *reg; uint8_t abort_active, eeh_delay; uint8_t io_lock_on; uint16_t command = 0; uint16_t *iptr; __le16 __iomem *optr; uint32_t cnt; uint32_t mboxes; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); u32 chip_reset; ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); if (ha->pdev->error_state == pci_channel_io_perm_failure) { ql_log(ql_log_warn, vha, 0x1001, "PCI channel failed permanently, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x1002, "Device in failed state, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } /* if PCI error, then avoid mbx processing.*/ if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && test_bit(UNLOADING, &base_vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0xd04e, "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } eeh_delay = 0; reg = ha->iobase; io_lock_on = base_vha->flags.init_done; rval = QLA_SUCCESS; abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); chip_reset = ha->chip_reset; if (ha->flags.pci_channel_io_perm_failure) { ql_log(ql_log_warn, vha, 0x1003, "Perm failure on EEH timeout MBX, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ql_log(ql_log_warn, vha, 0x1004, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); return QLA_FUNCTION_TIMEOUT; } /* check if ISP abort is active and return cmd with timeout */ if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) { ql_log(ql_log_info, vha, 0x1005, "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", mcp->mb[0]); return QLA_FUNCTION_TIMEOUT; } atomic_inc(&ha->num_pend_mbx_stage1); /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during * non ISP abort time. */ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { /* Timeout occurred. Return error. */ ql_log(ql_log_warn, vha, 0xd035, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); vha->hw_err_cnt++; atomic_dec(&ha->num_pend_mbx_stage1); return QLA_FUNCTION_TIMEOUT; } atomic_dec(&ha->num_pend_mbx_stage1); if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || ha->flags.eeh_busy) { ql_log(ql_log_warn, vha, 0xd035, "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n", ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]); rval = QLA_ABORTED; goto premature_exit; } /* Save mailbox command for debug */ ha->mcp = mcp; ql_dbg(ql_dbg_mbx, vha, 0x1006, "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); spin_lock_irqsave(&ha->hardware_lock, flags); if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || ha->flags.mbox_busy) { rval = QLA_ABORTED; spin_unlock_irqrestore(&ha->hardware_lock, flags); goto premature_exit; } ha->flags.mbox_busy = 1; /* Load mailbox registers. 
*/ if (IS_P3P_TYPE(ha)) optr = &reg->isp82.mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) optr = &reg->isp24.mailbox0; else optr = MAILBOX_REG(ha, &reg->isp, 0); iptr = mcp->mb; command = mcp->mb[0]; mboxes = mcp->out_mb; ql_dbg(ql_dbg_mbx, vha, 0x1111, "Mailbox registers (OUT):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) optr = MAILBOX_REG(ha, &reg->isp, 8); if (mboxes & BIT_0) { ql_dbg(ql_dbg_mbx, vha, 0x1112, "mbox[%d]<-0x%04x\n", cnt, *iptr); wrt_reg_word(optr, *iptr); } else { wrt_reg_word(optr, 0); } mboxes >>= 1; optr++; iptr++; } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, "I/O Address = %p.\n", optr); /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); /* Unlock mbx registers and wait for interrupt */ ql_dbg(ql_dbg_mbx, vha, 0x100f, "Going to unlock irq & waiting for interrupts. " "jiffies=%lx.\n", jiffies); /* Wait for mbx cmd completion until timeout */ atomic_inc(&ha->num_pend_mbx_stage2); if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); if (IS_P3P_TYPE(ha)) wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING); else if (IS_FWI2_CAPABLE(ha)) wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT); else wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies; if (!wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ)) { ql_dbg(ql_dbg_mbx, vha, 0x117a, "cmd=%x Timeout.\n", command); spin_lock_irqsave(&ha->hardware_lock, flags); clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (chip_reset != ha->chip_reset) { eeh_delay = ha->flags.eeh_busy ? 1 : 0; spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); atomic_dec(&ha->num_pend_mbx_stage2); rval = QLA_ABORTED; goto premature_exit; } } else if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { eeh_delay = ha->flags.eeh_busy ? 1 : 0; spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); atomic_dec(&ha->num_pend_mbx_stage2); rval = QLA_ABORTED; goto premature_exit; } if (time_after(jiffies, wait_time + 5 * HZ)) ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", command, jiffies_to_msecs(jiffies - wait_time)); } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); if (IS_P3P_TYPE(ha)) { if (rd_reg_dword(&reg->isp82.hint) & HINT_MBX_INT_PENDING) { ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); atomic_dec(&ha->num_pend_mbx_stage2); ql_dbg(ql_dbg_mbx, vha, 0x1012, "Pending mailbox timeout, exiting.\n"); vha->hw_err_cnt++; rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING); } else if (IS_FWI2_CAPABLE(ha)) wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT); else wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ while (!ha->flags.mbox_int) { if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { eeh_delay = ha->flags.eeh_busy ? 
1 : 0; spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); atomic_dec(&ha->num_pend_mbx_stage2); rval = QLA_ABORTED; goto premature_exit; } if (time_after(jiffies, wait_time)) break; /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); if (!ha->flags.mbox_int && !(IS_QLA2200(ha) && command == MBC_LOAD_RISC_RAM_EXTENDED)) msleep(10); } /* while */ ql_dbg(ql_dbg_mbx, vha, 0x1013, "Waited %d sec.\n", (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); } atomic_dec(&ha->num_pend_mbx_stage2); /* Check whether we timed out */ if (ha->flags.mbox_int) { uint16_t *iptr2; ql_dbg(ql_dbg_mbx, vha, 0x1014, "Cmd=%x completed.\n", command); /* Got interrupt. Clear the flag. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ha->mcp = NULL; rval = QLA_FUNCTION_FAILED; ql_log(ql_log_warn, vha, 0xd048, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); goto premature_exit; } if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x11ff, "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0], MBS_COMMAND_COMPLETE); rval = QLA_FUNCTION_FAILED; } /* Load return mailbox registers. */ iptr2 = mcp->mb; iptr = (uint16_t *)&ha->mailbox_out[0]; mboxes = mcp->in_mb; ql_dbg(ql_dbg_mbx, vha, 0x1113, "Mailbox registers (IN):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) { *iptr2 = *iptr; ql_dbg(ql_dbg_mbx, vha, 0x1114, "mbox[%d]->0x%04x\n", cnt, *iptr2); } mboxes >>= 1; iptr2++; iptr++; } } else { uint16_t mb[8]; uint32_t ictrl, host_status, hccr; uint16_t w; if (IS_FWI2_CAPABLE(ha)) { mb[0] = rd_reg_word(&reg->isp24.mailbox0); mb[1] = rd_reg_word(&reg->isp24.mailbox1); mb[2] = rd_reg_word(&reg->isp24.mailbox2); mb[3] = rd_reg_word(&reg->isp24.mailbox3); mb[7] = rd_reg_word(&reg->isp24.mailbox7); ictrl = rd_reg_dword(&reg->isp24.ictrl); host_status = rd_reg_dword(&reg->isp24.host_status); hccr = rd_reg_dword(&reg->isp24.hccr); ql_log(ql_log_warn, vha, 0xd04c, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], mb[7], host_status, hccr); vha->hw_err_cnt++; } else { mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0); ictrl = rd_reg_word(&reg->isp.ictrl); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); vha->hw_err_cnt++; } ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* Capture FW dump only, if PCI device active */ if (!pci_channel_offline(vha->hw->pdev)) { pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); if (w == 0xffff || ictrl == 0xffffffff || (chip_reset != ha->chip_reset)) { /* This is special case if there is unload * of driver happening and if PCI device go * into bad state due to PCI error condition * then only PCI ERR flag would be set. * we will do premature exit for above case. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } /* Attempt to capture firmware dump for further * anallysis of the current formware state. 
we do not * need to do this if we are intentionally generating * a dump */ if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) qla2xxx_dump_fw(vha); rval = QLA_FUNCTION_TIMEOUT; } } spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Clean up */ ha->mcp = NULL; if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x101a, "Checking for additional resp interrupt.\n"); /* polling mode for non isp_abort commands. */ qla2x00_poll(ha->rsp_q_map[0]); } if (rval == QLA_FUNCTION_TIMEOUT && mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { if (!io_lock_on || (mcp->flags & IOCTL_CMD) || ha->flags.eeh_busy) { /* not in dpc. schedule it for dpc to take over. */ ql_dbg(ql_dbg_mbx, vha, 0x101b, "Timeout, schedule isp_abort_needed.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (IS_QLA82XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x112a, "disabling pause transmit on port " "0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0| CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101c, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " "abort.\n", command, mcp->mb[0], ha->flags.eeh_busy); vha->hw_err_cnt++; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else if (current == ha->dpc_thread) { /* call abort directly since we are in the DPC thread */ ql_dbg(ql_dbg_mbx, vha, 0x101d, "Timeout, calling abort_isp.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (IS_QLA82XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x112b, "disabling pause transmit on port " "0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0| CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101e, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x. Scheduling ISP abort ", command, mcp->mb[0]); vha->hw_err_cnt++; set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); /* Allow next mbx cmd to come in. */ complete(&ha->mbx_cmd_comp); if (ha->isp_ops->abort_isp(vha) && !ha->flags.eeh_busy) { /* Failed. retry later. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_mbx, vha, 0x101f, "Finished abort_isp.\n"); goto mbx_done; } } } premature_exit: /* Allow next mbx cmd to come in. */ complete(&ha->mbx_cmd_comp); mbx_done: if (rval == QLA_ABORTED) { ql_log(ql_log_info, vha, 0xd035, "Chip Reset in progress. 
Purging Mbox cmd=0x%x.\n", mcp->mb[0]); } else if (rval) { if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR, dev_name(&ha->pdev->dev), 0x1020+0x800, vha->host_no, rval); mboxes = mcp->in_mb; cnt = 4; for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1) if (mboxes & BIT_0) { printk(" mb[%u]=%x", i, mcp->mb[i]); cnt--; } pr_warn(" cmd=%x ****\n", command); } if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { ql_dbg(ql_dbg_mbx, vha, 0x1198, "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", rd_reg_dword(&reg->isp24.host_status), rd_reg_dword(&reg->isp24.ictrl), rd_reg_dword(&reg->isp24.istatus)); } else { ql_dbg(ql_dbg_mbx, vha, 0x1206, "ctrl_status=%#x ictrl=%#x istatus=%#x\n", rd_reg_word(&reg->isp.ctrl_status), rd_reg_word(&reg->isp.ictrl), rd_reg_word(&reg->isp.istatus)); } } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); } i = 500; while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) { /* * The caller of this mailbox encounter pci error. * Hold the thread until PCIE link reset complete to make * sure caller does not unmap dma while recovery is * in progress. */ msleep(1); i--; } return rval; } int qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, uint32_t risc_code_size) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, "Entered %s.\n", __func__); if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_0; } else { mcp->mb[0] = MBC_LOAD_RISC_RAM; mcp->out_mb = MBX_0; } mcp->mb[1] = LSW(risc_addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; if (IS_FWI2_CAPABLE(ha)) { mcp->mb[4] = MSW(risc_code_size); mcp->mb[5] = LSW(risc_code_size); mcp->out_mb |= MBX_5|MBX_4; } else { mcp->mb[4] = LSW(risc_code_size); mcp->out_mb |= MBX_4; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1023, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); vha->hw_err_cnt++; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, "Done %s.\n", __func__); } return rval; } #define NVME_ENABLE_FLAG BIT_3 #define EDIF_HW_SUPPORT BIT_10 /* * qla2x00_execute_fw * Start adapter firmware. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; u8 semaphore = 0; #define EXE_FW_FORCE_SEMAPHORE BIT_7 u8 retry = 5; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, "Entered %s.\n", __func__); again: mcp->mb[0] = MBC_EXECUTE_FIRMWARE; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; if (IS_FWI2_CAPABLE(ha)) { mcp->mb[1] = MSW(risc_addr); mcp->mb[2] = LSW(risc_addr); mcp->mb[3] = 0; mcp->mb[4] = 0; mcp->mb[11] = 0; /* Enable BPM? 
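		 *
		 * Descriptive note (added for clarity): BPM is requested
		 * below via mb[4] BIT_0 only when a long-range transceiver
		 * was detected; on range-capable adapters the detected LR
		 * distance is additionally encoded at LR_DIST_FW_POS.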
*/ if (ha->flags.lr_detected) { mcp->mb[4] = BIT_0; if (IS_BPM_RANGE_CAPABLE(ha)) mcp->mb[4] |= ha->lr_distance << LR_DIST_FW_POS; } if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha))) mcp->mb[4] |= NVME_ENABLE_FLAG; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { struct nvram_81xx *nv = ha->nvram; /* set minimum speed if specified in nvram */ if (nv->min_supported_speed >= 2 && nv->min_supported_speed <= 5) { mcp->mb[4] |= BIT_4; mcp->mb[11] |= nv->min_supported_speed & 0xF; mcp->out_mb |= MBX_11; mcp->in_mb |= BIT_5; vha->min_supported_speed = nv->min_supported_speed; } if (IS_PPCARCH) mcp->mb[11] |= BIT_4; } if (ha->flags.exlogins_enabled) mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; if (ha->flags.exchoffld_enabled) mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; if (semaphore) mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); mcp->out_mb |= MBX_1; if (IS_QLA2322(ha) || IS_QLA6322(ha)) { mcp->mb[2] = 0; mcp->out_mb |= MBX_2; } } mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR && mcp->mb[1] == 0x27 && retry) { semaphore = 1; retry--; ql_dbg(ql_dbg_async, vha, 0x1026, "Exe FW: force semaphore.\n"); goto again; } if (retry) { retry--; ql_dbg(ql_dbg_async, vha, 0x509d, "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry); goto again; } ql_dbg(ql_dbg_mbx, vha, 0x1026, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); vha->hw_err_cnt++; return rval; } if (!IS_FWI2_CAPABLE(ha)) goto done; ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; ql_dbg(ql_dbg_mbx, vha, 0x119a, "fw_ability_mask=%x.\n", ha->fw_ability_mask); ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]); if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1); ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n", ha->max_supported_speed == 0 ? "16Gps" : ha->max_supported_speed == 1 ? "32Gps" : ha->max_supported_speed == 2 ? "64Gps" : "unknown"); if (vha->min_supported_speed) { ha->min_supported_speed = mcp->mb[5] & (BIT_0 | BIT_1 | BIT_2); ql_dbg(ql_dbg_mbx, vha, 0x119c, "min_supported_speed=%s.\n", ha->min_supported_speed == 6 ? "64Gps" : ha->min_supported_speed == 5 ? "32Gps" : ha->min_supported_speed == 4 ? "16Gps" : ha->min_supported_speed == 3 ? "8Gps" : ha->min_supported_speed == 2 ? "4Gps" : "unknown"); } } if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) { ha->flags.edif_hw = 1; ql_log(ql_log_info, vha, 0xffff, "%s: edif HW\n", __func__); } done: ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, "Done %s.\n", __func__); return rval; } /* * qla_get_exlogin_status * Get extended login status * uses the memory offload control/status Mailbox * * Input: * ha: adapter state pointer. * fwopt: firmware options * * Returns: * qla2x00 local function status * * Context: * Kernel context. 
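 *
 * Example (illustrative sketch only, not taken from the original
 * driver; the log message and its id 0x0000 are placeholders):
 *
 *	uint16_t buf_sz = 0, ex_logins = 0;
 *
 *	if (qla_get_exlogin_status(vha, &buf_sz, &ex_logins) == QLA_SUCCESS)
 *		ql_log(ql_log_info, vha, 0x0000,
 *		    "exlogin buf size %u, count %u\n", buf_sz, ex_logins);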
*/ #define FETCH_XLOGINS_STAT 0x8 int qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz, uint16_t *ex_logins_cnt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f, "Entered %s\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; mcp->mb[1] = FETCH_XLOGINS_STAT; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_10|MBX_4|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval); } else { *buf_sz = mcp->mb[4]; *ex_logins_cnt = mcp->mb[10]; ql_log(ql_log_info, vha, 0x1190, "buffer size 0x%x, exchange login count=%d\n", mcp->mb[4], mcp->mb[10]); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116, "Done %s.\n", __func__); } return rval; } /* * qla_set_exlogin_mem_cfg * set extended login memory configuration * Mbx needs to be issues before init_cb is set * * Input: * ha: adapter state pointer. * buffer: buffer pointer * phys_addr: physical address of buffer * size: size of buffer * TARGET_QUEUE_LOCK must be released * ADAPTER_STATE_LOCK must be release * * Returns: * qla2x00 local funxtion status code. * * Context: * Kernel context. */ #define CONFIG_XLOGINS_MEM 0x9 int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; mcp->mb[1] = CONFIG_XLOGINS_MEM; mcp->mb[2] = MSW(phys_addr); mcp->mb[3] = LSW(phys_addr); mcp->mb[6] = MSW(MSD(phys_addr)); mcp->mb[7] = LSW(MSD(phys_addr)); mcp->mb[8] = MSW(ha->exlogin_size); mcp->mb[9] = LSW(ha->exlogin_size); mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_11|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x111b, "EXlogin Failed=%x. MB0=%x MB11=%x\n", rval, mcp->mb[0], mcp->mb[11]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, "Done %s.\n", __func__); } return rval; } /* * qla_get_exchoffld_status * Get exchange offload status * uses the memory offload control/status Mailbox * * Input: * ha: adapter state pointer. * fwopt: firmware options * * Returns: * qla2x00 local function status * * Context: * Kernel context. */ #define FETCH_XCHOFFLD_STAT 0x2 int qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, uint16_t *ex_logins_cnt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019, "Entered %s\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; mcp->mb[1] = FETCH_XCHOFFLD_STAT; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_10|MBX_4|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval); } else { *buf_sz = mcp->mb[4]; *ex_logins_cnt = mcp->mb[10]; ql_log(ql_log_info, vha, 0x118e, "buffer size 0x%x, exchange offload count=%d\n", mcp->mb[4], mcp->mb[10]); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156, "Done %s.\n", __func__); } return rval; } /* * qla_set_exchoffld_mem_cfg * Set exchange offload memory configuration * Mbx needs to be issues before init_cb is set * * Input: * ha: adapter state pointer. 
* buffer: buffer pointer * phys_addr: physical address of buffer * size: size of buffer * TARGET_QUEUE_LOCK must be released * ADAPTER_STATE_LOCK must be release * * Returns: * qla2x00 local funxtion status code. * * Context: * Kernel context. */ #define CONFIG_XCHOFFLD_MEM 0x3 int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; mcp->mb[1] = CONFIG_XCHOFFLD_MEM; mcp->mb[2] = MSW(ha->exchoffld_buf_dma); mcp->mb[3] = LSW(ha->exchoffld_buf_dma); mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma)); mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma)); mcp->mb[8] = MSW(ha->exchoffld_size); mcp->mb[9] = LSW(ha->exchoffld_size); mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_11|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_fw_version * Get firmware version. * * Input: * ha: adapter state pointer. * major: pointer for major number. * minor: pointer for minor number. * subminor: pointer for subminor number. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_fw_version(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; mcp->out_mb = MBX_0; mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7; mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) goto failed; /* Return mailbox data. */ ha->fw_major_version = mcp->mb[1]; ha->fw_minor_version = mcp->mb[2]; ha->fw_subminor_version = mcp->mb[3]; ha->fw_attributes = mcp->mb[6]; if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. 
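	 * Descriptive note (added for clarity): ISP2100/ISP2200 firmware
	 * does not report its memory size in mb[4]/mb[5], so a fixed
	 * default is assumed here; later adapters return the size below.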
*/ else ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { ha->mpi_version[0] = mcp->mb[10] & 0xff; ha->mpi_version[1] = mcp->mb[11] >> 8; ha->mpi_version[2] = mcp->mb[11] & 0xff; ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; ha->phy_version[0] = mcp->mb[8] & 0xff; ha->phy_version[1] = mcp->mb[9] >> 8; ha->phy_version[2] = mcp->mb[9] & 0xff; } if (IS_FWI2_CAPABLE(ha)) { ha->fw_attributes_h = mcp->mb[15]; ha->fw_attributes_ext[0] = mcp->mb[16]; ha->fw_attributes_ext[1] = mcp->mb[17]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", __func__, mcp->mb[15], mcp->mb[6]); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", __func__, mcp->mb[17], mcp->mb[16]); if (ha->fw_attributes_h & 0x4) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, "%s: Firmware supports Extended Login 0x%x\n", __func__, ha->fw_attributes_h); if (ha->fw_attributes_h & 0x8) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, "%s: Firmware supports Exchange Offload 0x%x\n", __func__, ha->fw_attributes_h); /* * FW supports nvme and driver load parameter requested nvme. * BIT 26 of fw_attributes indicates NVMe support. */ if ((ha->fw_attributes_h & (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) && ql2xnvmeenable) { if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST) vha->flags.nvme_first_burst = 1; vha->flags.nvme_enabled = 1; ql_log(ql_log_info, vha, 0xd302, "%s: FC-NVMe is Enabled (0x%x)\n", __func__, ha->fw_attributes_h); } /* BIT_13 of Extended FW Attributes informs about NVMe2 support */ if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) { ql_log(ql_log_info, vha, 0xd302, "Firmware supports NVMe2 0x%x\n", ha->fw_attributes_ext[0]); vha->flags.nvme2_enabled = 1; } if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable && (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) { ha->flags.edif_enabled = 1; ql_log(ql_log_info, vha, 0xffff, "%s: edif is enabled\n", __func__); } } if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->serdes_version[0] = mcp->mb[7] & 0xff; ha->serdes_version[1] = mcp->mb[8] >> 8; ha->serdes_version[2] = mcp->mb[8] & 0xff; ha->mpi_version[0] = mcp->mb[10] & 0xff; ha->mpi_version[1] = mcp->mb[11] >> 8; ha->mpi_version[2] = mcp->mb[11] & 0xff; ha->pep_version[0] = mcp->mb[13] & 0xff; ha->pep_version[1] = mcp->mb[14] >> 8; ha->pep_version[2] = mcp->mb[14] & 0xff; ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; if (IS_QLA28XX(ha)) { if (mcp->mb[16] & BIT_10) ha->flags.secure_fw = 1; ql_log(ql_log_info, vha, 0xffff, "Secure Flash Update in FW: %s\n", (ha->flags.secure_fw) ? "Supported" : "Not Supported"); } if (ha->flags.scm_supported_a && (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { ha->flags.scm_supported_f = 1; ha->sf_init_cb->flags |= cpu_to_le16(BIT_13); } ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n", (ha->flags.scm_supported_f) ? 
"Supported" : "Not Supported"); if (vha->flags.nvme2_enabled) { /* set BIT_15 of special feature control block for SLER */ ha->sf_init_cb->flags |= cpu_to_le16(BIT_15); /* set BIT_14 of special feature control block for PI CTRL*/ ha->sf_init_cb->flags |= cpu_to_le16(BIT_14); } } failed: if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_fw_options * Set firmware options. * * Input: * ha = adapter block pointer. * fwopt = pointer for firmware options. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; mcp->out_mb = MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); } else { fwopts[0] = mcp->mb[0]; fwopts[1] = mcp->mb[1]; fwopts[2] = mcp->mb[2]; fwopts[3] = mcp->mb[3]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, "Done %s.\n", __func__); } return rval; } /* * qla2x00_set_fw_options * Set firmware options. * * Input: * ha = adapter block pointer. * fwopt = pointer for firmware options. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; mcp->mb[1] = fwopts[1]; mcp->mb[2] = fwopts[2]; mcp->mb[3] = fwopts[3]; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->in_mb |= MBX_1; mcp->mb[10] = fwopts[10]; mcp->out_mb |= MBX_10; } else { mcp->mb[10] = fwopts[10]; mcp->mb[11] = fwopts[11]; mcp->mb[12] = 0; /* Undocumented, but used */ mcp->out_mb |= MBX_12|MBX_11|MBX_10; } mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); fwopts[0] = mcp->mb[0]; if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1030, "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, "Done %s.\n", __func__); } return rval; } /* * qla2x00_mbx_reg_test * Mailbox register wrap test. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
*/ int qla2x00_mbx_reg_test(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, "Entered %s.\n", __func__); mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; mcp->mb[1] = 0xAAAA; mcp->mb[2] = 0x5555; mcp->mb[3] = 0xAA55; mcp->mb[4] = 0x55AA; mcp->mb[5] = 0xA5A5; mcp->mb[6] = 0x5A5A; mcp->mb[7] = 0x2525; mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) rval = QLA_FUNCTION_FAILED; if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || mcp->mb[7] != 0x2525) rval = QLA_FUNCTION_FAILED; } if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); vha->hw_err_cnt++; } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, "Done %s.\n", __func__); } return rval; } /* * qla2x00_verify_checksum * Verify firmware checksum. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, "Entered %s.\n", __func__); mcp->mb[0] = MBC_VERIFY_CHECKSUM; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[1] = MSW(risc_addr); mcp->mb[2] = LSW(risc_addr); mcp->out_mb |= MBX_2|MBX_1; mcp->in_mb |= MBX_2|MBX_1; } else { mcp->mb[1] = LSW(risc_addr); mcp->out_mb |= MBX_1; mcp->in_mb |= MBX_1; } mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1036, "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, "Done %s.\n", __func__); } return rval; } /* * qla2x00_issue_iocb * Issue IOCB using mailbox command * * Input: * ha = adapter state pointer. * buffer = buffer pointer. * phys_addr = physical address of buffer. * size = size of buffer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, size_t size, uint32_t tov) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!vha->hw->flags.fw_started) return QLA_INVALID_COMMAND; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, "Entered %s.\n", __func__); mcp->mb[0] = MBC_IOCB_COMMAND_A64; mcp->mb[1] = 0; mcp->mb[2] = MSW(LSD(phys_addr)); mcp->mb[3] = LSW(LSD(phys_addr)); mcp->mb[6] = MSW(MSD(phys_addr)); mcp->mb[7] = LSW(MSD(phys_addr)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = tov; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); } else { sts_entry_t *sts_entry = buffer; /* Mask reserved bits. */ sts_entry->entry_status &= IS_FWI2_CAPABLE(vha->hw) ? 
RF_MASK_24XX : RF_MASK; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, "Done %s (status=%x).\n", __func__, sts_entry->entry_status); } return rval; } int qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, size_t size) { return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, MBX_TOV_SECONDS); } /* * qla2x00_abort_command * Abort command aborts a specified IOCB. * * Input: * ha = adapter block pointer. * sp = SB structure pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_abort_command(srb_t *sp) { unsigned long flags = 0; int rval; uint32_t handle = 0; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; fc_port_t *fcport = sp->fcport; scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct scsi_cmnd *cmd = GET_CMD_SP(sp); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, "Entered %s.\n", __func__); if (sp->qpair) req = sp->qpair->req; else req = vha->req; spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < req->num_outstanding_cmds; handle++) { if (req->outstanding_cmds[handle] == sp) break; } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (handle == req->num_outstanding_cmds) { /* command not found */ return QLA_FUNCTION_FAILED; } mcp->mb[0] = MBC_ABORT_COMMAND; if (HAS_EXTENDED_IDS(ha)) mcp->mb[1] = fcport->loop_id; else mcp->mb[1] = fcport->loop_id << 8; mcp->mb[2] = (uint16_t)handle; mcp->mb[3] = (uint16_t)(handle >> 16); mcp->mb[6] = (uint16_t)cmd->device->lun; mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, "Done %s.\n", __func__); } return rval; } int qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) { int rval, rval2; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; scsi_qla_host_t *vha; vha = fcport->vha; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, "Entered %s.\n", __func__); mcp->mb[0] = MBC_ABORT_TARGET; mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = 0; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = fcport->loop_id << 8; } mcp->mb[2] = vha->hw->loop_reset_delay; mcp->mb[9] = vha->vp_idx; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, "Failed=%x.\n", rval); } /* Issue marker IOCB. 
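	 * Descriptive note (added for clarity): the MK_SYNC_ID marker is
	 * issued regardless of the mailbox result so the firmware can
	 * resynchronize state for this target before new commands are
	 * queued.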
*/ rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0, MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1040, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, "Done %s.\n", __func__); } return rval; } int qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) { int rval, rval2; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; scsi_qla_host_t *vha; vha = fcport->vha; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LUN_RESET; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) mcp->mb[1] = fcport->loop_id; else mcp->mb[1] = fcport->loop_id << 8; mcp->mb[2] = (u32)l; mcp->mb[3] = 0; mcp->mb[9] = vha->vp_idx; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); } /* Issue marker IOCB. */ rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, MK_SYNC_ID_LUN); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1044, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_adapter_id * Get adapter ID and topology. * * Input: * ha = adapter block pointer. * id = pointer for loop ID. * al_pa = pointer for AL_PA. * area = pointer for area. * domain = pointer for domain. * top = pointer for topology. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_0; mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; if (IS_CNA_CAPABLE(vha->hw)) mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (mcp->mb[0] == MBS_COMMAND_ERROR) rval = QLA_COMMAND_ERROR; else if (mcp->mb[0] == MBS_INVALID_COMMAND) rval = QLA_INVALID_COMMAND; /* Return data. 
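	 * Descriptive note (added for clarity): mb[1] = loop ID,
	 * mb[2] = area/AL_PA, mb[3] = domain, mb[6] = topology,
	 * mb[7] = switch capabilities.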
*/ *id = mcp->mb[1]; *al_pa = LSB(mcp->mb[2]); *area = MSB(mcp->mb[2]); *domain = LSB(mcp->mb[3]); *top = mcp->mb[6]; *sw_cap = mcp->mb[7]; if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, "Done %s.\n", __func__); if (IS_CNA_CAPABLE(vha->hw)) { vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; vha->fcoe_fcf_idx = mcp->mb[10]; vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; } /* If FA-WWN supported */ if (IS_FAWWN_CAPABLE(vha->hw)) { if (mcp->mb[7] & BIT_14) { vha->port_name[0] = MSB(mcp->mb[16]); vha->port_name[1] = LSB(mcp->mb[16]); vha->port_name[2] = MSB(mcp->mb[17]); vha->port_name[3] = LSB(mcp->mb[17]); vha->port_name[4] = MSB(mcp->mb[18]); vha->port_name[5] = LSB(mcp->mb[18]); vha->port_name[6] = MSB(mcp->mb[19]); vha->port_name[7] = LSB(mcp->mb[19]); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); ql_dbg(ql_dbg_mbx, vha, 0x10ca, "FA-WWN acquired %016llx\n", wwn_to_u64(vha->port_name)); } } if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { vha->bbcr = mcp->mb[15]; if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) { ql_log(ql_log_info, vha, 0x11a4, "SCM: EDC ELS completed, flags 0x%x\n", mcp->mb[21]); } if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) { vha->hw->flags.scm_enabled = 1; vha->scm_fabric_connection_flags |= SCM_FLAG_RDF_COMPLETED; ql_log(ql_log_info, vha, 0x11a5, "SCM: RDF ELS completed, flags 0x%x\n", mcp->mb[23]); } } } return rval; } /* * qla2x00_get_retry_cnt * Get current firmware login retry count and delay. * * Input: * ha = adapter block pointer. * retry_cnt = pointer to login retry count. * tov = pointer to login timeout value. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, uint16_t *r_a_tov) { int rval; uint16_t ratov; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RETRY_COUNT; mcp->out_mb = MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x104a, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /* Convert returned data and check our values. */ *r_a_tov = mcp->mb[3] / 2; ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */ if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) { /* Update to the larger values */ *retry_cnt = (uint8_t)mcp->mb[1]; *tov = ratov; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); } return rval; } /* * qla2x00_init_firmware * Initialize adapter firmware. * * Input: * ha = adapter block pointer. * dptr = Initialization control block pointer. * size = size of initialization control block. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
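 *
 * Example (illustrative sketch only, not from the original driver;
 * ha->init_cb_size is assumed to hold the size of the prepared ICB,
 * and the log id 0x0000 is a placeholder):
 *
 *	struct qla_hw_data *ha = vha->hw;
 *
 *	if (qla2x00_init_firmware(vha, ha->init_cb_size) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0x0000,
 *		    "Init firmware failed.\n");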
*/ int qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, "Entered %s.\n", __func__); if (IS_P3P_TYPE(ha) && ql2xdbwr) qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); if (ha->flags.npiv_supported) mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; else mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; mcp->mb[1] = 0; mcp->mb[2] = MSW(ha->init_cb_dma); mcp->mb[3] = LSW(ha->init_cb_dma); mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { mcp->mb[1] = BIT_0; mcp->mb[10] = MSW(ha->ex_init_cb_dma); mcp->mb[11] = LSW(ha->ex_init_cb_dma); mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma)); mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma)); mcp->mb[14] = sizeof(*ha->ex_init_cb); mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; } if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) { mcp->mb[1] |= BIT_1; mcp->mb[16] = MSW(ha->sf_init_cb_dma); mcp->mb[17] = LSW(ha->sf_init_cb_dma); mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma)); mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma)); mcp->mb[15] = sizeof(*ha->sf_init_cb); mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15; } /* 1 and 2 should normally be captured. */ mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* mb3 is additional info about the installed SFP. */ mcp->in_mb |= MBX_3; mcp->buf_size = size; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x104d, "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); if (ha->init_cb) { ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0104d, ha->init_cb, sizeof(*ha->init_cb)); } if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb)); } } else { if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mcp->mb[2] == 6 || mcp->mb[3] == 2) ql_dbg(ql_dbg_mbx, vha, 0x119d, "Invalid SFP/Validation Failed\n"); } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_port_database * Issue normal/enhanced get port database mailbox command * and copy device name as necessary. * * Input: * ha = adapter state pointer. * dev = structure pointer. * opt = enhanced cmd option byte. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
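 *
 * Example (illustrative sketch only, not from the original driver;
 * the debug id 0x0000 is a placeholder):
 *
 *	if (qla2x00_get_port_database(vha, fcport, 0) == QLA_SUCCESS)
 *		ql_dbg(ql_dbg_disc, vha, 0x0000,
 *		    "port %8phC state refreshed\n", fcport->port_name);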
*/ int qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; port_database_t *pd; struct port_database_24xx *pd24; dma_addr_t pd_dma; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, "Entered %s.\n", __func__); pd24 = NULL; pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { ql_log(ql_log_warn, vha, 0x1050, "Failed to allocate port database structure.\n"); fcport->query = 0; return QLA_MEMORY_ALLOC_FAILED; } mcp->mb[0] = MBC_GET_PORT_DATABASE; if (opt != 0 && !IS_FWI2_CAPABLE(ha)) mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; mcp->mb[2] = MSW(pd_dma); mcp->mb[3] = LSW(pd_dma); mcp->mb[6] = MSW(MSD(pd_dma)); mcp->mb[7] = LSW(MSD(pd_dma)); mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_0; if (IS_FWI2_CAPABLE(ha)) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = opt; mcp->out_mb |= MBX_10|MBX_1; mcp->in_mb |= MBX_1; } else if (HAS_EXTENDED_IDS(ha)) { mcp->mb[1] = fcport->loop_id; mcp->mb[10] = opt; mcp->out_mb |= MBX_10|MBX_1; } else { mcp->mb[1] = fcport->loop_id << 8 | opt; mcp->out_mb |= MBX_1; } mcp->buf_size = IS_FWI2_CAPABLE(ha) ? PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; mcp->flags = MBX_DMA_IN; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) goto gpd_error_out; if (IS_FWI2_CAPABLE(ha)) { uint64_t zero = 0; u8 current_login_state, last_login_state; pd24 = (struct port_database_24xx *) pd; /* Check for logged in state. */ if (NVME_TARGET(ha, fcport)) { current_login_state = pd24->current_login_state >> 4; last_login_state = pd24->last_login_state >> 4; } else { current_login_state = pd24->current_login_state & 0xf; last_login_state = pd24->last_login_state & 0xf; } fcport->current_login_state = pd24->current_login_state; fcport->last_login_state = pd24->last_login_state; /* Check for logged in state. */ if (current_login_state != PDS_PRLI_COMPLETE && last_login_state != PDS_PRLI_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x119a, "Unable to verify login-state (%x/%x) for loop_id %x.\n", current_login_state, last_login_state, fcport->loop_id); rval = QLA_FUNCTION_FAILED; if (!fcport->query) goto gpd_error_out; } if (fcport->loop_id == FC_NO_LOOP_ID || (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && memcmp(fcport->port_name, pd24->port_name, 8))) { /* We lost the device mid way. */ rval = QLA_NOT_LOGGED_IN; goto gpd_error_out; } /* Names are little-endian. */ memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); /* Get port_id of device. */ fcport->d_id.b.domain = pd24->port_id[0]; fcport->d_id.b.area = pd24->port_id[1]; fcport->d_id.b.al_pa = pd24->port_id[2]; fcport->d_id.b.rsvd_1 = 0; /* If not target must be initiator or unknown type. */ if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; /* Passback COS information. */ fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? FC_COS_CLASS2 : FC_COS_CLASS3; if (pd24->prli_svc_param_word_3[0] & BIT_7) fcport->flags |= FCF_CONF_COMP_SUPPORTED; } else { uint64_t zero = 0; /* Check for logged in state. 
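		 * (legacy, non-FWI2 port database: at least one of the
		 * master/slave states must report a completed port login)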
*/ if (pd->master_state != PD_STATE_PORT_LOGGED_IN && pd->slave_state != PD_STATE_PORT_LOGGED_IN) { ql_dbg(ql_dbg_mbx, vha, 0x100a, "Unable to verify login-state (%x/%x) - " "portid=%02x%02x%02x.\n", pd->master_state, pd->slave_state, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } if (fcport->loop_id == FC_NO_LOOP_ID || (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && memcmp(fcport->port_name, pd->port_name, 8))) { /* We lost the device mid way. */ rval = QLA_NOT_LOGGED_IN; goto gpd_error_out; } /* Names are little-endian. */ memcpy(fcport->node_name, pd->node_name, WWN_SIZE); memcpy(fcport->port_name, pd->port_name, WWN_SIZE); /* Get port_id of device. */ fcport->d_id.b.domain = pd->port_id[0]; fcport->d_id.b.area = pd->port_id[3]; fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; /* If not target must be initiator or unknown type. */ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; /* Passback COS information. */ fcport->supported_classes = (pd->options & BIT_4) ? FC_COS_CLASS2 : FC_COS_CLASS3; } gpd_error_out: dma_pool_free(ha->s_dma_pool, pd, pd_dma); fcport->query = 0; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1052, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, "Done %s.\n", __func__); } return rval; } int qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, struct port_database_24xx *pdb) { mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; dma_addr_t pdb_dma; int rval; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, "Entered %s.\n", __func__); memset(pdb, 0, sizeof(*pdb)); pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, sizeof(*pdb), DMA_FROM_DEVICE); if (!pdb_dma) { ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } mcp->mb[0] = MBC_GET_PORT_DATABASE; mcp->mb[1] = nport_handle; mcp->mb[2] = MSW(LSD(pdb_dma)); mcp->mb[3] = LSW(LSD(pdb_dma)); mcp->mb[6] = MSW(MSD(pdb_dma)); mcp->mb[7] = LSW(MSD(pdb_dma)); mcp->mb[9] = 0; mcp->mb[10] = 0; mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->buf_size = sizeof(*pdb); mcp->flags = MBX_DMA_IN; mcp->tov = vha->hw->login_timeout * 2; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x111a, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, "Done %s.\n", __func__); } dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, sizeof(*pdb), DMA_FROM_DEVICE); return rval; } /* * qla2x00_get_firmware_state * Get adapter firmware state. * * Input: * ha = adapter block pointer. * dptr = pointer for firmware state. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
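 *
 * Example (illustrative sketch only, not from the original driver;
 * FW_STATE_READY is assumed to be the "ready" value defined in the
 * driver headers, and the debug id 0x0000 is a placeholder):
 *
 *	uint16_t state[6] = { 0 };
 *
 *	if (qla2x00_get_firmware_state(vha, state) == QLA_SUCCESS &&
 *	    state[0] == FW_STATE_READY)
 *		ql_dbg(ql_dbg_mbx, vha, 0x0000, "Firmware ready.\n");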
*/ int qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, "Entered %s.\n", __func__); if (!ha->flags.fw_started) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; else mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return firmware states. */ states[0] = mcp->mb[1]; if (IS_FWI2_CAPABLE(vha->hw)) { states[1] = mcp->mb[2]; states[2] = mcp->mb[3]; /* SFP info */ states[3] = mcp->mb[4]; states[4] = mcp->mb[5]; states[5] = mcp->mb[6]; /* DPORT status */ } if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); } else { if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mcp->mb[2] == 6 || mcp->mb[3] == 2) ql_dbg(ql_dbg_mbx, vha, 0x119e, "Invalid SFP/Validation Failed\n"); } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_port_name * Issue get port name mailbox command. * Returned name is in big endian format. * * Input: * ha = adapter block pointer. * loop_id = loop ID of device. * name = pointer for name. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_PORT_NAME; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = loop_id; mcp->mb[10] = opt; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = loop_id << 8 | opt; } mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); } else { if (name != NULL) { /* This function returns name in big endian. */ name[0] = MSB(mcp->mb[2]); name[1] = LSB(mcp->mb[2]); name[2] = MSB(mcp->mb[3]); name[3] = LSB(mcp->mb[3]); name[4] = MSB(mcp->mb[6]); name[5] = LSB(mcp->mb[6]); name[6] = MSB(mcp->mb[7]); name[7] = LSB(mcp->mb[7]); } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, "Done %s.\n", __func__); } return rval; } /* * qla24xx_link_initialization * Issue link initialization mailbox command. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
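 *
 * Example (illustrative sketch only, not from the original driver;
 * the debug id 0x0000 is a placeholder):
 *
 *	if (qla24xx_link_initialize(vha) != QLA_SUCCESS)
 *		ql_dbg(ql_dbg_mbx, vha, 0x0000,
 *		    "Link initialization failed.\n");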
*/ int qla24xx_link_initialize(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_LINK_INITIALIZATION; mcp->mb[1] = BIT_4; if (vha->hw->operating_mode == LOOP) mcp->mb[1] |= BIT_6; else mcp->mb[1] |= BIT_5; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, "Done %s.\n", __func__); } return rval; } /* * qla2x00_lip_reset * Issue LIP reset mailbox command. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_lip_reset(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_disc, vha, 0x105a, "Entered %s.\n", __func__); if (IS_CNA_CAPABLE(vha->hw)) { /* Logout across all FCFs. */ mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = BIT_1; mcp->mb[2] = 0; mcp->out_mb = MBX_2|MBX_1|MBX_0; } else if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = BIT_4; mcp->mb[2] = 0; mcp->mb[3] = vha->hw->loop_reset_delay; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; } else { mcp->mb[0] = MBC_LIP_RESET; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = 0x00ff; mcp->mb[10] = 0; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = 0xff00; } mcp->mb[2] = vha->hw->loop_reset_delay; mcp->mb[3] = 0; } mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, "Done %s.\n", __func__); } return rval; } /* * qla2x00_send_sns * Send SNS command. * * Input: * ha = adapter block pointer. * sns = pointer for command. * cmd_size = command size. * buf_size = response/command size. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
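 *
 * Example (illustrative sketch only, not from the original driver;
 * ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE and struct sns_cmd_pkt are
 * recalled from the driver's GS support code and are assumptions that
 * may differ in this tree):
 *
 *	struct qla_hw_data *ha = vha->hw;
 *	int rval;
 *
 *	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 *	    GA_NXT_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));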
*/ int qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, uint16_t cmd_size, size_t buf_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, "Entered %s.\n", __func__); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, "Retry cnt=%d ratov=%d total tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); mcp->mb[0] = MBC_SEND_SNS_COMMAND; mcp->mb[1] = cmd_size; mcp->mb[2] = MSW(sns_phys_address); mcp->mb[3] = LSW(sns_phys_address); mcp->mb[6] = MSW(MSD(sns_phys_address)); mcp->mb[7] = LSW(MSD(sns_phys_address)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->buf_size = buf_size; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x105f, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, "Done %s.\n", __func__); } return rval; } int qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) { int rval; struct logio_entry_24xx *lg; dma_addr_t lg_dma; uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; struct req_que *req; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, "Entered %s.\n", __func__); if (vha->vp_idx && vha->qpair) req = vha->qpair->req; else req = ha->req_q_map[0]; lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { ql_log(ql_log_warn, vha, 0x1062, "Failed to allocate login IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = make_handle(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); if (opt & BIT_1) lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, (ha->r_a_tov / 10 * 2) + 2); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1063, "Failed to issue login IOCB (%x).\n", rval); } else if (lg->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x1064, "Failed to complete IOCB -- error status (%x).\n", lg->entry_status); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { iop[0] = le32_to_cpu(lg->io_parameter[0]); iop[1] = le32_to_cpu(lg->io_parameter[1]); ql_dbg(ql_dbg_mbx, vha, 0x1065, "Failed to complete IOCB -- completion status (%x) " "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), iop[0], iop[1]); switch (iop[0]) { case LSC_SCODE_PORTID_USED: mb[0] = MBS_PORT_ID_USED; mb[1] = LSW(iop[1]); break; case LSC_SCODE_NPORT_USED: mb[0] = MBS_LOOP_ID_USED; break; case LSC_SCODE_NOLINK: case LSC_SCODE_NOIOCB: case LSC_SCODE_NOXCB: case LSC_SCODE_CMD_FAILED: case LSC_SCODE_NOFABRIC: case LSC_SCODE_FW_NOT_READY: case LSC_SCODE_NOT_LOGGED_IN: case LSC_SCODE_NOPCB: case LSC_SCODE_ELS_REJECT: case LSC_SCODE_CMD_PARAM_ERR: case LSC_SCODE_NONPORT: case LSC_SCODE_LOGGED_IN: case LSC_SCODE_NOFLOGI_ACC: default: mb[0] = MBS_COMMAND_ERROR; break; } } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, "Done %s.\n", __func__); iop[0] = le32_to_cpu(lg->io_parameter[0]); mb[0] = MBS_COMMAND_COMPLETE; mb[1] = 0; if (iop[0] & BIT_4) { if (iop[0] & BIT_8) mb[1] |= BIT_1; 
} else mb[1] = BIT_0; /* Passback COS information. */ mb[10] = 0; if (lg->io_parameter[7] || lg->io_parameter[8]) mb[10] |= BIT_0; /* Class 2. */ if (lg->io_parameter[9] || lg->io_parameter[10]) mb[10] |= BIT_1; /* Class 3. */ if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) mb[10] |= BIT_7; /* Confirmed Completion * Allowed */ } dma_pool_free(ha->s_dma_pool, lg, lg_dma); return rval; } /* * qla2x00_login_fabric * Issue login fabric port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * domain = device domain. * area = device area. * al_pa = device AL_PA. * status = pointer for return status. * opt = command options. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(ha)) { mcp->mb[1] = loop_id; mcp->mb[10] = opt; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = (loop_id << 8) | opt; } mcp->mb[2] = domain; mcp->mb[3] = area << 8 | al_pa; mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[2] = mcp->mb[2]; mb[6] = mcp->mb[6]; mb[7] = mcp->mb[7]; /* COS retrieved from Get-Port-Database mailbox command. */ mb[10] = 0; } if (rval != QLA_SUCCESS) { /* RLU tmp code: need to change main mailbox_command function to * return ok even when the mailbox completion value is not * SUCCESS. The caller needs to be responsible to interpret * the return values of this mailbox command if we're not * to change too much of the existing code. */ if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1068, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, "Done %s.\n", __func__); } return rval; } /* * qla2x00_login_local_device * Issue login loop port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * opt = command options. * * Returns: * Return status code. * * Context: * Kernel context. * */ int qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t *mb_ret, uint8_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, "Entered %s.\n", __func__); if (IS_FWI2_CAPABLE(ha)) return qla24xx_login_fabric(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mb_ret, opt); mcp->mb[0] = MBC_LOGIN_LOOP_PORT; if (HAS_EXTENDED_IDS(ha)) mcp->mb[1] = fcport->loop_id; else mcp->mb[1] = fcport->loop_id << 8; mcp->mb[2] = opt; mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. 
*/ if (mb_ret != NULL) { mb_ret[0] = mcp->mb[0]; mb_ret[1] = mcp->mb[1]; mb_ret[6] = mcp->mb[6]; mb_ret[7] = mcp->mb[7]; } if (rval != QLA_SUCCESS) { /* AV tmp code: need to change main mailbox_command function to * return ok even when the mailbox completion value is not * SUCCESS. The caller needs to be responsible to interpret * the return values of this mailbox command if we're not * to change too much of the existing code. */ if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) rval = QLA_SUCCESS; ql_dbg(ql_dbg_mbx, vha, 0x106b, "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, "Done %s.\n", __func__); } return (rval); } int qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa) { int rval; struct logio_entry_24xx *lg; dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; struct req_que *req; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, "Entered %s.\n", __func__); lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { ql_log(ql_log_warn, vha, 0x106e, "Failed to allocate logout IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } req = vha->req; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = make_handle(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| LCF_FREE_NPORT); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; lg->vp_index = vha->vp_idx; rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, (ha->r_a_tov / 10 * 2) + 2); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x106f, "Failed to issue logout IOCB (%x).\n", rval); } else if (lg->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x1070, "Failed to complete IOCB -- error status (%x).\n", lg->entry_status); rval = QLA_FUNCTION_FAILED; } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x1071, "Failed to complete IOCB -- completion status (%x) " "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), le32_to_cpu(lg->io_parameter[0]), le32_to_cpu(lg->io_parameter[1])); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, lg, lg_dma); return rval; } /* * qla2x00_fabric_logout * Issue logout fabric port mailbox command. * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint8_t area, uint8_t al_pa) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; mcp->out_mb = MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { mcp->mb[1] = loop_id; mcp->mb[10] = 0; mcp->out_mb |= MBX_10; } else { mcp->mb[1] = loop_id << 8; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1074, "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, "Done %s.\n", __func__); } return rval; } /* * qla2x00_full_login_lip * Issue full login LIP mailbox command. * * Input: * ha = adapter block pointer. 
* TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_full_login_lip(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, "Entered %s.\n", __func__); mcp->mb[0] = MBC_LIP_FULL_LOGIN; mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_id_list * * Input: * ha = adapter block pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, uint16_t *entries) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, "Entered %s.\n", __func__); if (id_list == NULL) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_ID_LIST; mcp->out_mb = MBX_0; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[2] = MSW(id_list_dma); mcp->mb[3] = LSW(id_list_dma); mcp->mb[6] = MSW(MSD(id_list_dma)); mcp->mb[7] = LSW(MSD(id_list_dma)); mcp->mb[8] = 0; mcp->mb[9] = vha->vp_idx; mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; } else { mcp->mb[1] = MSW(id_list_dma); mcp->mb[2] = LSW(id_list_dma); mcp->mb[3] = MSW(MSD(id_list_dma)); mcp->mb[6] = LSW(MSD(id_list_dma)); mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); } else { *entries = mcp->mb[1]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_resource_cnts * Get current firmware resource counts. * * Input: * ha = adapter block pointer. * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. 
*/ int qla2x00_get_resource_cnts(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_12; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x107d, "Failed mb[0]=%x.\n", mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], mcp->mb[12]); ha->orig_fw_tgt_xcb_count = mcp->mb[1]; ha->cur_fw_tgt_xcb_count = mcp->mb[2]; ha->cur_fw_xcb_count = mcp->mb[3]; ha->orig_fw_xcb_count = mcp->mb[6]; ha->cur_fw_iocb_count = mcp->mb[7]; ha->orig_fw_iocb_count = mcp->mb[10]; if (ha->flags.npiv_supported) ha->max_npiv_vports = mcp->mb[11]; if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ha->fw_max_fcf_count = mcp->mb[12]; } return (rval); } /* * qla2x00_get_fcal_position_map * Get FCAL (LILP) position map using mailbox command * * Input: * ha = adapter state pointer. * pos_map = buffer pointer (can be NULL). * * Returns: * qla2x00 local function return status code. * * Context: * Kernel context. */ int qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map, u8 *num_entries) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; char *pmap; dma_addr_t pmap_dma; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, "Entered %s.\n", __func__); pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); if (pmap == NULL) { ql_log(ql_log_warn, vha, 0x1080, "Memory alloc failed.\n"); return QLA_MEMORY_ALLOC_FAILED; } mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; mcp->mb[2] = MSW(pmap_dma); mcp->mb[3] = LSW(pmap_dma); mcp->mb[6] = MSW(MSD(pmap_dma)); mcp->mb[7] = LSW(MSD(pmap_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->buf_size = FCAL_MAP_SIZE; mcp->flags = MBX_DMA_IN; mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, "mb0/mb1=%x/%X FC/AL position map size (%x).\n", mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, pmap, pmap[0] + 1); if (pos_map) memcpy(pos_map, pmap, FCAL_MAP_SIZE); if (num_entries) *num_entries = pmap[0]; } dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, "Done %s.\n", __func__); } return rval; } /* * qla2x00_get_link_status * * Input: * ha = adapter block pointer. * loop_id = device loop ID. * ret_buf = pointer to link status return buffer. * * Returns: * 0 = success. * BIT_0 = mem alloc error. * BIT_1 = mailbox error. 
*/ int qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, struct link_statistics *stats, dma_addr_t stats_dma) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint32_t *iter = (uint32_t *)stats; ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_STATUS; mcp->mb[2] = MSW(LSD(stats_dma)); mcp->mb[3] = LSW(LSD(stats_dma)); mcp->mb[6] = MSW(MSD(stats_dma)); mcp->mb[7] = LSW(MSD(stats_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_0; if (IS_FWI2_CAPABLE(ha)) { mcp->mb[1] = loop_id; mcp->mb[4] = 0; mcp->mb[10] = 0; mcp->out_mb |= MBX_10|MBX_4|MBX_1; mcp->in_mb |= MBX_1; } else if (HAS_EXTENDED_IDS(ha)) { mcp->mb[1] = loop_id; mcp->mb[10] = 0; mcp->out_mb |= MBX_10|MBX_1; } else { mcp->mb[1] = loop_id << 8; mcp->out_mb |= MBX_1; } mcp->tov = MBX_TOV_SECONDS; mcp->flags = IOCTL_CMD; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x1085, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else { /* Re-endianize - firmware data is le32. */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, "Done %s.\n", __func__); for ( ; dwords--; iter++) le32_to_cpus(iter); } } else { /* Failed. */ ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); } return rval; } int qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, dma_addr_t stats_dma, uint16_t options) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint32_t *iter = (uint32_t *)stats; ushort dwords = sizeof(*stats)/sizeof(*iter); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, "Entered %s.\n", __func__); memset(&mc, 0, sizeof(mc)); mc.mb[0] = MBC_GET_LINK_PRIV_STATS; mc.mb[2] = MSW(LSD(stats_dma)); mc.mb[3] = LSW(LSD(stats_dma)); mc.mb[6] = MSW(MSD(stats_dma)); mc.mb[7] = LSW(MSD(stats_dma)); mc.mb[8] = dwords; mc.mb[9] = vha->vp_idx; mc.mb[10] = options; rval = qla24xx_send_mb_cmd(vha, &mc); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x1089, "Failed mb[0]=%x.\n", mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, "Done %s.\n", __func__); /* Re-endianize - firmware data is le32. */ for ( ; dwords--; iter++) le32_to_cpus(iter); } } else { /* Failed. */ ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); } return rval; } int qla24xx_abort_command(srb_t *sp) { int rval; unsigned long flags = 0; struct abort_entry_24xx *abt; dma_addr_t abt_dma; uint32_t handle; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct qla_qpair *qpair = sp->qpair; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, "Entered %s.\n", __func__); if (sp->qpair) req = sp->qpair->req; else return QLA_ERR_NO_QPAIR; if (ql2xasynctmfenable) return qla24xx_async_abort_command(sp); spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (handle = 1; handle < req->num_outstanding_cmds; handle++) { if (req->outstanding_cmds[handle] == sp) break; } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (handle == req->num_outstanding_cmds) { /* Command not found. 
*/ return QLA_ERR_NOT_FOUND; } abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); if (abt == NULL) { ql_log(ql_log_warn, vha, 0x108d, "Failed to allocate abort IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } abt->entry_type = ABORT_IOCB_TYPE; abt->entry_count = 1; abt->handle = make_handle(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); abt->handle_to_abort = make_handle(req->id, handle); abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; abt->vp_index = fcport->vha->vp_idx; abt->req_que_no = cpu_to_le16(req->id); /* Need to pass original sp */ qla_nvme_abort_set_option(abt, sp); rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x108e, "Failed to issue IOCB (%x).\n", rval); } else if (abt->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x108f, "Failed to complete IOCB -- error status (%x).\n", abt->entry_status); rval = QLA_FUNCTION_FAILED; } else if (abt->nport_handle != cpu_to_le16(0)) { ql_dbg(ql_dbg_mbx, vha, 0x1090, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(abt->nport_handle)); if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) rval = QLA_FUNCTION_PARAMETER_ERROR; else rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, "Done %s.\n", __func__); } if (rval == QLA_SUCCESS) qla_nvme_abort_process_comp_status(abt, sp); qla_wait_nvme_release_cmd_kref(sp); dma_pool_free(ha->s_dma_pool, abt, abt_dma); return rval; } struct tsk_mgmt_cmd { union { struct tsk_mgmt_entry tsk; struct sts_entry_24xx sts; } p; }; static int __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, uint64_t l, int tag) { int rval, rval2; struct tsk_mgmt_cmd *tsk; struct sts_entry_24xx *sts; dma_addr_t tsk_dma; scsi_qla_host_t *vha; struct qla_hw_data *ha; struct req_que *req; struct qla_qpair *qpair; vha = fcport->vha; ha = vha->hw; req = vha->req; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, "Entered %s.\n", __func__); if (vha->vp_idx && vha->qpair) { /* NPIV port */ qpair = vha->qpair; req = qpair->req; } tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { ql_log(ql_log_warn, vha, 0x1093, "Failed to allocate task management IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; tsk->p.tsk.entry_count = 1; tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->p.tsk.control_flags = cpu_to_le32(type); tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; tsk->p.tsk.port_id[1] = fcport->d_id.b.area; tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; tsk->p.tsk.vp_index = fcport->vha->vp_idx; if (type == TCF_LUN_RESET) { int_to_scsilun(l, &tsk->p.tsk.lun); host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, sizeof(tsk->p.tsk.lun)); } sts = &tsk->p.sts; rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1094, "Failed to issue %s reset IOCB (%x).\n", name, rval); } else if (sts->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x1095, "Failed to complete IOCB -- error status (%x).\n", sts->entry_status); rval = QLA_FUNCTION_FAILED; } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x1096, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(sts->comp_status)); rval = QLA_FUNCTION_FAILED; } else if (le16_to_cpu(sts->scsi_status) & 
SS_RESPONSE_INFO_LEN_VALID) { if (le32_to_cpu(sts->rsp_data_len) < 4) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, "Ignoring inconsistent data length -- not enough " "response info (%d).\n", le32_to_cpu(sts->rsp_data_len)); } else if (sts->data[3]) { ql_dbg(ql_dbg_mbx, vha, 0x1098, "Failed to complete IOCB -- response (%x).\n", sts->data[3]); rval = QLA_FUNCTION_FAILED; } } /* Issue marker IOCB. */ rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1099, "Failed to issue marker IOCB (%x).\n", rval2); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); return rval; } int qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) { struct qla_hw_data *ha = fcport->vha->hw; if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); } int qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) { struct qla_hw_data *ha = fcport->vha->hw; if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); } int qla2x00_system_error(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; mcp->tov = 5; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, "Done %s.\n", __func__); } return rval; } int qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, "Entered %s.\n", __func__); mcp->mb[0] = MBC_WRITE_SERDES; mcp->mb[1] = addr; if (IS_QLA2031(vha->hw)) mcp->mb[2] = data & 0xff; else mcp->mb[2] = data; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1183, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, "Done %s.\n", __func__); } return rval; } int qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, "Entered %s.\n", __func__); mcp->mb[0] = MBC_READ_SERDES; mcp->mb[1] = addr; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (IS_QLA2031(vha->hw)) *data = mcp->mb[1] & 0xff; else *data = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1186, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + 
ql_dbg_verbose, vha, 0x1187, "Done %s.\n", __func__); } return rval; } int qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA8044(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; mcp->mb[1] = HCS_WRITE_SERDES; mcp->mb[3] = LSW(addr); mcp->mb[4] = MSW(addr); mcp->mb[5] = LSW(data); mcp->mb[6] = MSW(data); mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x11a1, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, "Done %s.\n", __func__); } return rval; } int qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA8044(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; mcp->mb[1] = HCS_READ_SERDES; mcp->mb[3] = LSW(addr); mcp->mb[4] = MSW(addr); mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); *data = mcp->mb[2] << 16 | mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x118a, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, "Done %s.\n", __func__); } return rval; } /** * qla2x00_set_serdes_params() - * @vha: HA context * @sw_em_1g: serial link options * @sw_em_2g: serial link options * @sw_em_4g: serial link options * * Returns */ int qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, uint16_t sw_em_2g, uint16_t sw_em_4g) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SERDES_PARAMS; mcp->mb[1] = BIT_0; mcp->mb[2] = sw_em_1g | BIT_15; mcp->mb[3] = sw_em_2g | BIT_15; mcp->mb[4] = sw_em_4g | BIT_15; mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x109f, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, "Done %s.\n", __func__); } return rval; } int qla2x00_stop_firmware(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, "Entered %s.\n", __func__); mcp->mb[0] = MBC_STOP_FIRMWARE; mcp->mb[1] = 0; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = 5; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); if (mcp->mb[0] == MBS_INVALID_COMMAND) rval = QLA_INVALID_COMMAND; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, "Done %s.\n", __func__); } return rval; } int qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, uint16_t buffers) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return 
QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_EFT_ENABLE; mcp->mb[2] = LSW(eft_dma); mcp->mb[3] = MSW(eft_dma); mcp->mb[4] = LSW(MSD(eft_dma)); mcp->mb[5] = MSW(MSD(eft_dma)); mcp->mb[6] = buffers; mcp->mb[7] = TC_AEN_DISABLE; mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10a5, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, "Done %s.\n", __func__); } return rval; } int qla2x00_disable_eft_trace(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_EFT_DISABLE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10a8, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, "Done %s.\n", __func__); } return rval; } int qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, uint16_t buffers, uint16_t *mb, uint32_t *dwords) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, "Entered %s.\n", __func__); if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_FCE_ENABLE; mcp->mb[2] = LSW(fce_dma); mcp->mb[3] = MSW(fce_dma); mcp->mb[4] = LSW(MSD(fce_dma)); mcp->mb[5] = MSW(MSD(fce_dma)); mcp->mb[6] = buffers; mcp->mb[7] = TC_AEN_DISABLE; mcp->mb[8] = 0; mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| MBX_1|MBX_0; mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ab, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, "Done %s.\n", __func__); if (mb) memcpy(mb, mcp->mb, 8 * sizeof(*mb)); if (dwords) *dwords = buffers; } return rval; } int qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_FCE_DISABLE; mcp->mb[2] = TC_FCE_DISABLE_TRACE; mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ae, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, "Done %s.\n", __func__); if (wr) 
*wr = (uint64_t) mcp->mb[5] << 48 | (uint64_t) mcp->mb[4] << 32 | (uint64_t) mcp->mb[3] << 16 | (uint64_t) mcp->mb[2]; if (rd) *rd = (uint64_t) mcp->mb[9] << 48 | (uint64_t) mcp->mb[8] << 32 | (uint64_t) mcp->mb[7] << 16 | (uint64_t) mcp->mb[6]; } return rval; } int qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t *port_speed, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = mcp->mb[3] = 0; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, "Done %s.\n", __func__); if (port_speed) *port_speed = mcp->mb[3]; } return rval; } int qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t port_speed, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; mcp->mb[3] = port_speed & 0x3F; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ if (mb) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, "Done %s.\n", __func__); } return rval; } void qla24xx_report_id_acquisition(scsi_qla_host_t *vha, struct vp_rpt_id_entry_24xx *rptid_entry) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp = NULL; unsigned long flags; int found; port_id_t id; struct fc_port *fcport; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, "Entered %s.\n", __func__); if (rptid_entry->entry_status != 0) return; id.b.domain = rptid_entry->port_id[2]; id.b.area = rptid_entry->port_id[1]; id.b.al_pa = rptid_entry->port_id[0]; id.b.rsvd_1 = 0; ha->flags.n2n_ae = 0; if (rptid_entry->format == 0) { /* loop */ ql_dbg(ql_dbg_async, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " "VPs acquired %d.\n", rptid_entry->vp_setup, rptid_entry->vp_acquired); ql_dbg(ql_dbg_async, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); ha->current_topology = ISP_CFG_NL; qla_update_host_map(vha, id); } else if (rptid_entry->format == 1) { /* fabric */ ql_dbg(ql_dbg_async, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " "port id %02x%02x%02x.\n", rptid_entry->vp_idx, rptid_entry->vp_status, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); ql_dbg(ql_dbg_async, vha, 0x5075, "Format 1: Remote WWPN %8phC.\n", rptid_entry->u.f1.port_name); ql_dbg(ql_dbg_async, vha, 0x5075, "Format 1: WWPN %8phC.\n", vha->port_name); switch (rptid_entry->u.f1.flags & TOPO_MASK) { case TOPO_N2N: ha->current_topology = ISP_CFG_N; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 
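/*
 * N2N (point-to-point) topology: with the session lock held, flag every
 * known fcport for rescan and clear its n2n_flag before the WWPN
 * comparison below decides which side initiates the N2N login.
 */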
list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; fcport->n2n_flag = 0; } id.b24 = 0; if (wwn_to_u64(vha->port_name) > wwn_to_u64(rptid_entry->u.f1.port_name)) { vha->d_id.b24 = 0; vha->d_id.b.al_pa = 1; ha->flags.n2n_bigger = 1; id.b.al_pa = 2; ql_dbg(ql_dbg_async, vha, 0x5075, "Format 1: assign local id %x remote id %x\n", vha->d_id.b24, id.b24); } else { ql_dbg(ql_dbg_async, vha, 0x5075, "Format 1: Remote login - Waiting for WWPN %8phC.\n", rptid_entry->u.f1.port_name); ha->flags.n2n_bigger = 0; } fcport = qla2x00_find_fcport_by_wwpn(vha, rptid_entry->u.f1.port_name, 1); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); if (fcport) { fcport->plogi_nack_done_deadline = jiffies + HZ; fcport->dm_login_expire = jiffies + QLA_N2N_WAIT_TIME * HZ; fcport->scan_state = QLA_FCPORT_FOUND; fcport->n2n_flag = 1; fcport->keep_nport_handle = 1; fcport->login_retry = vha->hw->login_retry_count; fcport->fc4_type = FS_FC4TYPE_FCP; if (vha->flags.nvme_enabled) fcport->fc4_type |= FS_FC4TYPE_NVME; if (wwn_to_u64(vha->port_name) > wwn_to_u64(fcport->port_name)) { fcport->d_id = id; } switch (fcport->disc_state) { case DSC_DELETED: set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; case DSC_DELETE_PEND: break; default: qlt_schedule_sess_for_deletion(fcport); break; } } else { qla24xx_post_newsess_work(vha, &id, rptid_entry->u.f1.port_name, rptid_entry->u.f1.node_name, NULL, FS_FCP_IS_N2N); } /* if our portname is higher then initiate N2N login */ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); return; case TOPO_FL: ha->current_topology = ISP_CFG_FL; break; case TOPO_F: ha->current_topology = ISP_CFG_F; break; default: break; } ha->flags.gpsc_supported = 1; ha->current_topology = ISP_CFG_F; /* buffer to buffer credit flag */ vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; if (rptid_entry->vp_idx == 0) { if (rptid_entry->vp_status == VP_STAT_COMPL) { /* FA-WWN is only for physical port */ if (qla_ini_mode_enabled(vha) && ha->flags.fawwpn_enabled && (rptid_entry->u.f1.flags & BIT_6)) { memcpy(vha->port_name, rptid_entry->u.f1.port_name, WWN_SIZE); } qla_update_host_map(vha, id); } set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); } else { if (rptid_entry->vp_status != VP_STAT_COMPL && rptid_entry->vp_status != VP_STAT_ID_CHG) { ql_dbg(ql_dbg_mbx, vha, 0x10ba, "Could not acquire ID for VP[%d].\n", rptid_entry->vp_idx); return; } found = 0; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (rptid_entry->vp_idx == vp->vp_idx) { found = 1; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); if (!found) return; qla_update_host_map(vp, id); /* * Cannot configure here as we are still sitting on the * response queue. Handle it in dpc context. */ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); } set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (rptid_entry->format == 2) { ql_dbg(ql_dbg_async, vha, 0x505f, "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); ql_dbg(ql_dbg_async, vha, 0x5075, "N2N: Remote WWPN %8phC.\n", rptid_entry->u.f2.port_name); /* N2N. 
direct connect */ ha->current_topology = ISP_CFG_N; ha->flags.rida_fmt2 = 1; vha->d_id.b.domain = rptid_entry->port_id[2]; vha->d_id.b.area = rptid_entry->port_id[1]; vha->d_id.b.al_pa = rptid_entry->port_id[0]; ha->flags.n2n_ae = 1; spin_lock_irqsave(&ha->vport_slock, flags); qla_update_vp_map(vha, SET_AL_PA); spin_unlock_irqrestore(&ha->vport_slock, flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; fcport->n2n_flag = 0; } fcport = qla2x00_find_fcport_by_wwpn(vha, rptid_entry->u.f2.port_name, 1); if (fcport) { fcport->login_retry = vha->hw->login_retry_count; fcport->plogi_nack_done_deadline = jiffies + HZ; fcport->scan_state = QLA_FCPORT_FOUND; fcport->keep_nport_handle = 1; fcport->n2n_flag = 1; fcport->d_id.b.domain = rptid_entry->u.f2.remote_nport_id[2]; fcport->d_id.b.area = rptid_entry->u.f2.remote_nport_id[1]; fcport->d_id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0]; /* * For the case where remote port sending PRLO, FW * sends up RIDA Format 2 as an indication of session * loss. In other word, FW state change from PRLI * complete back to PLOGI complete. Delete the * session and let relogin drive the reconnect. */ if (atomic_read(&fcport->state) == FCS_ONLINE) qlt_schedule_sess_for_deletion(fcport); } } } /* * qla24xx_modify_vp_config * Change VP configuration for vha * * Input: * vha = adapter block pointer. * * Returns: * qla2xxx local function return status code. * * Context: * Kernel context. */ int qla24xx_modify_vp_config(scsi_qla_host_t *vha) { int rval; struct vp_config_entry_24xx *vpmod; dma_addr_t vpmod_dma; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); /* This can be called by the parent */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, "Entered %s.\n", __func__); vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); if (!vpmod) { ql_log(ql_log_warn, vha, 0x10bc, "Failed to allocate modify VP IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } vpmod->entry_type = VP_CONFIG_IOCB_TYPE; vpmod->entry_count = 1; vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; vpmod->vp_count = 1; vpmod->vp_index1 = vha->vp_idx; vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; qlt_modify_vp_config(vha, vpmod); memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); vpmod->entry_count = 1; rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10bd, "Failed to issue VP config IOCB (%x).\n", rval); } else if (vpmod->comp_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x10be, "Failed to complete IOCB -- error status (%x).\n", vpmod->comp_status); rval = QLA_FUNCTION_FAILED; } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10bf, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(vpmod->comp_status)); rval = QLA_FUNCTION_FAILED; } else { /* EMPTY */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, "Done %s.\n", __func__); fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); } dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); return rval; } /* * qla2x00_send_change_request * Receive or disable RSCN request from fabric controller * * Input: * ha = adapter block pointer * format = registration format: * 0 - Reserved * 1 - Fabric detected registration * 2 - N_port detected registration * 3 - Full registration * FF - clear registration * vp_idx = Virtual port index * * Returns: * qla2x00 local function return status code. 
* * Context: * Kernel Context */ int qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, uint16_t vp_idx) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; mcp->mb[1] = format; mcp->mb[9] = vp_idx; mcp->out_mb = MBX_9|MBX_1|MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { rval = BIT_1; } } else rval = BIT_1; return rval; } int qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, uint32_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, "Entered %s.\n", __func__); if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; mcp->mb[8] = MSW(addr); mcp->mb[10] = 0; mcp->out_mb = MBX_10|MBX_8|MBX_0; } else { mcp->mb[0] = MBC_DUMP_RISC_RAM; mcp->out_mb = MBX_0; } mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[4] = MSW(size); mcp->mb[5] = LSW(size); mcp->out_mb |= MBX_5|MBX_4; } else { mcp->mb[4] = LSW(size); mcp->out_mb |= MBX_4; } mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1008, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, "Done %s.\n", __func__); } return rval; } /* 84XX Support **************************************************************/ struct cs84xx_mgmt_cmd { union { struct verify_chip_entry_84xx req; struct verify_chip_rsp_84xx rsp; } p; }; int qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) { int rval, retry; struct cs84xx_mgmt_cmd *mn; dma_addr_t mn_dma; uint16_t options; unsigned long flags; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, "Entered %s.\n", __func__); mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (mn == NULL) { return QLA_MEMORY_ALLOC_FAILED; } /* Force Update? */ options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; /* Diagnostic firmware? */ /* options |= MENLO_DIAG_FW; */ /* We update the firmware with only one data sequence. */ options |= VCO_END_OF_DATA; do { retry = 0; memset(mn, 0, sizeof(*mn)); mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; mn->p.req.entry_count = 1; mn->p.req.options = cpu_to_le16(options); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, "Dump of Verify Request.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, mn, sizeof(*mn)); rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10cb, "Failed to issue verify IOCB (%x).\n", rval); goto verify_done; } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, "Dump of Verify Response.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, mn, sizeof(*mn)); status[0] = le16_to_cpu(mn->p.rsp.comp_status); status[1] = status[0] == CS_VCS_CHIP_FAILURE ? le16_to_cpu(mn->p.rsp.failure_code) : 0; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, "cs=%x fc=%x.\n", status[0], status[1]); if (status[0] != CS_COMPLETE) { rval = QLA_FUNCTION_FAILED; if (!(options & VCO_DONT_UPDATE_FW)) { ql_dbg(ql_dbg_mbx, vha, 0x10cf, "Firmware update failed. 
Retrying " "without update firmware.\n"); options |= VCO_DONT_UPDATE_FW; options &= ~VCO_FORCE_UPDATE; retry = 1; } } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, "Firmware updated to %x.\n", le32_to_cpu(mn->p.rsp.fw_ver)); /* NOTE: we only update OP firmware. */ spin_lock_irqsave(&ha->cs84xx->access_lock, flags); ha->cs84xx->op_fw_version = le32_to_cpu(mn->p.rsp.fw_ver); spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); } } while (retry); verify_done: dma_pool_free(ha->s_dma_pool, mn, mn_dma); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, "Done %s.\n", __func__); } return rval; } int qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) { int rval; unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!ha->flags.fw_started) return QLA_SUCCESS; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, "Entered %s.\n", __func__); if (IS_SHADOW_REG_CAPABLE(ha)) req->options |= BIT_13; mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = req->options; mcp->mb[2] = MSW(LSD(req->dma)); mcp->mb[3] = LSW(LSD(req->dma)); mcp->mb[6] = MSW(MSD(req->dma)); mcp->mb[7] = LSW(MSD(req->dma)); mcp->mb[5] = req->length; if (req->rsp) mcp->mb[10] = req->rsp->id; mcp->mb[12] = req->qos; mcp->mb[11] = req->vp_idx; mcp->mb[13] = req->rid; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->mb[15] = 0; mcp->mb[4] = req->id; /* que in ptr index */ mcp->mb[8] = 0; /* que out ptr index */ mcp->mb[9] = *req->out_ptr = 0; mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS * 2; if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_1; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { mcp->out_mb |= MBX_15; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; } spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { wrt_reg_dword(req->req_q_in, 0); if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) wrt_reg_dword(req->req_q_out, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10d4, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, "Done %s.\n", __func__); } return rval; } int qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { int rval; unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!ha->flags.fw_started) return QLA_SUCCESS; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, "Entered %s.\n", __func__); if (IS_SHADOW_REG_CAPABLE(ha)) rsp->options |= BIT_13; mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = rsp->options; mcp->mb[2] = MSW(LSD(rsp->dma)); mcp->mb[3] = LSW(LSD(rsp->dma)); mcp->mb[6] = MSW(MSD(rsp->dma)); mcp->mb[7] = LSW(MSD(rsp->dma)); mcp->mb[5] = rsp->length; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->mb[15] = 0; mcp->mb[4] = rsp->id; /* que in ptr index */ mcp->mb[8] = *rsp->in_ptr = 0; /* que out ptr index */ mcp->mb[9] = 0; mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS * 2; if (IS_QLA81XX(ha)) { mcp->out_mb |= 
MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; } spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { wrt_reg_dword(rsp->rsp_q_out, 0); if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) wrt_reg_dword(rsp->rsp_q_in, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10d7, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, "Done %s.\n", __func__); } return rval; } int qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, "Entered %s.\n", __func__); mcp->mb[0] = MBC_IDC_ACK; memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10da, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db, "Done %s.\n", __func__); } return rval; } int qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, "Entered %s.\n", __func__); if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10dd, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, "Done %s.\n", __func__); *sector_size = mcp->mb[1]; } return rval; } int qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = enable ? 
FAC_OPT_CMD_WRITE_ENABLE : FAC_OPT_CMD_WRITE_PROTECT; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e0, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, "Done %s.\n", __func__); } return rval; } int qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; mcp->mb[2] = LSW(start); mcp->mb[3] = MSW(start); mcp->mb[4] = LSW(finish); mcp->mb[5] = MSW(finish); mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e3, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, "Done %s.\n", __func__); } return rval; } int qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) { int rval = QLA_SUCCESS; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return rval; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, "Entered %s.\n", __func__); mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : FAC_OPT_CMD_UNLOCK_SEMAPHORE); mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e3, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, "Done %s.\n", __func__); } return rval; } int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) { int rval = 0; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_MPI_FW; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e6, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, "Done %s.\n", __func__); } return rval; } int qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int i; int len; __le16 *str; struct qla_hw_data *ha = vha->hw; if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, "Entered %s.\n", __func__); str = (__force __le16 *)version; len = strlen(version); mcp->mb[0] = MBC_SET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; mcp->out_mb = MBX_1|MBX_0; for (i = 4; i < 16 && len; i++, str++, len -= 2) { mcp->mb[i] = le16_to_cpup(str); mcp->out_mb |= 1<<i; } for (; i < 16; i++) { mcp->mb[i] = 0; mcp->out_mb |= 1<<i; } mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x117c, 
"Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, "Done %s.\n", __func__); } return rval; } int qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int len; uint16_t dwlen; uint8_t *str; dma_addr_t str_dma; struct qla_hw_data *ha = vha->hw; if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, "Entered %s.\n", __func__); str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); if (!str) { ql_log(ql_log_warn, vha, 0x117f, "Failed to allocate driver version param.\n"); return QLA_MEMORY_ALLOC_FAILED; } memcpy(str, "\x7\x3\x11\x0", 4); dwlen = str[0]; len = dwlen * 4 - 4; memset(str + 4, 0, len); if (len > strlen(version)) len = strlen(version); memcpy(str + 4, version, len); mcp->mb[0] = MBC_SET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; mcp->mb[2] = MSW(LSD(str_dma)); mcp->mb[3] = LSW(LSD(str_dma)); mcp->mb[6] = MSW(MSD(str_dma)); mcp->mb[7] = LSW(MSD(str_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1180, "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, str, str_dma); return rval; } int qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, void *buf, uint16_t bufsiz) { int rval, i; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint32_t *bp; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; mcp->mb[2] = MSW(buf_dma); mcp->mb[3] = LSW(buf_dma); mcp->mb[6] = MSW(MSD(buf_dma)); mcp->mb[7] = LSW(MSD(buf_dma)); mcp->mb[8] = bufsiz/4; mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x115a, "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, "Done %s.\n", __func__); bp = (uint32_t *) buf; for (i = 0; i < (bufsiz-4)/4; i++, bp++) *bp = le32_to_cpu((__force __le32)*bp); } return rval; } #define PUREX_CMD_COUNT 4 int qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint8_t *els_cmd_map; uint8_t active_cnt = 0; dma_addr_t els_cmd_map_dma; uint8_t cmd_opcode[PUREX_CMD_COUNT]; uint8_t i, index, purex_bit; struct qla_hw_data *ha = vha->hw; if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_SUCCESS; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, "Entered %s.\n", __func__); els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, &els_cmd_map_dma, GFP_KERNEL); if (!els_cmd_map) { ql_log(ql_log_warn, vha, 0x7101, "Failed to allocate RDP els command param.\n"); return QLA_MEMORY_ALLOC_FAILED; } /* List of Purex ELS */ if (ql2xrdpenable) { cmd_opcode[active_cnt] = ELS_RDP; active_cnt++; } if (ha->flags.scm_supported_f) { cmd_opcode[active_cnt] = ELS_FPIN; active_cnt++; } if (ha->flags.edif_enabled) { cmd_opcode[active_cnt] = ELS_AUTH_ELS; active_cnt++; 
} for (i = 0; i < active_cnt; i++) { index = cmd_opcode[i] / 8; purex_bit = cmd_opcode[i] % 8; els_cmd_map[index] |= 1 << purex_bit; } mcp->mb[0] = MBC_SET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT; mcp->buf_size = ELS_CMD_MAP_SIZE; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x118d, "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, "Done %s.\n", __func__); } dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, els_cmd_map, els_cmd_map_dma); return rval; } static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); *temp = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x115a, "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, "Done %s.\n", __func__); } return rval; } int qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; if (len == 1) opt |= BIT_0; mcp->mb[0] = MBC_READ_SFP; mcp->mb[1] = dev; mcp->mb[2] = MSW(LSD(sfp_dma)); mcp->mb[3] = LSW(LSD(sfp_dma)); mcp->mb[6] = MSW(MSD(sfp_dma)); mcp->mb[7] = LSW(MSD(sfp_dma)); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (opt & BIT_0) *sfp = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { /* sfp is not there */ rval = QLA_INTERFACE_ERROR; } } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, "Done %s.\n", __func__); } return rval; } int qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; if (len == 1) opt |= BIT_0; if (opt & BIT_0) len = *sfp; mcp->mb[0] = MBC_WRITE_SFP; mcp->mb[1] = dev; mcp->mb[2] = MSW(LSD(sfp_dma)); mcp->mb[3] = LSW(LSD(sfp_dma)); mcp->mb[6] = MSW(MSD(sfp_dma)); mcp->mb[7] = LSW(MSD(sfp_dma)); mcp->mb[8] = len; mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != 
QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ec, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, "Done %s.\n", __func__); } return rval; } int qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, uint16_t size_in_bytes, uint16_t *actual_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, "Entered %s.\n", __func__); if (!IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_XGMAC_STATS; mcp->mb[2] = MSW(stats_dma); mcp->mb[3] = LSW(stats_dma); mcp->mb[6] = MSW(MSD(stats_dma)); mcp->mb[7] = LSW(MSD(stats_dma)); mcp->mb[8] = size_in_bytes >> 2; mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ef, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, "Done %s.\n", __func__); *actual_size = mcp->mb[2] << 2; } return rval; } int qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, uint16_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, "Entered %s.\n", __func__); if (!IS_CNA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_DCBX_PARAMS; mcp->mb[1] = 0; mcp->mb[2] = MSW(tlv_dma); mcp->mb[3] = LSW(tlv_dma); mcp->mb[6] = MSW(MSD(tlv_dma)); mcp->mb[7] = LSW(MSD(tlv_dma)); mcp->mb[8] = size; mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10f2, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, "Done %s.\n", __func__); } return rval; } int qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_READ_RAM_EXTENDED; mcp->mb[1] = LSW(risc_addr); mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10f5, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, "Done %s.\n", __func__); *data = mcp->mb[3] << 16 | mcp->mb[2]; } return rval; } int qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing /* transfer count */ mcp->mb[10] = LSW(mreq->transfer_size); mcp->mb[11] = MSW(mreq->transfer_size); /* send data address */ mcp->mb[14] = LSW(mreq->send_dma); mcp->mb[15] = MSW(mreq->send_dma); mcp->mb[20] = LSW(MSD(mreq->send_dma)); mcp->mb[21] = MSW(MSD(mreq->send_dma)); /* receive data address */ mcp->mb[16] = LSW(mreq->rcv_dma); mcp->mb[17] = MSW(mreq->rcv_dma); mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); mcp->mb[7] = 
MSW(MSD(mreq->rcv_dma)); /* Iteration count */ mcp->mb[18] = LSW(mreq->iteration_count); mcp->mb[19] = MSW(mreq->iteration_count); mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; if (IS_CNA_CAPABLE(vha->hw)) mcp->out_mb |= MBX_2; mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; mcp->buf_size = mreq->transfer_size; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10f8, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, "Done %s.\n", __func__); } /* Copy mailbox information */ memcpy( mresp, mcp->mb, 64); return rval; } int qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; /* BIT_6 specifies 64bit address */ mcp->mb[1] = mreq->options | BIT_15 | BIT_6; if (IS_CNA_CAPABLE(ha)) { mcp->mb[2] = vha->fcoe_fcf_idx; } mcp->mb[16] = LSW(mreq->rcv_dma); mcp->mb[17] = MSW(mreq->rcv_dma); mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); mcp->mb[10] = LSW(mreq->transfer_size); mcp->mb[14] = LSW(mreq->send_dma); mcp->mb[15] = MSW(mreq->send_dma); mcp->mb[20] = LSW(MSD(mreq->send_dma)); mcp->mb[21] = MSW(MSD(mreq->send_dma)); mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; if (IS_CNA_CAPABLE(ha)) mcp->out_mb |= MBX_2; mcp->in_mb = MBX_0; if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_1; if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->buf_size = mreq->transfer_size; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10fb, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc, "Done %s.\n", __func__); } /* Copy mailbox information */ memcpy(mresp, mcp->mb, 64); return rval; } int qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd, "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); mcp->mb[0] = MBC_ISP84XX_RESET; mcp->mb[1] = enable_diagnostic; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff, "Done %s.\n", __func__); return rval; } int qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; mcp->mb[1] = LSW(risc_addr); mcp->mb[2] = LSW(data); mcp->mb[3] = MSW(data); mcp->mb[8] = 
MSW(risc_addr); mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1101, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, "Done %s.\n", __func__); } return rval; } int qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) { int rval; uint32_t stat, timer; uint16_t mb0 = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; rval = QLA_SUCCESS; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103, "Entered %s.\n", __func__); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); /* Write the MBC data to the registers */ wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER); wrt_reg_word(&reg->mailbox1, mb[0]); wrt_reg_word(&reg->mailbox2, mb[1]); wrt_reg_word(&reg->mailbox3, mb[2]); wrt_reg_word(&reg->mailbox4, mb[3]); wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT); /* Poll for MBC interrupt */ for (timer = 6000000; timer; timer--) { /* Check for pending interrupts. */ stat = rd_reg_dword(&reg->host_status); if (stat & HSRX_RISC_INT) { stat &= 0xff; if (stat == 0x1 || stat == 0x2 || stat == 0x10 || stat == 0x11) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = rd_reg_word(&reg->mailbox0); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword(&reg->hccr); break; } } udelay(5); } if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) rval = mb0 & MBS_MASK; else rval = QLA_FUNCTION_FAILED; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1104, "Failed=%x mb[0]=%x.\n", rval, mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105, "Done %s.\n", __func__); } return rval; } /* Set the specified data rate */ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; uint16_t val; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, mode); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(*mcp)); switch (ha->set_data_rate) { case PORT_SPEED_AUTO: case PORT_SPEED_4GB: case PORT_SPEED_8GB: case PORT_SPEED_16GB: case PORT_SPEED_32GB: val = ha->set_data_rate; break; default: ql_log(ql_log_warn, vha, 0x1199, "Unrecognized speed setting:%d. 
Setting Autoneg\n", ha->set_data_rate); val = ha->set_data_rate = PORT_SPEED_AUTO; break; } mcp->mb[0] = MBC_DATA_RATE; mcp->mb[1] = mode; mcp->mb[2] = val; mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1107, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { if (mcp->mb[1] != 0x7) ql_dbg(ql_dbg_mbx, vha, 0x1179, "Speed set:0x%x\n", mcp->mb[1]); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, "Done %s.\n", __func__); } return rval; } int qla2x00_get_data_rate(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_DATA_RATE; mcp->mb[1] = QLA_GET_DATA_RATE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1107, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { if (mcp->mb[1] != 0x7) ha->link_data_rate = mcp->mb[1]; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mcp->mb[4] & BIT_0) ql_log(ql_log_info, vha, 0x11a2, "FEC=enabled (data rate).\n"); } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, "Done %s.\n", __func__); if (mcp->mb[1] != 0x7) ha->link_data_rate = mcp->mb[1]; } return rval; } int qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, "Entered %s.\n", __func__); if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_PORT_CONFIG; mcp->out_mb = MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x110a, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { /* Copy all bits to preserve original value */ memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, "Done %s.\n", __func__); } return rval; } int qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_PORT_CONFIG; /* Copy all bits to preserve original setting */ memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x110d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, "Done %s.\n", __func__); return rval; } int qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, "Entered %s.\n", __func__); if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; 
mcp->mb[1] = loop_id; if (ha->flags.fcp_prio_enabled) mcp->mb[2] = BIT_1; else mcp->mb[2] = BIT_2; mcp->mb[4] = priority & 0xf; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; mb[4] = mcp->mb[4]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, "Done %s.\n", __func__); } return rval; } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) { int rval = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = vha->hw; uint8_t byte; if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x1150, "Thermal not supported by this card.\n"); return rval; } if (IS_QLA25XX(ha)) { if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && ha->pdev->subsystem_device == 0x0175) { rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1, BIT_13|BIT_0); *temp = byte; return rval; } if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && ha->pdev->subsystem_device == 0x338e) { rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); *temp = byte; return rval; } ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Thermal not supported by this card.\n"); return rval; } if (IS_QLA82XX(ha)) { *temp = qla82xx_read_temperature(vha); rval = QLA_SUCCESS; return rval; } else if (IS_QLA8044(ha)) { *temp = qla8044_read_temperature(vha); rval = QLA_SUCCESS; return rval; } rval = qla2x00_read_asic_temperature(vha, temp); return rval; } int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 1; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1016, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, "Done %s.\n", __func__); } return rval; } int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, "Entered %s.\n", __func__); if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 0; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x100c, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, "Done %s.\n", __func__); } return rval; } int qla82xx_md_get_template_size(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT_SIZE); mcp->mb[3] = MSW(RQST_TMPLT_SIZE); mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = 
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); /* Always copy back return mailbox values. */ if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1120, "mailbox command FAILED=0x%x, subcode=%x.\n", (mcp->mb[1] << 16) | mcp->mb[0], (mcp->mb[3] << 16) | mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, "Done %s.\n", __func__); ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); if (!ha->md_template_size) { ql_dbg(ql_dbg_mbx, vha, 0x1122, "Null template size obtained.\n"); rval = QLA_FUNCTION_FAILED; } } return rval; } int qla82xx_md_get_template(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, "Entered %s.\n", __func__); ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); if (!ha->md_tmplt_hdr) { ql_log(ql_log_warn, vha, 0x1124, "Unable to allocate memory for Minidump template.\n"); return rval; } memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT); mcp->mb[3] = MSW(RQST_TMPLT); mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); mcp->mb[8] = LSW(ha->md_template_size); mcp->mb[9] = MSW(ha->md_template_size); mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1125, "mailbox command FAILED=0x%x, subcode=%x.\n", ((mcp->mb[1] << 16) | mcp->mb[0]), ((mcp->mb[3] << 16) | mcp->mb[2])); } else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, "Done %s.\n", __func__); return rval; } int qla8044_md_get_template(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; int offset = 0, size = MINIDUMP_SIZE_36K; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, "Entered %s.\n", __func__); ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); if (!ha->md_tmplt_hdr) { ql_log(ql_log_warn, vha, 0xb11b, "Unable to allocate memory for Minidump template.\n"); return rval; } memset(mcp->mb, 0 , sizeof(mcp->mb)); while (offset < ha->md_template_size) { mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT); mcp->mb[3] = MSW(RQST_TMPLT); mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); mcp->mb[8] = LSW(size); mcp->mb[9] = MSW(size); mcp->mb[10] = offset & 0x0000FFFF; mcp->mb[11] = offset & 0xFFFF0000; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 
0xb11c, "mailbox command FAILED=0x%x, subcode=%x.\n", ((mcp->mb[1] << 16) | mcp->mb[0]), ((mcp->mb[3] << 16) | mcp->mb[2])); return rval; } else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, "Done %s.\n", __func__); offset = offset + size; } return rval; } int qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; mcp->mb[1] = led_cfg[0]; mcp->mb[2] = led_cfg[1]; if (IS_QLA8031(ha)) { mcp->mb[3] = led_cfg[2]; mcp->mb[4] = led_cfg[3]; mcp->mb[5] = led_cfg[4]; mcp->mb[6] = led_cfg[5]; } mcp->out_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1134, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, "Done %s.\n", __func__); } return rval; } int qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_GET_LED_CONFIG; mcp->out_mb = MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1137, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { led_cfg[0] = mcp->mb[1]; led_cfg[1] = mcp->mb[2]; if (IS_QLA8031(ha)) { led_cfg[2] = mcp->mb[3]; led_cfg[3] = mcp->mb[4]; led_cfg[4] = mcp->mb[5]; led_cfg[5] = mcp->mb[6]; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, "Done %s.\n", __func__); } return rval; } int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; if (enable) mcp->mb[7] = 0xE; else mcp->mb[7] = 0xD; mcp->out_mb = MBX_7|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1128, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, "Done %s.\n", __func__); } return rval; } int qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, "Entered %s.\n", __func__); mcp->mb[0] = MBC_WRITE_REMOTE_REG; mcp->mb[1] = LSW(reg); mcp->mb[2] = MSW(reg); mcp->mb[3] = LSW(data); mcp->mb[4] = MSW(data); mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1131, "Failed=%x mb[0]=%x.\n", 
rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, "Done %s.\n", __func__); } return rval; } int qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, "Implicit LOGO Unsupported.\n"); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, "Entering %s.\n", __func__); /* Perform Implicit LOGO. */ mcp->mb[0] = MBC_PORT_LOGOUT; mcp->mb[1] = fcport->loop_id; mcp->mb[10] = BIT_15; mcp->out_mb = MBX_10|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_mbx, vha, 0x113d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, "Done %s.\n", __func__); return rval; } int qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; unsigned long retry_max_time = jiffies + (2 * HZ); if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); retry_rd_reg: mcp->mb[0] = MBC_READ_REMOTE_REG; mcp->mb[1] = LSW(reg); mcp->mb[2] = MSW(reg); mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x114c, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { *data = (mcp->mb[3] | (mcp->mb[4] << 16)); if (*data == QLA8XXX_BAD_VALUE) { /* * During soft-reset CAMRAM register reads might * return 0xbad0bad0. So retry for MAX of 2 sec * while reading camram registers. */ if (time_after(jiffies, retry_max_time)) { ql_dbg(ql_dbg_mbx, vha, 0x1141, "Failure to read CAMRAM register. 
" "data=0x%x.\n", *data); return QLA_FUNCTION_FAILED; } msleep(100); goto retry_rd_reg; } ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); } return rval; } int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; mcp->out_mb = MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1144, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); qla2xxx_dump_fw(vha); } else { ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); } return rval; } int qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint8_t subcode = (uint8_t)options; struct qla_hw_data *ha = vha->hw; if (!IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_ACCESS_CONTROL; mcp->mb[1] = options; mcp->out_mb = MBX_1|MBX_0; if (subcode & BIT_2) { mcp->mb[2] = LSW(start_addr); mcp->mb[3] = MSW(start_addr); mcp->mb[4] = LSW(end_addr); mcp->mb[5] = MSW(end_addr); mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; } mcp->in_mb = MBX_2|MBX_1|MBX_0; if (!(subcode & (BIT_2 | BIT_5))) mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1147, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[4]); qla2xxx_dump_fw(vha); } else { if (subcode & BIT_5) *sector_size = mcp->mb[1]; else if (subcode & (BIT_6 | BIT_7)) { ql_dbg(ql_dbg_mbx, vha, 0x1148, "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); } else if (subcode & (BIT_3 | BIT_4)) { ql_dbg(ql_dbg_mbx, vha, 0x1149, "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); } ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); } return rval; } int qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, uint32_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_MCTP_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, "Entered %s.\n", __func__); mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[4] = MSW(size); mcp->mb[5] = LSW(size); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->mb[8] = MSW(addr); /* Setting RAM ID to valid */ /* For MCTP RAM ID is 0x40 */ mcp->mb[10] = BIT_7 | 0x40; mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x114e, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, "Done %s.\n", __func__); } return rval; } int qla26xx_dport_diagnostics(scsi_qla_host_t *vha, void *dd_buf, uint size, uint options) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; dma_addr_t dd_dma; if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, "Entered %s.\n", __func__); dd_dma = 
dma_map_single(&vha->hw->pdev->dev, dd_buf, size, DMA_FROM_DEVICE); if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(dd_buf, 0, size); mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; mcp->mb[1] = options; mcp->mb[2] = MSW(LSD(dd_dma)); mcp->mb[3] = LSW(LSD(dd_dma)); mcp->mb[6] = MSW(MSD(dd_dma)); mcp->mb[7] = LSW(MSD(dd_dma)); mcp->mb[8] = size; mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->buf_size = size; mcp->flags = MBX_DMA_IN; mcp->tov = MBX_TOV_SECONDS * 4; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, "Done %s.\n", __func__); } dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE); return rval; } int qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha, struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp) { int rval; dma_addr_t dd_dma; uint size = sizeof(dd->buf); uint16_t options = dd->options; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, "Entered %s.\n", __func__); dd_dma = dma_map_single(&vha->hw->pdev->dev, dd->buf, size, DMA_FROM_DEVICE); if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(dd->buf, 0, size); mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; mcp->mb[1] = options; mcp->mb[2] = MSW(LSD(dd_dma)); mcp->mb[3] = LSW(LSD(dd_dma)); mcp->mb[6] = MSW(MSD(dd_dma)); mcp->mb[7] = LSW(MSD(dd_dma)); mcp->mb[8] = size; mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0; mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0; mcp->buf_size = size; mcp->flags = MBX_DMA_IN; mcp->tov = MBX_TOV_SECONDS * 4; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, "Done %s.\n", __func__); } dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE); return rval; } static void qla2x00_async_mb_sp_done(srb_t *sp, int res) { sp->u.iocb_cmd.u.mbx.rc = res; complete(&sp->u.iocb_cmd.u.mbx.comp); /* don't free sp here. Let the caller do the free */ } /* * This mailbox uses the iocb interface to send MB command. * This allows non-critial (non chip setup) command to go * out in parrallel. */ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) { int rval = QLA_FUNCTION_FAILED; srb_t *sp; struct srb_iocb *c; if (!vha->hw->flags.fw_started) goto done; /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; c = &sp->u.iocb_cmd; init_completion(&c->u.mbx.comp); sp->type = SRB_MB_IOCB; sp->name = mb_to_str(mcp->mb[0]); qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_mb_sp_done); memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1018, "%s: %s Failed submission. %x.\n", __func__, sp->name, rval); goto done_free_sp; } ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n", sp->name, sp->handle); wait_for_completion(&c->u.mbx.comp); memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); rval = c->u.mbx.rc; switch (rval) { case QLA_FUNCTION_TIMEOUT: ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. 
%x.\n", __func__, sp->name, rval); break; case QLA_SUCCESS: ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", __func__, sp->name); break; default: ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", __func__, sp->name, rval); break; } done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } /* * qla24xx_gpdb_wait * NOTE: Do not call this routine from DPC thread */ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) { int rval = QLA_FUNCTION_FAILED; dma_addr_t pd_dma; struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; if (!vha->hw->flags.fw_started) goto done; pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { ql_log(ql_log_warn, vha, 0xd047, "Failed to allocate port database structure.\n"); goto done_free_sp; } memset(&mc, 0, sizeof(mc)); mc.mb[0] = MBC_GET_PORT_DATABASE; mc.mb[1] = fcport->loop_id; mc.mb[2] = MSW(pd_dma); mc.mb[3] = LSW(pd_dma); mc.mb[6] = MSW(MSD(pd_dma)); mc.mb[7] = LSW(MSD(pd_dma)); mc.mb[9] = vha->vp_idx; mc.mb[10] = opt; rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1193, "%s: %8phC fail\n", __func__, fcport->port_name); goto done_free_sp; } rval = __qla24xx_parse_gpdb(vha, fcport, pd); ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", __func__, fcport->port_name); done_free_sp: if (pd) dma_pool_free(ha->s_dma_pool, pd, pd_dma); done: return rval; } int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, struct port_database_24xx *pd) { int rval = QLA_SUCCESS; uint64_t zero = 0; u8 current_login_state, last_login_state; if (NVME_TARGET(vha->hw, fcport)) { current_login_state = pd->current_login_state >> 4; last_login_state = pd->last_login_state >> 4; } else { current_login_state = pd->current_login_state & 0xf; last_login_state = pd->last_login_state & 0xf; } /* Check for logged in state. */ if (current_login_state != PDS_PRLI_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x119a, "Unable to verify login-state (%x/%x) for loop_id %x.\n", current_login_state, last_login_state, fcport->loop_id); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } if (fcport->loop_id == FC_NO_LOOP_ID || (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && memcmp(fcport->port_name, pd->port_name, 8))) { /* We lost the device mid way. */ rval = QLA_NOT_LOGGED_IN; goto gpd_error_out; } /* Names are little-endian. */ memcpy(fcport->node_name, pd->node_name, WWN_SIZE); memcpy(fcport->port_name, pd->port_name, WWN_SIZE); /* Get port_id of device. */ fcport->d_id.b.domain = pd->port_id[0]; fcport->d_id.b.area = pd->port_id[1]; fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; ql_dbg(ql_dbg_disc, vha, 0x2062, "%8phC SVC Param w3 %02x%02x", fcport->port_name, pd->prli_svc_param_word_3[1], pd->prli_svc_param_word_3[0]); if (NVME_TARGET(vha->hw, fcport)) { fcport->port_type = FCT_NVME; if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) fcport->port_type |= FCT_NVME_INITIATOR; if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type |= FCT_NVME_TARGET; if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0) fcport->port_type |= FCT_NVME_DISCOVERY; } else { /* If not target must be initiator or unknown type. */ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; } /* Passback COS information. */ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? 
FC_COS_CLASS2 : FC_COS_CLASS3; if (pd->prli_svc_param_word_3[0] & BIT_7) { fcport->flags |= FCF_CONF_COMP_SUPPORTED; fcport->conf_compl_supported = 1; } gpd_error_out: return rval; } /* * qla24xx_gidlist__wait * NOTE: don't call this routine from DPC thread. */ int qla24xx_gidlist_wait(struct scsi_qla_host *vha, void *id_list, dma_addr_t id_list_dma, uint16_t *entries) { int rval = QLA_FUNCTION_FAILED; mbx_cmd_t mc; if (!vha->hw->flags.fw_started) goto done; memset(&mc, 0, sizeof(mc)); mc.mb[0] = MBC_GET_ID_LIST; mc.mb[2] = MSW(id_list_dma); mc.mb[3] = LSW(id_list_dma); mc.mb[6] = MSW(MSD(id_list_dma)); mc.mb[7] = LSW(MSD(id_list_dma)); mc.mb[8] = 0; mc.mb[9] = vha->vp_idx; rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x119b, "%s: fail\n", __func__); } else { *entries = mc.mb[1]; ql_dbg(ql_dbg_mbx, vha, 0x119c, "%s: done\n", __func__); } done: return rval; } int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200, "Entered %s\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; mcp->mb[1] = 1; mcp->mb[2] = value; mcp->out_mb = MBX_2 | MBX_1 | MBX_0; mcp->in_mb = MBX_2 | MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n", (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); return rval; } int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203, "Entered %s\n", __func__); memset(mcp->mb, 0, sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; mcp->mb[1] = 0; mcp->out_mb = MBX_1 | MBX_0; mcp->in_mb = MBX_2 | MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) *value = mc.mb[2]; ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n", (rval != QLA_SUCCESS) ? "Failed" : "Done", rval); return rval; } int qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) { struct qla_hw_data *ha = vha->hw; uint16_t iter, addr, offset; dma_addr_t phys_addr; int rval, c; u8 *sfp_data; memset(ha->sfp_data, 0, SFP_DEV_SIZE); addr = 0xa0; phys_addr = ha->sfp_data_dma; sfp_data = ha->sfp_data; offset = c = 0; for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { if (iter == 4) { /* Skip to next device address. 
*/ addr = 0xa2; offset = 0; } rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, addr, offset, SFP_BLOCK_SIZE, BIT_1); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x706d, "Unable to read SFP data (%x/%x/%x).\n", rval, addr, offset); return rval; } if (buf && (c < count)) { u16 sz; if ((count - c) >= SFP_BLOCK_SIZE) sz = SFP_BLOCK_SIZE; else sz = count - c; memcpy(buf, sfp_data, sz); buf += SFP_BLOCK_SIZE; c += sz; } phys_addr += SFP_BLOCK_SIZE; sfp_data += SFP_BLOCK_SIZE; offset += SFP_BLOCK_SIZE; } return rval; } int qla24xx_res_count_wait(struct scsi_qla_host *vha, uint16_t *out_mb, int out_mb_sz) { int rval = QLA_FUNCTION_FAILED; mbx_cmd_t mc; if (!vha->hw->flags.fw_started) goto done; memset(&mc, 0, sizeof(mc)); mc.mb[0] = MBC_GET_RESOURCE_COUNTS; rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: fail\n", __func__); } else { if (out_mb_sz <= SIZEOF_IOCB_MB_REG) memcpy(out_mb, mc.mb, out_mb_sz); else memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: done\n", __func__); } done: return rval; } int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, uint32_t sfub_len) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; mcp->mb[1] = opts; mcp->mb[2] = region; mcp->mb[3] = MSW(len); mcp->mb[4] = LSW(len); mcp->mb[5] = MSW(sfub_dma_addr); mcp->mb[6] = LSW(sfub_dma_addr); mcp->mb[7] = MSW(MSD(sfub_dma_addr)); mcp->mb[8] = LSW(MSD(sfub_dma_addr)); mcp->mb[9] = sfub_len; mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); } return rval; } int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, "Entered %s.\n", __func__); mcp->mb[0] = MBC_WRITE_REMOTE_REG; mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(addr); mcp->mb[3] = LSW(data); mcp->mb[4] = MSW(data); mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, "Done %s.\n", __func__); } return rval; } int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, "Entered %s.\n", __func__); mcp->mb[0] = MBC_READ_REMOTE_REG; mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(addr); mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, "Done %s.\n", __func__); } return rval; } int ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval; if 
(!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n", __func__, options); mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG; mcp->mb[1] = options; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; if (options & BIT_0) { if (options & BIT_1) { mcp->mb[2] = led[2]; mcp->out_mb |= MBX_2; } if (options & BIT_2) { mcp->mb[3] = led[0]; mcp->out_mb |= MBX_3; } if (options & BIT_3) { mcp->mb[4] = led[1]; mcp->out_mb |= MBX_4; } } else { mcp->in_mb |= MBX_4|MBX_3|MBX_2; } mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval) { ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n", __func__, rval, mcp->mb[0], mcp->mb[1]); return rval; } if (options & BIT_0) { ha->beacon_blink_led = 0; ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__); } else { led[2] = mcp->mb[2]; led[0] = mcp->mb[3]; led[1] = mcp->mb[4]; ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n", __func__, led[0], led[1], led[2]); } return rval; } /** * qla_no_op_mb(): This MB is used to check if FW is still alive and * able to generate an interrupt. Otherwise, a timeout will trigger * FW dump + reset * @vha: host adapter pointer * Return: None */ void qla_no_op_mb(struct scsi_qla_host *vha) { mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval; memset(&mc, 0, sizeof(mc)); mcp->mb[0] = 0; // noop cmd= 0 mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; mcp->tov = 5; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval) { ql_dbg(ql_dbg_async, vha, 0x7071, "Failed %s %x\n", __func__, rval); } } int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in, uint16_t *mbx_out) { mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = -EINVAL; memset(&mc, 0, sizeof(mc)); /* Receiving all 32 register's contents */ memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t))); mcp->out_mb = 0xFFFFFFFF; mcp->in_mb = 0xFFFFFFFF; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; mcp->bufp = NULL; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0xf0a2, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n", __func__); /* passing all 32 register's contents */ memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t)); } return rval; }
linux-master
drivers/scsi/qla2xxx/qla_mbx.c
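Nearly every routine in the qla_mbx.c code above follows the same skeleton: populate a stack-allocated mbx_cmd_t, record which mailbox registers are written out (out_mb) and read back (in_mb) as bitmasks, set a timeout, hand the request to qla2x00_mailbox_command(), then branch on QLA_SUCCESS for logging and for unpacking results from mcp->mb[]. The sketch below distills that pattern; it is illustrative only, modeled directly on qla2x00_read_ram_word() shown earlier. The function name qla2x00_example_rd_word and the 0x2222 trace ID are hypothetical placeholders, while the types, macros and helpers it relies on (mbx_cmd_t, the MBX_* masks, LSW/MSW, MBX_TOV_SECONDS, qla2x00_mailbox_command) are the ones already used throughout the file.

/*
 * Minimal sketch of the mailbox-wrapper pattern used throughout this file,
 * modeled on qla2x00_read_ram_word(). The function name and the 0x2222
 * debug trace ID are hypothetical; everything else follows the driver's
 * existing conventions.
 */
static int
qla2x00_example_rd_word(scsi_qla_host_t *vha, uint32_t risc_addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	/* Most wrappers gate on adapter capability before touching mailboxes. */
	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	/* 1. Fill the outgoing mailbox registers. */
	mcp->mb[0] = MBC_READ_RAM_EXTENDED;	/* command opcode */
	mcp->mb[1] = LSW(risc_addr);		/* low 16 bits of address */
	mcp->mb[8] = MSW(risc_addr);		/* high 16 bits of address */

	/* 2. Declare which registers are written and which are read back. */
	mcp->out_mb = MBX_8|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_0;

	/* 3. Timeout and flags, then hand off to the common mailbox engine. */
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* 4. Log on failure, unpack results from mcp->mb[] on success. */
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x2222,	/* hypothetical trace ID */
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		*data = mcp->mb[3] << 16 | mcp->mb[2];
	}

	return rval;
}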
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_gbl.h" #include <linux/kthread.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/bsg-lib.h> static void qla2xxx_free_fcport_work(struct work_struct *work) { struct fc_port *fcport = container_of(work, typeof(*fcport), free_work); qla2x00_free_fcport(fcport); } /* BSG support for ELS/CT pass through */ void qla2x00_bsg_job_done(srb_t *sp, int res) { struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; ql_dbg(ql_dbg_user, sp->vha, 0x7009, "%s: sp hdl %x, result=%x bsg ptr %p\n", __func__, sp->handle, res, bsg_job); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } void qla2x00_bsg_sp_free(srb_t *sp) { struct qla_hw_data *ha = sp->vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; if (sp->type == SRB_FXIOCB_BCMD) { piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); } else { if (sp->remap.remapped) { dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, sp->remap.rsp.dma); dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, sp->remap.req.dma); } else { dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); } } if (sp->type == SRB_CT_CMD || sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) { INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work); queue_work(ha->wq, &sp->fcport->free_work); } qla2x00_rel_sp(sp); } int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) { int i, ret, num_valid; uint8_t *bcode; struct qla_fcp_prio_entry *pri_entry; uint32_t *bcode_val_ptr, bcode_val; ret = 1; num_valid = 0; bcode = (uint8_t *)pri_cfg; bcode_val_ptr = (uint32_t *)pri_cfg; bcode_val = (uint32_t)(*bcode_val_ptr); if (bcode_val == 0xFFFFFFFF) { /* No FCP Priority config data in flash */ ql_dbg(ql_dbg_user, vha, 0x7051, "No FCP Priority config data.\n"); return 0; } if (memcmp(bcode, "HQOS", 4)) { /* Invalid FCP priority data header*/ ql_dbg(ql_dbg_user, vha, 0x7052, "Invalid FCP Priority data header. bcode=0x%x.\n", bcode_val); return 0; } if (flag != 1) return ret; pri_entry = &pri_cfg->entry[0]; for (i = 0; i < pri_cfg->num_entries; i++) { if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) num_valid++; pri_entry++; } if (num_valid == 0) { /* No valid FCP priority data entries */ ql_dbg(ql_dbg_user, vha, 0x7053, "No valid FCP Priority data entries.\n"); ret = 0; } else { /* FCP priority data is valid */ ql_dbg(ql_dbg_user, vha, 0x7054, "Valid FCP priority data. 
num entries = %d.\n", num_valid); } return ret; } static int qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) { struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int ret = 0; uint32_t len; uint32_t oper; if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) { ret = -EINVAL; goto exit_fcp_prio_cfg; } /* Get the sub command */ oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; /* Only set config is allowed if config memory is not allocated */ if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { ret = -EINVAL; goto exit_fcp_prio_cfg; } switch (oper) { case QLFC_FCP_PRIO_DISABLE: if (ha->flags.fcp_prio_enabled) { ha->flags.fcp_prio_enabled = 0; ha->fcp_prio_cfg->attributes &= ~FCP_PRIO_ATTR_ENABLE; qla24xx_update_all_fcp_prio(vha); bsg_reply->result = DID_OK; } else { ret = -EINVAL; bsg_reply->result = (DID_ERROR << 16); goto exit_fcp_prio_cfg; } break; case QLFC_FCP_PRIO_ENABLE: if (!ha->flags.fcp_prio_enabled) { if (ha->fcp_prio_cfg) { ha->flags.fcp_prio_enabled = 1; ha->fcp_prio_cfg->attributes |= FCP_PRIO_ATTR_ENABLE; qla24xx_update_all_fcp_prio(vha); bsg_reply->result = DID_OK; } else { ret = -EINVAL; bsg_reply->result = (DID_ERROR << 16); goto exit_fcp_prio_cfg; } } break; case QLFC_FCP_PRIO_GET_CONFIG: len = bsg_job->reply_payload.payload_len; if (!len || len > FCP_PRIO_CFG_SIZE) { ret = -EINVAL; bsg_reply->result = (DID_ERROR << 16); goto exit_fcp_prio_cfg; } bsg_reply->result = DID_OK; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer( bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, len); break; case QLFC_FCP_PRIO_SET_CONFIG: len = bsg_job->request_payload.payload_len; if (!len || len > FCP_PRIO_CFG_SIZE) { bsg_reply->result = (DID_ERROR << 16); ret = -EINVAL; goto exit_fcp_prio_cfg; } if (!ha->fcp_prio_cfg) { ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); if (!ha->fcp_prio_cfg) { ql_log(ql_log_warn, vha, 0x7050, "Unable to allocate memory for fcp prio " "config data (%x).\n", FCP_PRIO_CFG_SIZE); bsg_reply->result = (DID_ERROR << 16); ret = -ENOMEM; goto exit_fcp_prio_cfg; } } memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg, FCP_PRIO_CFG_SIZE); /* validate fcp priority data */ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) { bsg_reply->result = (DID_ERROR << 16); ret = -EINVAL; /* If buffer was invalidatic int * fcp_prio_cfg is of no use */ vfree(ha->fcp_prio_cfg); ha->fcp_prio_cfg = NULL; goto exit_fcp_prio_cfg; } ha->flags.fcp_prio_enabled = 0; if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE) ha->flags.fcp_prio_enabled = 1; qla24xx_update_all_fcp_prio(vha); bsg_reply->result = DID_OK; break; default: ret = -EINVAL; break; } exit_fcp_prio_cfg: if (!ret) bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return ret; } static int qla2x00_process_els(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_rport *rport; fc_port_t *fcport = NULL; struct Scsi_Host *host; scsi_qla_host_t *vha; struct qla_hw_data *ha; srb_t *sp; const char *type; int req_sg_cnt, rsp_sg_cnt; int rval = (DID_ERROR << 16); uint32_t els_cmd = 0; int qla_port_allocated = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); if (!rport) { rval = -ENOMEM; goto 
done; } fcport = *(fc_port_t **) rport->dd_data; host = rport_to_shost(rport); vha = shost_priv(host); ha = vha->hw; type = "FC_BSG_RPT_ELS"; } else { host = fc_bsg_to_shost(bsg_job); vha = shost_priv(host); ha = vha->hw; type = "FC_BSG_HST_ELS_NOLOGIN"; els_cmd = bsg_request->rqst_data.h_els.command_code; if (els_cmd == ELS_AUTH_ELS) return qla_edif_process_els(vha, bsg_job); } if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); rval = -EIO; goto done; } /* pass through is supported only for ISP 4Gb or higher */ if (!IS_FWI2_CAPABLE(ha)) { ql_dbg(ql_dbg_user, vha, 0x7001, "ELS passthru not supported for ISP23xx based adapters.\n"); rval = -EPERM; goto done; } /* Multiple SG's are not supported for ELS requests */ if (bsg_job->request_payload.sg_cnt > 1 || bsg_job->reply_payload.sg_cnt > 1) { ql_dbg(ql_dbg_user, vha, 0x7002, "Multiple SG's are not supported for ELS requests, " "request_sg_cnt=%x reply_sg_cnt=%x.\n", bsg_job->request_payload.sg_cnt, bsg_job->reply_payload.sg_cnt); rval = -EPERM; goto done; } /* ELS request for rport */ if (bsg_request->msgcode == FC_BSG_RPT_ELS) { /* make sure the rport is logged in, * if not perform fabric login */ if (atomic_read(&fcport->state) != FCS_ONLINE) { ql_dbg(ql_dbg_user, vha, 0x7003, "Port %06X is not online for ELS passthru.\n", fcport->d_id.b24); rval = -EIO; goto done; } } else { /* Allocate a dummy fcport structure, since functions * preparing the IOCB and mailbox command retrieves port * specific information from fcport structure. For Host based * ELS commands there will be no fcport structure allocated */ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { rval = -ENOMEM; goto done; } qla_port_allocated = 1; /* Initialize all required fields of fcport */ fcport->vha = vha; fcport->d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[0]; fcport->d_id.b.area = bsg_request->rqst_data.h_els.port_id[1]; fcport->d_id.b.domain = bsg_request->rqst_data.h_els.port_id[2]; fcport->loop_id = (fcport->d_id.b.al_pa == 0xFD) ? NPH_FABRIC_CONTROLLER : NPH_F_PORT; } req_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_sg_cnt) { dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); rval = -ENOMEM; goto done_free_fcport; } rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); rval = -ENOMEM; goto done_free_fcport; } if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { ql_log(ql_log_warn, vha, 0x7008, "dma mapping resulted in different sg counts, " "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x " "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { rval = -ENOMEM; goto done_unmap_sg; } sp->type = (bsg_request->msgcode == FC_BSG_RPT_ELS ? SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); sp->name = (bsg_request->msgcode == FC_BSG_RPT_ELS ? 
"bsg_els_rpt" : "bsg_els_hst"); sp->u.bsg_job = bsg_job; sp->free = qla2x00_bsg_sp_free; sp->done = qla2x00_bsg_job_done; ql_dbg(ql_dbg_user, vha, 0x700a, "bsg rqst type: %s els type: %x - loop-id=%x " "portid=%-2x%02x%02x.\n", type, bsg_request->rqst_data.h_els.command_code, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); qla2x00_rel_sp(sp); rval = -EIO; goto done_unmap_sg; } return rval; done_unmap_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); goto done_free_fcport; done_free_fcport: if (qla_port_allocated) qla2x00_free_fcport(fcport); done: return rval; } static inline uint16_t qla24xx_calc_ct_iocbs(uint16_t dsds) { uint16_t iocbs; iocbs = 1; if (dsds > 2) { iocbs += (dsds - 2) / 5; if ((dsds - 2) % 5) iocbs++; } return iocbs; } static int qla2x00_process_ct(struct bsg_job *bsg_job) { srb_t *sp; struct fc_bsg_request *bsg_request = bsg_job->request; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = (DID_ERROR << 16); int req_sg_cnt, rsp_sg_cnt; uint16_t loop_id; struct fc_port *fcport; char *type = "FC_BSG_HST_CT"; req_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_sg_cnt) { ql_log(ql_log_warn, vha, 0x700f, "dma_map_sg return %d for request\n", req_sg_cnt); rval = -ENOMEM; goto done; } rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { ql_log(ql_log_warn, vha, 0x7010, "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -ENOMEM; goto done; } if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { ql_log(ql_log_warn, vha, 0x7011, "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x7012, "Host is not online.\n"); rval = -EIO; goto done_unmap_sg; } loop_id = (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000) >> 24; switch (loop_id) { case 0xFC: loop_id = NPH_SNS; break; case 0xFA: loop_id = vha->mgmt_svr_loop_id; break; default: ql_dbg(ql_dbg_user, vha, 0x7013, "Unknown loop id: %x.\n", loop_id); rval = -EINVAL; goto done_unmap_sg; } /* Allocate a dummy fcport structure, since functions preparing the * IOCB and mailbox command retrieves port specific information * from fcport structure. 
	 * For Host based CT commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback
mode. */ static inline int qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, uint16_t *new_config, uint16_t mode) { int ret = 0; int rval = 0; unsigned long rem_tmo = 0, current_tmo = 0; struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) goto done_set_internal; if (mode == INTERNAL_LOOPBACK) new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); else if (mode == EXTERNAL_LOOPBACK) new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1); ql_dbg(ql_dbg_user, vha, 0x70be, "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK)); memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3); ha->notify_dcbx_comp = 1; ret = qla81xx_set_port_config(vha, new_config); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7021, "set port config failed.\n"); ha->notify_dcbx_comp = 0; rval = -EINVAL; goto done_set_internal; } /* Wait for DCBX complete event */ current_tmo = DCBX_COMP_TIMEOUT * HZ; while (1) { rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp, current_tmo); if (!ha->idc_extend_tmo || rem_tmo) { ha->idc_extend_tmo = 0; break; } current_tmo = ha->idc_extend_tmo * HZ; ha->idc_extend_tmo = 0; } if (!rem_tmo) { ql_dbg(ql_dbg_user, vha, 0x7022, "DCBX completion not received.\n"); ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); /* * If the reset of the loopback mode doesn't work take a FCoE * dump and reset the chip. */ if (ret) { qla2xxx_dump_fw(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } rval = -EINVAL; } else { if (ha->flags.idc_compl_status) { ql_dbg(ql_dbg_user, vha, 0x70c3, "Bad status in IDC Completion AEN\n"); rval = -EINVAL; ha->flags.idc_compl_status = 0; } else ql_dbg(ql_dbg_user, vha, 0x7023, "DCBX completion received.\n"); } ha->notify_dcbx_comp = 0; ha->idc_extend_tmo = 0; done_set_internal: return rval; } static int qla2x00_process_loopback(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval; uint8_t command_sent; char *type; struct msg_echo_lb elreq; uint16_t response[MAILBOX_REGISTER_COUNT]; uint16_t config[4], new_config[4]; uint8_t *fw_sts_ptr; void *req_data = NULL; dma_addr_t req_data_dma; uint32_t req_data_len; uint8_t *rsp_data = NULL; dma_addr_t rsp_data_dma; uint32_t rsp_data_len; if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); return -EIO; } memset(&elreq, 0, sizeof(elreq)); elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!elreq.req_sg_cnt) { ql_log(ql_log_warn, vha, 0x701a, "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt); return -ENOMEM; } elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!elreq.rsp_sg_cnt) { ql_log(ql_log_warn, vha, 0x701b, "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); rval = -ENOMEM; goto done_unmap_req_sg; } if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { ql_log(ql_log_warn, vha, 0x701c, "dma mapping resulted in different sg counts, " "request_sg_cnt: %x dma_request_sg_cnt: %x " "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt); rval = -EAGAIN; goto done_unmap_sg; 
} req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, &req_data_dma, GFP_KERNEL); if (!req_data) { ql_log(ql_log_warn, vha, 0x701d, "dma alloc failed for req_data.\n"); rval = -ENOMEM; goto done_unmap_sg; } rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, &rsp_data_dma, GFP_KERNEL); if (!rsp_data) { ql_log(ql_log_warn, vha, 0x7004, "dma alloc failed for rsp_data.\n"); rval = -ENOMEM; goto done_free_dma_req; } /* Copy the request buffer in req_data now */ sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, req_data, req_data_len); elreq.send_dma = req_data_dma; elreq.rcv_dma = rsp_data_dma; elreq.transfer_size = req_data_len; elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; elreq.iteration_count = bsg_request->rqst_data.h_vendor.vendor_cmd[2]; if (atomic_read(&vha->loop_state) == LOOP_READY && ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) || ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && get_unaligned_le32(req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD && elreq.options == EXTERNAL_LOOPBACK))) { type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; ql_dbg(ql_dbg_user, vha, 0x701e, "BSG request type: %s.\n", type); command_sent = INT_DEF_LB_ECHO_CMD; rval = qla2x00_echo_test(vha, &elreq, response); } else { if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) { memset(config, 0, sizeof(config)); memset(new_config, 0, sizeof(new_config)); if (qla81xx_get_port_config(vha, config)) { ql_log(ql_log_warn, vha, 0x701f, "Get port config failed.\n"); rval = -EPERM; goto done_free_dma_rsp; } if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) { ql_dbg(ql_dbg_user, vha, 0x70c4, "Loopback operation already in " "progress.\n"); rval = -EAGAIN; goto done_free_dma_rsp; } ql_dbg(ql_dbg_user, vha, 0x70c0, "elreq.options=%04x\n", elreq.options); if (elreq.options == EXTERNAL_LOOPBACK) if (IS_QLA8031(ha) || IS_QLA8044(ha)) rval = qla81xx_set_loopback_mode(vha, config, new_config, elreq.options); else rval = qla81xx_reset_loopback_mode(vha, config, 1, 0); else rval = qla81xx_set_loopback_mode(vha, config, new_config, elreq.options); if (rval) { rval = -EPERM; goto done_free_dma_rsp; } type = "FC_BSG_HST_VENDOR_LOOPBACK"; ql_dbg(ql_dbg_user, vha, 0x7028, "BSG request type: %s.\n", type); command_sent = INT_DEF_LB_LOOPBACK_CMD; rval = qla2x00_loopback_test(vha, &elreq, response); if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) { ql_log(ql_log_warn, vha, 0x7029, "MBX command error, Aborting ISP.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); /* Also reset the MPI */ if (IS_QLA81XX(ha)) { if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x702a, "MPI reset failed.\n"); } } rval = -EIO; goto done_free_dma_rsp; } if (new_config[0]) { int ret; /* Revert back to original port config * Also clear internal loopback */ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 1); if (ret) { /* * If the reset of the loopback mode * doesn't work take FCoE dump and then * reset the chip. 
*/ qla2xxx_dump_fw(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } } else { type = "FC_BSG_HST_VENDOR_LOOPBACK"; ql_dbg(ql_dbg_user, vha, 0x702b, "BSG request type: %s.\n", type); command_sent = INT_DEF_LB_LOOPBACK_CMD; rval = qla2x00_loopback_test(vha, &elreq, response); } } if (rval) { ql_log(ql_log_warn, vha, 0x702c, "Vendor request %s failed.\n", type); rval = 0; bsg_reply->result = (DID_ERROR << 16); bsg_reply->reply_payload_rcv_len = 0; } else { ql_dbg(ql_dbg_user, vha, 0x702d, "Vendor request %s completed.\n", type); bsg_reply->result = (DID_OK << 16); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, rsp_data, rsp_data_len); } bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t); fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response, sizeof(response)); fw_sts_ptr += sizeof(response); *fw_sts_ptr = command_sent; done_free_dma_rsp: dma_free_coherent(&ha->pdev->dev, rsp_data_len, rsp_data, rsp_data_dma); done_free_dma_req: dma_free_coherent(&ha->pdev->dev, req_data_len, req_data, req_data_dma); done_unmap_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); done_unmap_req_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!rval) bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rval; } static int qla84xx_reset(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; uint32_t flag; if (!IS_QLA84XX(ha)) { ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); return -EINVAL; } flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); if (rval) { ql_log(ql_log_warn, vha, 0x7030, "Vendor request 84xx reset failed.\n"); rval = (DID_ERROR << 16); } else { ql_dbg(ql_dbg_user, vha, 0x7031, "Vendor request 84xx reset completed.\n"); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return rval; } static int qla84xx_updatefw(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct verify_chip_entry_84xx *mn = NULL; dma_addr_t mn_dma, fw_dma; void *fw_buf = NULL; int rval = 0; uint32_t sg_cnt; uint32_t data_len; uint16_t options; uint32_t flag; uint32_t fw_ver; if (!IS_QLA84XX(ha)) { ql_dbg(ql_dbg_user, vha, 0x7032, "Not 84xx, exiting.\n"); return -EINVAL; } sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!sg_cnt) { ql_log(ql_log_warn, vha, 0x7033, "dma_map_sg returned %d for request.\n", sg_cnt); return -ENOMEM; } if (sg_cnt != bsg_job->request_payload.sg_cnt) { ql_log(ql_log_warn, vha, 0x7034, "DMA mapping resulted in different sg counts, " "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } data_len = bsg_job->request_payload.payload_len; fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, &fw_dma, GFP_KERNEL); if 
(!fw_buf) { ql_log(ql_log_warn, vha, 0x7035, "DMA alloc failed for fw_buf.\n"); rval = -ENOMEM; goto done_unmap_sg; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, fw_buf, data_len); mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (!mn) { ql_log(ql_log_warn, vha, 0x7036, "DMA alloc failed for fw buffer.\n"); rval = -ENOMEM; goto done_free_fw_buf; } flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2); mn->entry_type = VERIFY_CHIP_IOCB_TYPE; mn->entry_count = 1; options = VCO_FORCE_UPDATE | VCO_END_OF_DATA; if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD) options |= VCO_DIAG_FW; mn->options = cpu_to_le16(options); mn->fw_ver = cpu_to_le32(fw_ver); mn->fw_size = cpu_to_le32(data_len); mn->fw_seq_size = cpu_to_le32(data_len); put_unaligned_le64(fw_dma, &mn->dsd.address); mn->dsd.length = cpu_to_le32(data_len); mn->data_seg_cnt = cpu_to_le16(1); rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); if (rval) { ql_log(ql_log_warn, vha, 0x7037, "Vendor request 84xx updatefw failed.\n"); rval = (DID_ERROR << 16); } else { ql_dbg(ql_dbg_user, vha, 0x7038, "Vendor request 84xx updatefw completed.\n"); bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK; } dma_pool_free(ha->s_dma_pool, mn, mn_dma); done_free_fw_buf: dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma); done_unmap_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!rval) bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rval; } static int qla84xx_mgmt_cmd(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct access_chip_84xx *mn = NULL; dma_addr_t mn_dma, mgmt_dma; void *mgmt_b = NULL; int rval = 0; struct qla_bsg_a84_mgmt *ql84_mgmt; uint32_t sg_cnt; uint32_t data_len = 0; uint32_t dma_direction = DMA_NONE; if (!IS_QLA84XX(ha)) { ql_log(ql_log_warn, vha, 0x703a, "Not 84xx, exiting.\n"); return -EINVAL; } mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (!mn) { ql_log(ql_log_warn, vha, 0x703c, "DMA alloc failed for fw buffer.\n"); return -ENOMEM; } mn->entry_type = ACCESS_CHIP_IOCB_TYPE; mn->entry_count = 1; ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request); switch (ql84_mgmt->mgmt.cmd) { case QLA84_MGMT_READ_MEM: case QLA84_MGMT_GET_INFO: sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!sg_cnt) { ql_log(ql_log_warn, vha, 0x703d, "dma_map_sg returned %d for reply.\n", sg_cnt); rval = -ENOMEM; goto exit_mgmt; } dma_direction = DMA_FROM_DEVICE; if (sg_cnt != bsg_job->reply_payload.sg_cnt) { ql_log(ql_log_warn, vha, 0x703e, "DMA mapping resulted in different sg counts, " "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", bsg_job->reply_payload.sg_cnt, sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } data_len = bsg_job->reply_payload.payload_len; mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, &mgmt_dma, GFP_KERNEL); if (!mgmt_b) { ql_log(ql_log_warn, vha, 0x703f, "DMA alloc failed for mgmt_b.\n"); rval = -ENOMEM; goto done_unmap_sg; } if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) { mn->options = cpu_to_le16(ACO_DUMP_MEMORY); mn->parameter1 = cpu_to_le32( ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); } 
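		/* QLA84_MGMT_GET_INFO: request information of the given
		 * type/context from the ISP84xx firmware instead of dumping
		 * memory.
		 */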
else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) { mn->options = cpu_to_le16(ACO_REQUEST_INFO); mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type); mn->parameter2 = cpu_to_le32( ql84_mgmt->mgmt.mgmtp.u.info.context); } break; case QLA84_MGMT_WRITE_MEM: sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!sg_cnt) { ql_log(ql_log_warn, vha, 0x7040, "dma_map_sg returned %d.\n", sg_cnt); rval = -ENOMEM; goto exit_mgmt; } dma_direction = DMA_TO_DEVICE; if (sg_cnt != bsg_job->request_payload.sg_cnt) { ql_log(ql_log_warn, vha, 0x7041, "DMA mapping resulted in different sg counts, " "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", bsg_job->request_payload.sg_cnt, sg_cnt); rval = -EAGAIN; goto done_unmap_sg; } data_len = bsg_job->request_payload.payload_len; mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, &mgmt_dma, GFP_KERNEL); if (!mgmt_b) { ql_log(ql_log_warn, vha, 0x7042, "DMA alloc failed for mgmt_b.\n"); rval = -ENOMEM; goto done_unmap_sg; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, mgmt_b, data_len); mn->options = cpu_to_le16(ACO_LOAD_MEMORY); mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); break; case QLA84_MGMT_CHNG_CONFIG: mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM); mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id); mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0); mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1); break; default: rval = -EIO; goto exit_mgmt; } if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) { mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len); mn->dseg_count = cpu_to_le16(1); put_unaligned_le64(mgmt_dma, &mn->dsd.address); mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len); } rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); if (rval) { ql_log(ql_log_warn, vha, 0x7043, "Vendor request 84xx mgmt failed.\n"); rval = (DID_ERROR << 16); } else { ql_dbg(ql_dbg_user, vha, 0x7044, "Vendor request 84xx mgmt completed.\n"); bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK; if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, mgmt_b, data_len); } } done_unmap_sg: if (mgmt_b) dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); if (dma_direction == DMA_TO_DEVICE) dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); else if (dma_direction == DMA_FROM_DEVICE) dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); exit_mgmt: dma_pool_free(ha->s_dma_pool, mn, mn_dma); if (!rval) bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rval; } static int qla24xx_iidma(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval = 0; struct qla_port_param *port_param = NULL; fc_port_t *fcport = NULL; int found = 0; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint8_t *rsp_ptr = NULL; if (!IS_IIDMA_CAPABLE(vha->hw)) { ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); return -EINVAL; } port_param = (void *)bsg_request + sizeof(struct 
fc_bsg_request); if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { ql_log(ql_log_warn, vha, 0x7048, "Invalid destination type.\n"); return -EINVAL; } list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->port_type != FCT_TARGET) continue; if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, fcport->port_name, sizeof(fcport->port_name))) continue; found = 1; break; } if (!found) { ql_log(ql_log_warn, vha, 0x7049, "Failed to find port.\n"); return -EINVAL; } if (atomic_read(&fcport->state) != FCS_ONLINE) { ql_log(ql_log_warn, vha, 0x704a, "Port is not online.\n"); return -EINVAL; } if (fcport->flags & FCF_LOGIN_NEEDED) { ql_log(ql_log_warn, vha, 0x704b, "Remote port not logged in flags = 0x%x.\n", fcport->flags); return -EINVAL; } if (port_param->mode) rval = qla2x00_set_idma_speed(vha, fcport->loop_id, port_param->speed, mb); else rval = qla2x00_get_idma_speed(vha, fcport->loop_id, &port_param->speed, mb); if (rval) { ql_log(ql_log_warn, vha, 0x704c, "iiDMA cmd failed for %8phN -- " "%04x %x %04x %04x.\n", fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); rval = (DID_ERROR << 16); } else { if (!port_param->mode) { bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(struct qla_port_param); rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct fc_bsg_reply); memcpy(rsp_ptr, port_param, sizeof(struct qla_port_param)); } bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return rval; } static int qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, uint8_t is_update) { struct fc_bsg_request *bsg_request = bsg_job->request; uint32_t start = 0; int valid = 0; struct qla_hw_data *ha = vha->hw; if (unlikely(pci_channel_offline(ha->pdev))) return -EINVAL; start = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; if (start > ha->optrom_size) { ql_log(ql_log_warn, vha, 0x7055, "start %d > optrom_size %d.\n", start, ha->optrom_size); return -EINVAL; } if (ha->optrom_state != QLA_SWAITING) { ql_log(ql_log_info, vha, 0x7056, "optrom_state %d.\n", ha->optrom_state); return -EBUSY; } ha->optrom_region_start = start; ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update); if (is_update) { if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) valid = 1; else if (start == (ha->flt_region_boot * 4) || start == (ha->flt_region_fw * 4)) valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) valid = 1; if (!valid) { ql_log(ql_log_warn, vha, 0x7058, "Invalid start region 0x%x/0x%x.\n", start, bsg_job->request_payload.payload_len); return -EINVAL; } ha->optrom_region_size = start + bsg_job->request_payload.payload_len > ha->optrom_size ? ha->optrom_size - start : bsg_job->request_payload.payload_len; ha->optrom_state = QLA_SWRITING; } else { ha->optrom_region_size = start + bsg_job->reply_payload.payload_len > ha->optrom_size ? 
ha->optrom_size - start : bsg_job->reply_payload.payload_len; ha->optrom_state = QLA_SREADING; } ha->optrom_buffer = vzalloc(ha->optrom_region_size); if (!ha->optrom_buffer) { ql_log(ql_log_warn, vha, 0x7059, "Read: Unable to allocate memory for optrom retrieval " "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; return -ENOMEM; } return 0; } static int qla2x00_read_optrom(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; if (ha->flags.nic_core_reset_hdlr_active) return -EBUSY; mutex_lock(&ha->optrom_mutex); rval = qla2x00_optrom_setup(bsg_job, vha, 0); if (rval) { mutex_unlock(&ha->optrom_mutex); return rval; } ha->isp_ops->read_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, ha->optrom_region_size); bsg_reply->reply_payload_rcv_len = ha->optrom_region_size; bsg_reply->result = DID_OK; vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; mutex_unlock(&ha->optrom_mutex); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rval; } static int qla2x00_update_optrom(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; mutex_lock(&ha->optrom_mutex); rval = qla2x00_optrom_setup(bsg_job, vha, 1); if (rval) { mutex_unlock(&ha->optrom_mutex); return rval; } /* Set the isp82xx_no_md_cap not to capture minidump */ ha->flags.isp82xx_no_md_cap = 1; sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, ha->optrom_buffer, ha->optrom_region_size); rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); if (rval) { bsg_reply->result = -EINVAL; rval = -EINVAL; } else { bsg_reply->result = DID_OK; } vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; mutex_unlock(&ha->optrom_mutex); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rval; } static int qla2x00_update_fru_versions(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; uint8_t bsg[DMA_POOL_SIZE]; struct qla_image_version_list *list = (void *)bsg; struct qla_image_version *image; uint32_t count; dma_addr_t sfp_dma; void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); image = list->version; count = list->count; while (count--) { memcpy(sfp, &image->field_info, sizeof(image->field_info)); rval = qla2x00_write_sfp(vha, sfp_dma, sfp, image->field_address.device, image->field_address.offset, sizeof(image->field_info), image->field_address.option); if (rval) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } image++; } bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct 
fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla2x00_read_fru_status(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; uint8_t bsg[DMA_POOL_SIZE]; struct qla_status_reg *sr = (void *)bsg; dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, sr->field_address.device, sr->field_address.offset, sizeof(sr->status_reg), sr->field_address.option); sr->status_reg = *sfp; if (rval) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->reply_payload_rcv_len = sizeof(*sr); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla2x00_write_fru_status(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; uint8_t bsg[DMA_POOL_SIZE]; struct qla_status_reg *sr = (void *)bsg; dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); *sfp = sr->status_reg; rval = qla2x00_write_sfp(vha, sfp_dma, sfp, sr->field_address.device, sr->field_address.offset, sizeof(sr->status_reg), sr->field_address.option); if (rval) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla2x00_write_i2c(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; uint8_t bsg[DMA_POOL_SIZE]; struct qla_i2c_access *i2c = (void *)bsg; dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); memcpy(sfp, i2c->buffer, i2c->length); rval = qla2x00_write_sfp(vha, sfp_dma, sfp, i2c->device, i2c->offset, i2c->length, i2c->option); if (rval) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, 
sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla2x00_read_i2c(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = 0; uint8_t bsg[DMA_POOL_SIZE]; struct qla_i2c_access *i2c = (void *)bsg; dma_addr_t sfp_dma; uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); if (!sfp) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_NO_MEMORY; goto done; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, i2c->device, i2c->offset, i2c->length, i2c->option); if (rval) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_MAILBOX; goto dealloc; } memcpy(i2c->buffer, sfp, i2c->length); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; dealloc: dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->reply_payload_rcv_len = sizeof(*i2c); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; uint32_t rval = EXT_STATUS_OK; uint16_t req_sg_cnt = 0; uint16_t rsp_sg_cnt = 0; uint16_t nextlid = 0; uint32_t tot_dsds; srb_t *sp = NULL; uint32_t req_data_len; uint32_t rsp_data_len; /* Check the type of the adapter */ if (!IS_BIDI_CAPABLE(ha)) { ql_log(ql_log_warn, vha, 0x70a0, "This adapter is not supported\n"); rval = EXT_STATUS_NOT_SUPPORTED; goto done; } if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { rval = EXT_STATUS_BUSY; goto done; } /* Check if host is online */ if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n"); rval = EXT_STATUS_DEVICE_OFFLINE; goto done; } /* Check if cable is plugged in or not */ if (vha->device_flags & DFLG_NO_CABLE) { ql_log(ql_log_warn, vha, 0x70a2, "Cable is unplugged...\n"); rval = EXT_STATUS_INVALID_CFG; goto done; } /* Check if the switch is connected or not */ if (ha->current_topology != ISP_CFG_F) { ql_log(ql_log_warn, vha, 0x70a3, "Host is not connected to the switch\n"); rval = EXT_STATUS_INVALID_CFG; goto done; } /* Check if operating mode is P2P */ if (ha->operating_mode != P2P) { ql_log(ql_log_warn, vha, 0x70a4, "Host operating mode is not P2p\n"); rval = EXT_STATUS_INVALID_CFG; goto done; } mutex_lock(&ha->selflogin_lock); if (vha->self_login_loop_id == 0) { /* Initialize all required fields of fcport */ vha->bidir_fcport.vha = vha; vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; vha->bidir_fcport.loop_id = vha->loop_id; if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) { ql_log(ql_log_warn, vha, 0x70a7, "Failed to login port %06X for bidirectional IOCB\n", vha->bidir_fcport.d_id.b24); mutex_unlock(&ha->selflogin_lock); 
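			/*
			 * Self-login to the fabric failed: report a mailbox
			 * error in the vendor-specific reply and bail out.
			 */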
rval = EXT_STATUS_MAILBOX; goto done; } vha->self_login_loop_id = nextlid - 1; } /* Assign the self login loop id to fcport */ mutex_unlock(&ha->selflogin_lock); vha->bidir_fcport.loop_id = vha->self_login_loop_id; req_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_sg_cnt) { rval = EXT_STATUS_NO_MEMORY; goto done; } rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { rval = EXT_STATUS_NO_MEMORY; goto done_unmap_req_sg; } if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { ql_dbg(ql_dbg_user, vha, 0x70a9, "Dma mapping resulted in different sg counts " "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: " "%x dma_reply_sg_cnt: %x]\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); rval = EXT_STATUS_NO_MEMORY; goto done_unmap_sg; } req_data_len = bsg_job->request_payload.payload_len; rsp_data_len = bsg_job->reply_payload.payload_len; if (req_data_len != rsp_data_len) { rval = EXT_STATUS_BUSY; ql_log(ql_log_warn, vha, 0x70aa, "req_data_len != rsp_data_len\n"); goto done_unmap_sg; } /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); if (!sp) { ql_dbg(ql_dbg_user, vha, 0x70ac, "Alloc SRB structure failed\n"); rval = EXT_STATUS_NO_MEMORY; goto done_unmap_sg; } /*Populate srb->ctx with bidir ctx*/ sp->u.bsg_job = bsg_job; sp->free = qla2x00_bsg_sp_free; sp->type = SRB_BIDI_CMD; sp->done = qla2x00_bsg_job_done; /* Add the read and write sg count */ tot_dsds = rsp_sg_cnt + req_sg_cnt; rval = qla2x00_start_bidir(sp, vha, tot_dsds); if (rval != EXT_STATUS_OK) goto done_free_srb; /* the bsg request will be completed in the interrupt handler */ return rval; done_free_srb: mempool_free(sp, ha->srb_mempool); done_unmap_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); done_unmap_req_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); done: /* Return an error vendor specific response * and complete the bsg request */ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->reply_payload_rcv_len = 0; bsg_reply->result = (DID_OK) << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); /* Always return success, vendor rsp carries correct status */ return 0; } static int qlafx00_mgmt_cmd(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; int rval = (DID_ERROR << 16); struct qla_mt_iocb_rqst_fx00 *piocb_rqst; srb_t *sp; int req_sg_cnt = 0, rsp_sg_cnt = 0; struct fc_port *fcport; char *type = "FC_BSG_HST_FX_MGMT"; /* Copy the IOCB specific information */ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; /* Dump the vendor information */ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, piocb_rqst, sizeof(*piocb_rqst)); if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x70d0, "Host is not online.\n"); rval = -EIO; goto done; } if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { req_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 
DMA_TO_DEVICE); if (!req_sg_cnt) { ql_log(ql_log_warn, vha, 0x70c7, "dma_map_sg return %d for request\n", req_sg_cnt); rval = -ENOMEM; goto done; } } if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { ql_log(ql_log_warn, vha, 0x70c8, "dma_map_sg return %d for reply\n", rsp_sg_cnt); rval = -ENOMEM; goto done_unmap_req_sg; } } ql_dbg(ql_dbg_user, vha, 0x70c9, "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); /* Allocate a dummy fcport structure, since functions preparing the * IOCB and mailbox command retrieves port specific information * from fcport structure. For Host based ELS commands there will be * no fcport structure allocated */ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_log(ql_log_warn, vha, 0x70ca, "Failed to allocate fcport.\n"); rval = -ENOMEM; goto done_unmap_rsp_sg; } /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { ql_log(ql_log_warn, vha, 0x70cb, "qla2x00_get_sp failed.\n"); rval = -ENOMEM; goto done_free_fcport; } /* Initialize all required fields of fcport */ fcport->vha = vha; fcport->loop_id = le32_to_cpu(piocb_rqst->dataword); sp->type = SRB_FXIOCB_BCMD; sp->name = "bsg_fx_mgmt"; sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); sp->u.bsg_job = bsg_job; sp->free = qla2x00_bsg_sp_free; sp->done = qla2x00_bsg_job_done; ql_dbg(ql_dbg_user, vha, 0x70cc, "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", type, piocb_rqst->func_type, fcport->loop_id); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x70cd, "qla2x00_start_sp failed=%d.\n", rval); mempool_free(sp, ha->srb_mempool); rval = -EIO; goto done_free_fcport; } return rval; done_free_fcport: qla2x00_free_fcport(fcport); done_unmap_rsp_sg: if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); done_unmap_req_sg: if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); done: return rval; } static int qla26xx_serdes_op(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval = 0; struct qla_serdes_reg sr; memset(&sr, 0, sizeof(sr)); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); switch (sr.cmd) { case INT_SC_SERDES_WRITE_REG: rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); bsg_reply->reply_payload_rcv_len = 0; break; case INT_SC_SERDES_READ_REG: rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); bsg_reply->reply_payload_rcv_len = sizeof(sr); break; default: ql_dbg(ql_dbg_user, vha, 0x708c, "Unknown serdes cmd %x.\n", sr.cmd); rval = -EINVAL; break; } bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? 
EXT_STATUS_MAILBOX : 0; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla8044_serdes_op(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval = 0; struct qla_serdes_reg_ex sr; memset(&sr, 0, sizeof(sr)); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); switch (sr.cmd) { case INT_SC_SERDES_WRITE_REG: rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); bsg_reply->reply_payload_rcv_len = 0; break; case INT_SC_SERDES_READ_REG: rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); bsg_reply->reply_payload_rcv_len = sizeof(sr); break; default: ql_dbg(ql_dbg_user, vha, 0x7020, "Unknown serdes cmd %x.\n", sr.cmd); rval = -EINVAL; break; } bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? EXT_STATUS_MAILBOX : 0; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct qla_flash_update_caps cap; if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha)) return -EPERM; memset(&cap, 0, sizeof(cap)); cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 | (uint64_t)ha->fw_attributes_ext[0] << 32 | (uint64_t)ha->fw_attributes_h << 16 | (uint64_t)ha->fw_attributes; sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); bsg_reply->reply_payload_rcv_len = sizeof(cap); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; uint64_t online_fw_attr = 0; struct qla_flash_update_caps cap; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; memset(&cap, 0, sizeof(cap)); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &cap, sizeof(cap)); online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 | (uint64_t)ha->fw_attributes_ext[0] << 32 | (uint64_t)ha->fw_attributes_h << 16 | (uint64_t)ha->fw_attributes; if (online_fw_attr != cap.capabilities) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_INVALID_PARAM; return -EINVAL; } if (cap.outage_duration < MAX_LOOP_TIMEOUT) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_INVALID_PARAM; return -EINVAL; } bsg_reply->reply_payload_rcv_len = 0; bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla27xx_get_bbcr_data(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = 
bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct qla_bbcr_data bbcr; uint16_t loop_id, topo, sw_cap; uint8_t domain, area, al_pa, state; int rval; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; memset(&bbcr, 0, sizeof(bbcr)); if (vha->flags.bbcr_enable) bbcr.status = QLA_BBCR_STATUS_ENABLED; else bbcr.status = QLA_BBCR_STATUS_DISABLED; if (bbcr.status == QLA_BBCR_STATUS_ENABLED) { rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); if (rval != QLA_SUCCESS) { bbcr.status = QLA_BBCR_STATUS_UNKNOWN; bbcr.state = QLA_BBCR_STATE_OFFLINE; bbcr.mbx1 = loop_id; goto done; } state = (vha->bbcr >> 12) & 0x1; if (state) { bbcr.state = QLA_BBCR_STATE_OFFLINE; bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT; } else { bbcr.state = QLA_BBCR_STATE_ONLINE; bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf; } bbcr.configured_bbscn = vha->bbcr & 0xf; } done: sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); bsg_reply->reply_payload_rcv_len = sizeof(bbcr); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla2x00_get_priv_stats(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct link_statistics *stats = NULL; dma_addr_t stats_dma; int rval; uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd; uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; if (test_bit(UNLOADING, &vha->dpc_flags)) return -ENODEV; if (unlikely(pci_channel_offline(ha->pdev))) return -ENODEV; if (qla2x00_reset_active(vha)) return -EBUSY; if (!IS_FWI2_CAPABLE(ha)) return -EPERM; stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x70e2, "Failed to allocate memory for stats.\n"); return -ENOMEM; } rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); if (rval == QLA_SUCCESS) { ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5, stats, sizeof(*stats)); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); } bsg_reply->reply_payload_rcv_len = sizeof(*stats); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? 
EXT_STATUS_MAILBOX : EXT_STATUS_OK; bsg_job->reply_len = sizeof(*bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma); return 0; } static int qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval; struct qla_dport_diag *dd; if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return -EPERM; dd = kmalloc(sizeof(*dd), GFP_KERNEL); if (!dd) { ql_log(ql_log_warn, vha, 0x70db, "Failed to allocate memory for dport.\n"); return -ENOMEM; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); rval = qla26xx_dport_diagnostics( vha, dd->buf, sizeof(dd->buf), dd->options); if (rval == QLA_SUCCESS) { sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); } bsg_reply->reply_payload_rcv_len = sizeof(*dd); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; bsg_job->reply_len = sizeof(*bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); kfree(dd); return 0; } static int qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); int rval; struct qla_dport_diag_v2 *dd; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint16_t options; if (!IS_DPORT_CAPABLE(vha->hw)) return -EPERM; dd = kzalloc(sizeof(*dd), GFP_KERNEL); if (!dd) return -ENOMEM; sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); options = dd->options; /* Check dport Test in progress */ if (options == QLA_GET_DPORT_RESULT_V2 && vha->dport_status & DPORT_DIAG_IN_PROGRESS) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_DPORT_DIAG_IN_PROCESS; goto dportcomplete; } /* Check chip reset in progress and start/restart requests arrive */ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS && (options == QLA_START_DPORT_TEST_V2 || options == QLA_RESTART_DPORT_TEST_V2)) { vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS; } /* Check chip reset in progress and get result request arrive */ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS && options == QLA_GET_DPORT_RESULT_V2) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_DPORT_DIAG_NOT_RUNNING; goto dportcomplete; } rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp); if (rval == QLA_SUCCESS) { bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; if (options == QLA_START_DPORT_TEST_V2 || options == QLA_RESTART_DPORT_TEST_V2) { dd->mbx1 = mcp->mb[0]; dd->mbx2 = mcp->mb[1]; vha->dport_status |= DPORT_DIAG_IN_PROGRESS; } else if (options == QLA_GET_DPORT_RESULT_V2) { dd->mbx1 = le16_to_cpu(vha->dport_data[1]); dd->mbx2 = le16_to_cpu(vha->dport_data[2]); } } else { dd->mbx1 = mcp->mb[0]; dd->mbx2 = mcp->mb[1]; bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_DPORT_DIAG_ERR; } dportcomplete: sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); bsg_reply->reply_payload_rcv_len = sizeof(*dd); bsg_job->reply_len = sizeof(*bsg_reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, 
bsg_reply->result, bsg_reply->reply_payload_rcv_len); kfree(dd); return 0; } static int qla2x00_get_flash_image_status(struct bsg_job *bsg_job) { scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct qla_hw_data *ha = vha->hw; struct qla_active_regions regions = { }; struct active_regions active_regions = { }; qla27xx_get_active_image(vha, &active_regions); regions.global_image = active_regions.global; if (IS_QLA27XX(ha)) regions.nvme_params = QLA27XX_PRIMARY_IMAGE; if (IS_QLA28XX(ha)) { qla28xx_get_aux_images(vha, &active_regions); regions.board_config = active_regions.aux.board_config; regions.vpd_nvram = active_regions.aux.vpd_nvram; regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1; regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3; regions.nvme_params = active_regions.aux.nvme_params; } ql_dbg(ql_dbg_user, vha, 0x70e1, "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n", __func__, vha->host_no, regions.global_image, regions.board_config, regions.vpd_nvram, regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions)); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_reply->reply_payload_rcv_len = sizeof(regions); bsg_reply->result = DID_OK << 16; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static int qla2x00_manage_host_stats(struct bsg_job *bsg_job) { scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct ql_vnd_mng_host_stats_param *req_data; struct ql_vnd_mng_host_stats_resp rsp_data; u32 req_data_len; int ret = 0; if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); return -EIO; } req_data_len = bsg_job->request_payload.payload_len; if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) { ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); return -EIO; } req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) { ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); return -ENOMEM; } /* Copy the request buffer in req_data */ sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, req_data, req_data_len); switch (req_data->action) { case QLA_STOP: ret = qla2xxx_stop_stats(vha->host, req_data->stat_type); break; case QLA_START: ret = qla2xxx_start_stats(vha->host, req_data->stat_type); break; case QLA_CLEAR: ret = qla2xxx_reset_stats(vha->host, req_data->stat_type); break; default: ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); ret = -EIO; break; } kfree(req_data); /* Prepare response */ rsp_data.status = ret; bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &rsp_data, sizeof(struct ql_vnd_mng_host_stats_resp)); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return ret; } static int qla2x00_get_host_stats(struct bsg_job *bsg_job) { scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct ql_vnd_stats_param *req_data; struct ql_vnd_host_stats_resp 
rsp_data; u32 req_data_len; int ret = 0; u64 ini_entry_count = 0; u64 entry_count = 0; u64 tgt_num = 0; u64 tmp_stat_type = 0; u64 response_len = 0; void *data; req_data_len = bsg_job->request_payload.payload_len; if (req_data_len != sizeof(struct ql_vnd_stats_param)) { ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); return -EIO; } req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) { ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); return -ENOMEM; } /* Copy the request buffer in req_data */ sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, req_data, req_data_len); /* Copy stat type to work on it */ tmp_stat_type = req_data->stat_type; if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) { /* Num of tgts connected to this host */ tgt_num = qla2x00_get_num_tgts(vha); /* unset BIT_17 */ tmp_stat_type &= ~(1 << 17); } /* Total ini stats */ ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); /* Total number of entries */ entry_count = ini_entry_count + tgt_num; response_len = sizeof(struct ql_vnd_host_stats_resp) + (sizeof(struct ql_vnd_stat_entry) * entry_count); if (response_len > bsg_job->reply_payload.payload_len) { rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL; bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &rsp_data, sizeof(struct ql_vnd_mng_host_stats_resp)); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); goto host_stat_out; } data = kzalloc(response_len, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto host_stat_out; } ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, data, response_len); rsp_data.status = EXT_STATUS_OK; bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, data, response_len); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); kfree(data); host_stat_out: kfree(req_data); return ret; } static struct fc_rport * qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num) { fc_port_t *fcport = NULL; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->rport->number == tgt_num) return fcport->rport; } return NULL; } static int qla2x00_get_tgt_stats(struct bsg_job *bsg_job) { scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct ql_vnd_tgt_stats_param *req_data; u32 req_data_len; int ret = 0; u64 response_len = 0; struct ql_vnd_tgt_stats_resp *data = NULL; struct fc_rport *rport = NULL; if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); return -EIO; } req_data_len = bsg_job->request_payload.payload_len; if (req_data_len != sizeof(struct ql_vnd_stat_entry)) { ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); return -EIO; } req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) { ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); return -ENOMEM; } /* Copy the request buffer in req_data */ sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, req_data, req_data_len); response_len = sizeof(struct ql_vnd_tgt_stats_resp) + 
sizeof(struct ql_vnd_stat_entry); /* structure + size for one entry */ data = kzalloc(response_len, GFP_KERNEL); if (!data) { kfree(req_data); return -ENOMEM; } if (response_len > bsg_job->reply_payload.payload_len) { data->status = EXT_STATUS_BUFFER_TOO_SMALL; bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, data, sizeof(struct ql_vnd_tgt_stats_resp)); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); goto tgt_stat_out; } rport = qla2xxx_find_rport(vha, req_data->tgt_id); if (!rport) { ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id); ret = EXT_STATUS_INVALID_PARAM; data->status = EXT_STATUS_INVALID_PARAM; goto reply; } ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, rport, (void *)data, response_len); bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; reply: bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, data, response_len); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); tgt_stat_out: kfree(data); kfree(req_data); return ret; } static int qla2x00_manage_host_port(struct bsg_job *bsg_job) { scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct ql_vnd_mng_host_port_param *req_data; struct ql_vnd_mng_host_port_resp rsp_data; u32 req_data_len; int ret = 0; req_data_len = bsg_job->request_payload.payload_len; if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) { ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); return -EIO; } req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) { ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); return -ENOMEM; } /* Copy the request buffer in req_data */ sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, req_data, req_data_len); switch (req_data->action) { case QLA_ENABLE: ret = qla2xxx_enable_port(vha->host); break; case QLA_DISABLE: ret = qla2xxx_disable_port(vha->host); break; default: ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); ret = -EIO; break; } kfree(req_data); /* Prepare response */ rsp_data.status = ret; bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &rsp_data, sizeof(struct ql_vnd_mng_host_port_resp)); bsg_reply->result = DID_OK; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return ret; } static int qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n", __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]); switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) { case QL_VND_LOOPBACK: return qla2x00_process_loopback(bsg_job); case QL_VND_A84_RESET: return qla84xx_reset(bsg_job); case QL_VND_A84_UPDATE_FW: return qla84xx_updatefw(bsg_job); case QL_VND_A84_MGMT_CMD: return qla84xx_mgmt_cmd(bsg_job); case 
QL_VND_IIDMA: return qla24xx_iidma(bsg_job); case QL_VND_FCP_PRIO_CFG_CMD: return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); case QL_VND_READ_FLASH: return qla2x00_read_optrom(bsg_job); case QL_VND_UPDATE_FLASH: return qla2x00_update_optrom(bsg_job); case QL_VND_SET_FRU_VERSION: return qla2x00_update_fru_versions(bsg_job); case QL_VND_READ_FRU_STATUS: return qla2x00_read_fru_status(bsg_job); case QL_VND_WRITE_FRU_STATUS: return qla2x00_write_fru_status(bsg_job); case QL_VND_WRITE_I2C: return qla2x00_write_i2c(bsg_job); case QL_VND_READ_I2C: return qla2x00_read_i2c(bsg_job); case QL_VND_DIAG_IO_CMD: return qla24xx_process_bidir_cmd(bsg_job); case QL_VND_FX00_MGMT_CMD: return qlafx00_mgmt_cmd(bsg_job); case QL_VND_SERDES_OP: return qla26xx_serdes_op(bsg_job); case QL_VND_SERDES_OP_EX: return qla8044_serdes_op(bsg_job); case QL_VND_GET_FLASH_UPDATE_CAPS: return qla27xx_get_flash_upd_cap(bsg_job); case QL_VND_SET_FLASH_UPDATE_CAPS: return qla27xx_set_flash_upd_cap(bsg_job); case QL_VND_GET_BBCR_DATA: return qla27xx_get_bbcr_data(bsg_job); case QL_VND_GET_PRIV_STATS: case QL_VND_GET_PRIV_STATS_EX: return qla2x00_get_priv_stats(bsg_job); case QL_VND_DPORT_DIAGNOSTICS: return qla2x00_do_dport_diagnostics(bsg_job); case QL_VND_DPORT_DIAGNOSTICS_V2: return qla2x00_do_dport_diagnostics_v2(bsg_job); case QL_VND_EDIF_MGMT: return qla_edif_app_mgmt(bsg_job); case QL_VND_SS_GET_FLASH_IMAGE_STATUS: return qla2x00_get_flash_image_status(bsg_job); case QL_VND_MANAGE_HOST_STATS: return qla2x00_manage_host_stats(bsg_job); case QL_VND_GET_HOST_STATS: return qla2x00_get_host_stats(bsg_job); case QL_VND_GET_TGT_STATS: return qla2x00_get_tgt_stats(bsg_job); case QL_VND_MANAGE_HOST_PORT: return qla2x00_manage_host_port(bsg_job); case QL_VND_MBX_PASSTHRU: return qla2x00_mailbox_passthru(bsg_job); default: return -ENOSYS; } } int qla24xx_bsg_request(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; int ret = -EINVAL; struct fc_rport *rport; struct Scsi_Host *host; scsi_qla_host_t *vha; /* In case no data transferred. */ bsg_reply->reply_payload_rcv_len = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); if (!rport) return ret; host = rport_to_shost(rport); vha = shost_priv(host); } else { host = fc_bsg_to_shost(bsg_job); vha = shost_priv(host); } /* Disable port will bring down the chip, allow enable command */ if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT || bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS) goto skip_chip_chk; if (vha->hw->flags.port_isolated) { bsg_reply->result = DID_ERROR; /* operation not permitted */ return -EPERM; } if (qla2x00_chip_is_down(vha)) { ql_dbg(ql_dbg_user, vha, 0x709f, "BSG: ISP abort active/needed -- cmd=%d.\n", bsg_request->msgcode); SET_DID_STATUS(bsg_reply->result, DID_ERROR); return -EBUSY; } if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { SET_DID_STATUS(bsg_reply->result, DID_ERROR); return -EIO; } skip_chip_chk: ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, "Entered %s msgcode=0x%x. 
bsg ptr %px\n", __func__, bsg_request->msgcode, bsg_job); switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: case FC_BSG_HST_ELS_NOLOGIN: ret = qla2x00_process_els(bsg_job); break; case FC_BSG_HST_CT: ret = qla2x00_process_ct(bsg_job); break; case FC_BSG_HST_VENDOR: ret = qla2x00_process_vendor_specific(vha, bsg_job); break; case FC_BSG_HST_ADD_RPORT: case FC_BSG_HST_DEL_RPORT: case FC_BSG_RPT_CT: default: ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); break; } ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, "%s done with return %x\n", __func__, ret); return ret; } int qla24xx_bsg_timeout(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct qla_hw_data *ha = vha->hw; srb_t *sp; int cnt, que; unsigned long flags; struct req_que *req; ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n", __func__, bsg_job); if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x9007, "PCI/Register disconnect.\n"); qla_pci_set_eeh_busy(vha); } /* find the bsg job from the active list of commands */ spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp && (sp->type == SRB_CT_CMD || sp->type == SRB_ELS_CMD_HST || sp->type == SRB_ELS_CMD_HST_NOLOGIN || sp->type == SRB_FXIOCB_BCMD) && sp->u.bsg_job == bsg_job) { req->outstanding_cmds[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { ql_log(ql_log_warn, vha, 0x7089, "mbx abort_command failed.\n"); bsg_reply->result = -EIO; } else { ql_dbg(ql_dbg_user, vha, 0x708a, "mbx abort_command success.\n"); bsg_reply->result = 0; } spin_lock_irqsave(&ha->hardware_lock, flags); goto done; } } } spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); bsg_reply->result = -ENXIO; return 0; done: spin_unlock_irqrestore(&ha->hardware_lock, flags); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return 0; } int qla2x00_mailbox_passthru(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); int ret = -EINVAL; int ptsize = sizeof(struct qla_mbx_passthru); struct qla_mbx_passthru *req_data = NULL; uint32_t req_data_len; req_data_len = bsg_job->request_payload.payload_len; if (req_data_len != ptsize) { ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n"); return -EIO; } req_data = kzalloc(ptsize, GFP_KERNEL); if (!req_data) { ql_log(ql_log_warn, vha, 0xf0a4, "req_data memory allocation failure.\n"); return -ENOMEM; } /* Copy the request buffer in req_data */ sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, req_data, ptsize); ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out); /* Copy the req_data in request buffer */ sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, req_data, ptsize); bsg_reply->reply_payload_rcv_len = ptsize; if (ret == QLA_SUCCESS) bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; else bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR; bsg_job->reply_len = sizeof(*bsg_job->reply); bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); kfree(req_data); return 
ret; }
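/*
 * Illustrative sketch, not part of the original qla_bsg.c: a minimal
 * vendor-command handler showing the request/reply pattern the handlers
 * above all follow, namely copy the request payload out of the BSG
 * scatter/gather list, act on it, copy a response structure back, set the
 * vendor status word, and complete the job with bsg_job_done().  The names
 * QL_VND_ECHO, struct ql_vnd_echo_param, struct ql_vnd_echo_resp and
 * qla2x00_example_echo() are hypothetical and exist only for this sketch;
 * it assumes the headers already included at the top of qla_bsg.c.  The
 * helpers it calls (shost_priv, fc_bsg_to_shost, sg_copy_to_buffer,
 * sg_copy_from_buffer, bsg_job_done, ql_log) are used with the same
 * signatures as in the handlers above.  A real handler of this shape would
 * be dispatched from qla2x00_process_vendor_specific() under its own
 * vendor_cmd[0] case.
 */
struct ql_vnd_echo_param {
	uint32_t token;
};

struct ql_vnd_echo_resp {
	uint32_t status;
	uint32_t token;
};

static int qla2x00_example_echo(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_echo_param *req_data;
	struct ql_vnd_echo_resp rsp_data;
	u32 req_data_len = bsg_job->request_payload.payload_len;

	/* Reject requests whose payload does not match the expected layout. */
	if (req_data_len != sizeof(struct ql_vnd_echo_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		return -ENOMEM;

	/* Pull the request structure out of the request scatter/gather list. */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt,
			  req_data, req_data_len);

	/* "Process" the command: echo the token back with an OK status. */
	rsp_data.status = EXT_STATUS_OK;
	rsp_data.token = req_data->token;
	kfree(req_data);

	/* Copy the response into the reply payload and complete the job. */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt,
				&rsp_data, sizeof(rsp_data));
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}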
linux-master
drivers/scsi/qla2xxx/qla_bsg.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include <linux/delay.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define QLA82XX_PCI_MN_2M (0) #define QLA82XX_PCI_MS_2M (0x80000) #define QLA82XX_PCI_OCM0_2M (0xc0000) #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define BLOCK_PROTECT_BITS 0x0F /* CRB window related */ #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) #define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ ((off) & 0xf0000)) #define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) #define CRB_INDIRECT_2M (0x1e0000UL) #define MAX_CRB_XFORM 60 static unsigned long crb_addr_xform[MAX_CRB_XFORM]; static int qla82xx_crb_table_initialized; #define qla82xx_crb_addr_transform(name) \ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC }; static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); qla82xx_crb_addr_transform(TIMR); qla82xx_crb_addr_transform(SRE); qla82xx_crb_addr_transform(SQN3); qla82xx_crb_addr_transform(SQN2); qla82xx_crb_addr_transform(SQN1); qla82xx_crb_addr_transform(SQN0); qla82xx_crb_addr_transform(SQS3); qla82xx_crb_addr_transform(SQS2); qla82xx_crb_addr_transform(SQS1); qla82xx_crb_addr_transform(SQS0); qla82xx_crb_addr_transform(RPMX7); qla82xx_crb_addr_transform(RPMX6); qla82xx_crb_addr_transform(RPMX5); qla82xx_crb_addr_transform(RPMX4); qla82xx_crb_addr_transform(RPMX3); qla82xx_crb_addr_transform(RPMX2); qla82xx_crb_addr_transform(RPMX1); qla82xx_crb_addr_transform(RPMX0); qla82xx_crb_addr_transform(ROMUSB); qla82xx_crb_addr_transform(SN); qla82xx_crb_addr_transform(QMN); qla82xx_crb_addr_transform(QMS); qla82xx_crb_addr_transform(PGNI); qla82xx_crb_addr_transform(PGND); qla82xx_crb_addr_transform(PGN3); qla82xx_crb_addr_transform(PGN2); qla82xx_crb_addr_transform(PGN1); qla82xx_crb_addr_transform(PGN0); qla82xx_crb_addr_transform(PGSI); qla82xx_crb_addr_transform(PGSD); qla82xx_crb_addr_transform(PGS3); qla82xx_crb_addr_transform(PGS2); qla82xx_crb_addr_transform(PGS1); qla82xx_crb_addr_transform(PGS0); qla82xx_crb_addr_transform(PS); qla82xx_crb_addr_transform(PH); qla82xx_crb_addr_transform(NIU); qla82xx_crb_addr_transform(I2Q); qla82xx_crb_addr_transform(EG); qla82xx_crb_addr_transform(MN); qla82xx_crb_addr_transform(MS); qla82xx_crb_addr_transform(CAS2); qla82xx_crb_addr_transform(CAS1); qla82xx_crb_addr_transform(CAS0); qla82xx_crb_addr_transform(CAM); qla82xx_crb_addr_transform(C2C1); qla82xx_crb_addr_transform(C2C0); qla82xx_crb_addr_transform(SMB); qla82xx_crb_addr_transform(OCM0); /* * Used only in P3 just define it for P2 also. 
*/ qla82xx_crb_addr_transform(I2C0); qla82xx_crb_table_initialized = 1; } static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { {{{0, 0, 0, 0} } }, {{{1, 0x0100000, 0x0102000, 0x120000}, {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } } , {{{1, 0x0200000, 0x0210000, 0x180000} } }, {{{0, 0, 0, 0} } }, {{{1, 0x0400000, 0x0401000, 0x169000} } }, {{{1, 0x0500000, 0x0510000, 0x140000} } }, {{{1, 0x0600000, 0x0610000, 0x1c0000} } }, {{{1, 0x0700000, 0x0704000, 0x1b8000} } }, {{{1, 0x0800000, 0x0802000, 0x170000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } }, {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } }, {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } }, {{{1, 0x0f00000, 0x0f01000, 0x164000} } }, 
{{{0, 0x1000000, 0x1004000, 0x1a8000} } }, {{{1, 0x1100000, 0x1101000, 0x160000} } }, {{{1, 0x1200000, 0x1201000, 0x161000} } }, {{{1, 0x1300000, 0x1301000, 0x162000} } }, {{{1, 0x1400000, 0x1401000, 0x163000} } }, {{{1, 0x1500000, 0x1501000, 0x165000} } }, {{{1, 0x1600000, 0x1601000, 0x166000} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{1, 0x1d00000, 0x1d10000, 0x190000} } }, {{{1, 0x1e00000, 0x1e01000, 0x16a000} } }, {{{1, 0x1f00000, 0x1f10000, 0x150000} } }, {{{0} } }, {{{1, 0x2100000, 0x2102000, 0x120000}, {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{1, 0x2800000, 0x2804000, 0x1a4000} } }, {{{1, 0x2900000, 0x2901000, 0x16b000} } }, {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } }, {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } }, {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } }, {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } }, {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } }, {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } }, {{{1, 0x3000000, 0x3000400, 0x1adc00} } }, {{{0, 0x3100000, 0x3104000, 0x1a8000} } }, {{{1, 0x3200000, 0x3204000, 0x1d4000} } }, {{{1, 0x3300000, 0x3304000, 0x1a0000} } }, {{{0} } }, {{{1, 0x3500000, 0x3500400, 0x1ac000} } }, {{{1, 0x3600000, 0x3600400, 0x1ae000} } }, {{{1, 0x3700000, 0x3700400, 0x1ae400} } }, {{{1, 0x3800000, 0x3804000, 0x1d0000} } }, {{{1, 0x3900000, 0x3904000, 0x1b4000} } }, {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } }, {{{0} } }, {{{0} } }, {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } }, {{{1, 0x3e00000, 0x3e01000, 0x167000} } }, {{{1, 0x3f00000, 0x3f01000, 0x168000} } } }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned qla82xx_crb_hub_agt[64] = { 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_MN, QLA82XX_HW_CRB_HUB_AGT_ADR_MS, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, QLA82XX_HW_CRB_HUB_AGT_ADR_SN, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_EG, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, 
QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* Device states */ static const char *const q_dev_state[] = { [QLA8XXX_DEV_UNKNOWN] = "Unknown", [QLA8XXX_DEV_COLD] = "Cold/Re-init", [QLA8XXX_DEV_INITIALIZING] = "Initializing", [QLA8XXX_DEV_READY] = "Ready", [QLA8XXX_DEV_NEED_RESET] = "Need Reset", [QLA8XXX_DEV_NEED_QUIESCENT] = "Need Quiescent", [QLA8XXX_DEV_FAILED] = "Failed", [QLA8XXX_DEV_QUIESCENT] = "Quiescent", }; const char *qdev_state(uint32_t dev_state) { return (dev_state < MAX_STATES) ? q_dev_state[dev_state] : "Unknown"; } /* * In: 'off_in' is offset from CRB space in 128M pci map * Out: 'off_out' is 2M pci map addr * side effect: lock crb window */ static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in, void __iomem **off_out) { u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ha->crb_win = CRB_HI(off_in); writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase); /* Read back value to make sure write has gone through before trying * to use it. */ win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); if (win_read != ha->crb_win) { ql_dbg(ql_dbg_p3p, vha, 0xb000, "%s: Written crbwin (0x%x) " "!= Read crbwin (0x%x), off=0x%lx.\n", __func__, ha->crb_win, win_read, off_in); } *off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; } static int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, void __iomem **off_out) { struct crb_128M_2M_sub_block_map *m; if (off_in >= QLA82XX_CRB_MAX) return -1; if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) { *off_out = (off_in - QLA82XX_PCI_CAMQM) + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; return 0; } if (off_in < QLA82XX_PCI_CRBSPACE) return -1; off_in -= QLA82XX_PCI_CRBSPACE; /* Try direct map */ m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) { *off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase; return 0; } /* Not in direct map, use crb window */ *off_out = (void __iomem *)off_in; return 1; } #define CRB_WIN_LOCK_TIMEOUT 100000000 static int qla82xx_crb_win_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; while (!done) { /* acquire semaphore3 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); if (done == 1) break; if (timeout >= CRB_WIN_LOCK_TIMEOUT) return -1; timeout++; } qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); return 0; } int qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data) { void __iomem *off; unsigned long flags = 0; int rv; rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off); BUG_ON(rv == -1); if (rv == 1) { #ifndef __CHECKER__ write_lock_irqsave(&ha->hw_lock, flags); #endif qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); } writel(data, (void __iomem *)off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); #ifndef __CHECKER__ write_unlock_irqrestore(&ha->hw_lock, flags); #endif } return 0; } int qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in) { void __iomem *off; unsigned long flags = 0; int rv; u32 data; rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off); BUG_ON(rv == 
-1); if (rv == 1) { #ifndef __CHECKER__ write_lock_irqsave(&ha->hw_lock, flags); #endif qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); } data = rd_reg_dword(off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); #ifndef __CHECKER__ write_unlock_irqrestore(&ha->hw_lock, flags); #endif } return data; } /* * Context: task, might sleep */ int qla82xx_idc_lock(struct qla_hw_data *ha) { const int delay_ms = 100, timeout_ms = 2000; int done, total = 0; might_sleep(); while (true) { /* acquire semaphore5 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); if (done == 1) break; if (WARN_ON_ONCE(total >= timeout_ms)) return -1; total += delay_ms; msleep(delay_ms); } return 0; } void qla82xx_idc_unlock(struct qla_hw_data *ha) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } /* * check memory access boundary. * used by test agent. support ddr access only for now */ static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, unsigned long long addr, int size) { if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || !addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || ((size != 1) && (size != 2) && (size != 4) && (size != 8))) return 0; else return 1; } static int qla82xx_pci_set_window_warning_count; static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) { int window; u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) { /* DDR network side */ window = MN_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); if ((win_read << 17) != window) { ql_dbg(ql_dbg_p3p, vha, 0xb003, "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; } else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) { unsigned int temp1; if ((addr & 0x00ff800) == 0xff800) { ql_log(ql_log_warn, vha, 0xb004, "%s: QM access not handled.\n", __func__); addr = -1UL; } window = OCM_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); temp1 = ((window & 0x1FF) << 7) | ((window & 0x0FFFE0000) >> 17); if (win_read != temp1) { ql_log(ql_log_warn, vha, 0xb005, "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n", __func__, temp1, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; } else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, QLA82XX_P3_ADDR_QDR_NET_MAX)) { /* QDR network side */ window = MS_WIN(addr); ha->qdr_sn_window = window; qla82xx_wr_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); if (win_read != window) { ql_log(ql_log_warn, vha, 0xb006, "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; } else { /* * peg gdb frequently accesses memory that doesn't exist, * this limits the chit chat so debugging isn't slowed down. 
*/ if ((qla82xx_pci_set_window_warning_count++ < 8) || (qla82xx_pci_set_window_warning_count%64 == 0)) { ql_log(ql_log_warn, vha, 0xb007, "%s: Warning:%s Unknown address range!.\n", __func__, QLA2XXX_DRIVER_NAME); } addr = -1UL; } return addr; } /* check if address is in the same windows as the previous access */ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha, unsigned long long addr) { int window; unsigned long long qdr_max; qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; /* DDR network side */ if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) BUG(); else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) return 1; else if (addr_in_range(addr, QLA82XX_ADDR_OCM1, QLA82XX_ADDR_OCM1_MAX)) return 1; else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { /* QDR network side */ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; if (ha->qdr_sn_window == window) return 1; } return 0; } static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb008, "%s out of bound pci memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) { *(u8 *)data = 0; return -1; } addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: *(u8 *)data = readb(addr); break; case 2: *(u16 *)data = readw(addr); break; case 4: *(u32 *)data = readl(addr); break; case 8: *(u64 *)data = readq(addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } static int qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb009, "%s out of bound memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. 
*/ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) return -1; addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: writeb(*(u8 *)data, addr); break; case 2: writew(*(u16 *)data, addr); break; case 4: writel(*(u32 *)data, addr); break; case 8: writeq(*(u64 *)data, addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } #define MTU_FUDGE_FACTOR 100 static unsigned long qla82xx_decode_crb_addr(unsigned long addr) { int i; unsigned long base_addr, offset, pci_base; if (!qla82xx_crb_table_initialized) qla82xx_crb_addr_transform_setup(); pci_base = ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == ADDR_ERROR) return pci_base; return pci_base + offset; } static long rom_max_timeout = 100; static long qla82xx_rom_lock_timeout = 100; static int qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (!done) { /* acquire semaphore2 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); if (done == 1) break; if (timeout >= qla82xx_rom_lock_timeout) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_dbg(ql_dbg_p3p, vha, 0xb157, "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", __func__, ha->portnum, lock_owner); return -1; } timeout++; } qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum); return 0; } static void qla82xx_rom_unlock(struct qla_hw_data *ha) { qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff); qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); } static int qla82xx_wait_rom_busy(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 4; timeout++; if (timeout >= rom_max_timeout) { ql_dbg(ql_dbg_p3p, vha, 0xb00a, "%s: Timeout reached waiting for rom busy.\n", QLA2XXX_DRIVER_NAME); return -1; } } return 0; } static int qla82xx_wait_rom_done(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 2; timeout++; if (timeout >= rom_max_timeout) { ql_dbg(ql_dbg_p3p, vha, 0xb00b, "%s: Timeout reached waiting for rom done.\n", QLA2XXX_DRIVER_NAME); return -1; } } return 0; } static int qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) { uint32_t off_value, rval = 0; wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000); /* Read back value to make sure write has gone through */ rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); off_value = (off & 0x0000FFFF); if (flag) wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase, data); else rval = rd_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase); return rval; } static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { /* Dword reads to flash. 
*/ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1); *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (addr & 0x0000FFFF), 0, 0); return 0; } static int qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { int ret, loops = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); schedule(); loops++; } if (loops >= 50000) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_log(ql_log_fatal, vha, 0x00b9, "Failed to acquire SEM2 lock, Lock Owner %u.\n", lock_owner); return -1; } ret = qla82xx_do_rom_fast_read(ha, addr, valp); qla82xx_rom_unlock(ha); return ret; } static int qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00c, "Error waiting for rom done.\n"); return -1; } *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); return 0; } static int qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) { uint32_t val = 0; int i, ret; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); for (i = 0; i < 50000; i++) { ret = qla82xx_read_status_reg(ha, &val); if (ret < 0 || (val & 1) == 0) return ret; udelay(10); cond_resched(); } ql_log(ql_log_warn, vha, 0xb00d, "Timeout reached waiting for write finish.\n"); return -1; } static int qla82xx_flash_set_write_enable(struct qla_hw_data *ha) { uint32_t val; qla82xx_wait_rom_busy(ha); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) return -1; if (qla82xx_read_status_reg(ha, &val) != 0) return -1; if ((val & 2) != 2) return -1; return 0; } static int qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (qla82xx_flash_set_write_enable(ha)) return -1; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00e, "Error waiting for rom done.\n"); return -1; } return qla82xx_flash_wait_write_finish(ha); } static int qla82xx_write_disable_flash(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00f, "Error waiting for rom done.\n"); return -1; } return 0; } static int ql82xx_rom_lock_d(struct qla_hw_data *ha) { int loops = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); cond_resched(); loops++; } if (loops >= 50000) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_log(ql_log_warn, vha, 0xb010, "ROM lock failed, Lock Owner %u.\n", lock_owner); return -1; } return 0; } static int qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, uint32_t data) { int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb011, "ROM lock failed.\n"); return ret; } ret = qla82xx_flash_set_write_enable(ha); if (ret < 0) goto done_write; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); qla82xx_wr_32(ha, 
QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb012, "Error waiting for rom done.\n"); ret = -1; goto done_write; } ret = qla82xx_flash_wait_write_finish(ha); done_write: qla82xx_rom_unlock(ha); return ret; } /* This routine does CRB initialize sequence * to put the ISP into operational state */ static int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) { int addr, val; int i ; struct crb_addr_pair *buf; unsigned long off; unsigned offset, n; struct qla_hw_data *ha = vha->hw; struct crb_addr_pair { long addr; long data; }; /* Halt all the individual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); /* disable all I2Q */ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); /* halt epg */ qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); /* halt timers */ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); /* halt pegs */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); /* big hammer */ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) /* don't reset CAM block on reset */ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); else qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); qla82xx_rom_unlock(ha); /* Read the signature value from the flash. 
* Offset 0: Contain signature (0xcafecafe) * Offset 4: Offset and number of addr/value pairs * that present in CRB initialize sequence */ n = 0; if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla82xx_rom_fast_read(ha, 4, &n) != 0) { ql_log(ql_log_fatal, vha, 0x006e, "Error Reading crb_init area: n: %08x.\n", n); return -1; } /* Offset in flash = lower 16 bits * Number of entries = upper 16 bits */ offset = n & 0xffffU; n = (n >> 16) & 0xffffU; /* number of addr/value pair should not exceed 1024 entries */ if (n >= 1024) { ql_log(ql_log_fatal, vha, 0x0071, "Card flash not initialized:n=0x%x.\n", n); return -1; } ql_log(ql_log_info, vha, 0x0072, "%d CRB init values found in ROM.\n", n); buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { ql_log(ql_log_fatal, vha, 0x010c, "Unable to allocate memory.\n"); return -ENOMEM; } for (i = 0; i < n; i++) { if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -1; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { /* Translate internal CRB initialization * address to PCI bus address */ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) + QLA82XX_PCI_CRBSPACE; /* Not all CRB addr/value pair to be written, * some of them are skipped */ /* skipping cold reboot MAGIC */ if (off == QLA82XX_CAM_RAM(0x1fc)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; /* skip core clock, so that firmware can increase the clock */ if (off == (ROMUSB_GLB + 0xc8)) continue; /* skip the function enable register */ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) continue; if (off == ADDR_ERROR) { ql_log(ql_log_fatal, vha, 0x0116, "Unknown addr: 0x%08lx.\n", buf[i].addr); continue; } qla82xx_wr_32(ha, off, buf[i].data); /* ISP requires much bigger delay to settle down, * else crb_window returns 0xffffffff */ if (off == QLA82XX_ROMUSB_GLB_SW_RESET) msleep(1000); /* ISP requires millisec delay between * successive CRB register updation */ msleep(1); } kfree(buf); /* Resetting the data and instruction cache */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); /* Clear all protocol processing engines */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); return 0; } static int qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j, ret = 0, loop, sz[2], off0; int scale, shift_amount, startword; uint32_t temp; uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_write_direct(ha, off, data, size); } off0 = off & 0x7; sz[0] = (size < (8 - off0)) ? 
size : (8 - off0); sz[1] = size - sz[0]; off8 = off & 0xfffffff0; loop = (((off & 0xf) + size - 1) >> 4) + 1; shift_amount = 4; scale = 2; startword = (off & 0xf)/8; for (i = 0; i < loop; i++) { if (qla82xx_pci_mem_read_2M(ha, off8 + (i << shift_amount), &word[i * scale], 8)) return -1; } switch (size) { case 1: tmpw = *((uint8_t *)data); break; case 2: tmpw = *((uint16_t *)data); break; case 4: tmpw = *((uint32_t *)data); break; case 8: default: tmpw = *((uint64_t *)data); break; } if (sz[0] == 8) { word[startword] = tmpw; } else { word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); word[startword] |= tmpw << (off0 * 8); } if (sz[1] != 0) { word[startword+1] &= ~(~0ULL << (sz[1] * 8)); word[startword+1] |= tmpw >> (sz[0] * 8); } for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); temp = word[i * scale] & 0xffffffff; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); temp = (word[i * scale] >> 32) & 0xffffffff; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); temp = word[i*scale + 1] & 0xffffffff; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, temp); temp = (word[i*scale + 1] >> 32) & 0xffffffff; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, temp); temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, "failed to write through agent.\n"); ret = -1; break; } } return ret; } static int qla82xx_fw_load_from_flash(struct qla_hw_data *ha) { int i; long size = 0; long flashaddr = ha->flt_region_bootload << 2; long memaddr = BOOTLD_START; u64 data; u32 high, low; size = (IMAGE_START - BOOTLD_START) / 8; for (i = 0; i < size; i++) { if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { return -1; } data = ((u64)high << 32) | low ; qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8); flashaddr += 8; memaddr += 8; if (i % 0x1000 == 0) msleep(1); } udelay(100); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); return 0; } int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j = 0, k, start, end, loop, sz[2], off0[2]; int shift_amount; uint32_t temp; uint64_t off8, val, mem_crb, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_read_direct(ha, off, data, size); } off8 = off & 0xfffffff0; off0[0] = off & 0xf; sz[0] = (size < (16 - off0[0])) ? 
size : (16 - off0[0]); shift_amount = 4; loop = ((off0[0] + size - 1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); temp = MIU_TA_CTL_ENABLE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, "failed to read through agent.\n"); break; } start = off0[i] >> 2; end = (off0[i] + sz[i] - 1) >> 2; for (k = start; k <= end; k++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_RDDATA(k)); word[i] |= ((uint64_t)temp << (32 * (k & 1))); } } if (j >= MAX_CTL_CHECK) return -1; if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); } switch (size) { case 1: *(uint8_t *)data = val; break; case 2: *(uint16_t *)data = val; break; case 4: *(uint32_t *)data = val; break; case 8: *(uint64_t *)data = val; break; } return 0; } static struct qla82xx_uri_table_desc * qla82xx_get_table_desc(const u8 *unirom, int section) { uint32_t i; struct qla82xx_uri_table_desc *directory = (struct qla82xx_uri_table_desc *)&unirom[0]; uint32_t offset; uint32_t tab_type; uint32_t entries = le32_to_cpu(directory->num_entries); for (i = 0; i < entries; i++) { offset = le32_to_cpu(directory->findex) + (i * le32_to_cpu(directory->entry_size)); tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8); if (tab_type == section) return (struct qla82xx_uri_table_desc *)&unirom[offset]; } return NULL; } static struct qla82xx_uri_data_desc * qla82xx_get_data_desc(struct qla_hw_data *ha, u32 section, u32 idx_offset) { const u8 *unirom = ha->hablob->fw->data; int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] + idx_offset); struct qla82xx_uri_table_desc *tab_desc = NULL; uint32_t offset; tab_desc = qla82xx_get_table_desc(unirom, section); if (!tab_desc) return NULL; offset = le32_to_cpu(tab_desc->findex) + (le32_to_cpu(tab_desc->entry_size) * idx); return (struct qla82xx_uri_data_desc *)&unirom[offset]; } static u8 * qla82xx_get_bootld_offset(struct qla_hw_data *ha) { u32 offset = BOOTLD_START; struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); if (uri_desc) offset = le32_to_cpu(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) return le32_to_cpu(uri_desc->size); } return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * qla82xx_get_fw_offs(struct qla_hw_data *ha) { u32 offset = IMAGE_START; struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) offset = le32_to_cpu(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } /* PCI 
related functions */ int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) { unsigned long val = 0; u32 control; switch (region) { case 0: val = 0; break; case 1: pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); val = control + QLA82XX_MSIX_TBL_SPACE; break; } return val; } int qla82xx_iospace_config(struct qla_hw_data *ha) { uint32_t len = 0; if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000c, "Failed to reserver selected regions.\n"); goto iospace_error_exit; } /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000d, "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } len = pci_resource_len(ha->pdev, 0); ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len); if (!ha->nx_pcibase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, "Cannot remap pcibase MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer */ if (IS_QLA8044(ha)) { ha->iobase = ha->nx_pcibase; } else if (IS_QLA82XX(ha)) { ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11); } if (!ql2xdbwr) { ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) + (ha->pdev->devfn << 12)), 4); if (!ha->nxdb_wr_ptr) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, "Cannot remap MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer, * door bell read and write pointer */ ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) + (ha->pdev->devfn * 8); } else { ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ? QLA82XX_CAMRAM_DB1 : QLA82XX_CAMRAM_DB2); } ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = ha->max_rsp_queues + 1; ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); return 0; iospace_error_exit: return -ENOMEM; } /* GS related functions */ /* Initialization related functions */ /** * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. * @vha: HA context * * Returns 0 on success. */ int qla82xx_pci_config(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int ret; pci_set_master(ha->pdev); ret = pci_set_mwi(ha->pdev); ha->chip_revision = ha->pdev->revision; ql_dbg(ql_dbg_init, vha, 0x0043, "Chip revision:%d; pci_set_mwi() returned %d.\n", ha->chip_revision, ret); return 0; } /** * qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers. * @vha: HA context * * Returns 0 on success. */ int qla82xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; ha->isp_ops->disable_intrs(ha); return QLA_SUCCESS; } void qla82xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; struct init_cb_81xx *icb; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Setup ring parameters in initialization control block. 
*/ icb = (struct init_cb_81xx *)ha->init_cb; icb->request_q_outpointer = cpu_to_le16(0); icb->response_q_inpointer = cpu_to_le16(0); icb->request_q_length = cpu_to_le16(req->length); icb->response_q_length = cpu_to_le16(rsp->length); put_unaligned_le64(req->dma, &icb->request_q_address); put_unaligned_le64(rsp->dma, &icb->response_q_address); wrt_reg_dword(&reg->req_q_out[0], 0); wrt_reg_dword(&reg->rsp_q_in[0], 0); wrt_reg_dword(&reg->rsp_q_out[0], 0); } static int qla82xx_fw_load_from_blob(struct qla_hw_data *ha) { u64 *ptr64; u32 i, flashaddr, size; __le64 data; size = (IMAGE_START - BOOTLD_START) / 8; ptr64 = (u64 *)qla82xx_get_bootld_offset(ha); flashaddr = BOOTLD_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) return -EIO; flashaddr += 8; } flashaddr = FLASH_ADDR_START; size = qla82xx_get_fw_size(ha) / 8; ptr64 = (u64 *)qla82xx_get_fw_offs(ha); for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) return -EIO; flashaddr += 8; } udelay(100); /* Write a magic value to CAMRAM register * at a specified offset to indicate * that all data is written and * ready for firmware to initialize. */ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); return 0; } static int qla82xx_set_product_offset(struct qla_hw_data *ha) { struct qla82xx_uri_table_desc *ptab_desc = NULL; const uint8_t *unirom = ha->hablob->fw->data; uint32_t i; uint32_t entries; uint32_t flags, file_chiprev, offset; uint8_t chiprev = ha->chip_revision; /* Hardcoding mn_present flag for P3P */ int mn_present = 0; uint32_t flagbit; ptab_desc = qla82xx_get_table_desc(unirom, QLA82XX_URI_DIR_SECT_PRODUCT_TBL); if (!ptab_desc) return -1; entries = le32_to_cpu(ptab_desc->num_entries); for (i = 0; i < entries; i++) { offset = le32_to_cpu(ptab_desc->findex) + (i * le32_to_cpu(ptab_desc->entry_size)); flags = le32_to_cpu(*((__le32 *)&unirom[offset] + QLA82XX_URI_FLAGS_OFF)); file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] + QLA82XX_URI_CHIP_REV_OFF)); flagbit = mn_present ? 
1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { ha->file_prd_off = offset; return 0; } } return -1; } static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) { uint32_t val; uint32_t min_size; struct qla_hw_data *ha = vha->hw; const struct firmware *fw = ha->hablob->fw; ha->fw_type = fw_type; if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) { if (qla82xx_set_product_offset(ha)) return -EINVAL; min_size = QLA82XX_URI_FW_MIN_SIZE; } else { val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]); if (val != QLA82XX_BDINFO_MAGIC) return -EINVAL; min_size = QLA82XX_FW_MIN_SIZE; } if (fw->size < min_size) return -EINVAL; return 0; } static int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00a8, "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", val, retries); msleep(500); } while (--retries); ql_log(ql_log_fatal, vha, 0x00a9, "Cmd Peg initialization failed: 0x%x.\n", val); val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } static int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00ab, "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", val, retries); msleep(500); } while (--retries); ql_log(ql_log_fatal, vha, 0x00ac, "Rcv Peg initialization failed: 0x%x.\n", val); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } /* ISR related functions */ static struct qla82xx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; /* * qla82xx_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ void qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; __le16 __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; wptr = &reg->mailbox_out[1]; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; for (cnt = 1; cnt < ha->mbx_count; cnt++) { ha->mailbox_out[cnt] = rd_reg_word(wptr); wptr++; } if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5053, "MBX pointer ERROR.\n"); } /** * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
*/ irqreturn_t qla82xx_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0, status1 = 0; unsigned long flags; unsigned long iter; uint32_t stat = 0; uint16_t mb[8]; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0xb053, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; if (!ha->flags.msi_enabled) { status = qla82xx_rd_32(ha, ISR_INT_VECTOR); if (!(status & ha->nx_legacy_intr.int_vec_bit)) return IRQ_NONE; status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG); if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1)) return IRQ_NONE; } /* clear the interrupt */ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); /* read twice to ensure write is flushed */ qla82xx_rd_32(ha, ISR_INT_VECTOR); qla82xx_rd_32(ha, ISR_INT_VECTOR); reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 1; iter--; ) { if (rd_reg_dword(&reg->host_int)) { stat = rd_reg_dword(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = rd_reg_word(&reg->mailbox_out[1]); mb[2] = rd_reg_word(&reg->mailbox_out[2]); mb[3] = rd_reg_word(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5054, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } } wrt_reg_dword(&reg->host_int, 0); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!ha->flags.msi_enabled) qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); return IRQ_HANDLED; } irqreturn_t qla82xx_msix_default(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0; unsigned long flags; uint32_t stat = 0; uint32_t host_int = 0; uint16_t mb[8]; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { host_int = rd_reg_dword(&reg->host_int); if (qla2x00_check_reg32_for_disconnect(vha, host_int)) break; if (host_int) { stat = rd_reg_dword(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = rd_reg_word(&reg->mailbox_out[1]); mb[2] = rd_reg_word(&reg->mailbox_out[2]); mb[3] = rd_reg_word(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5041, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } } wrt_reg_dword(&reg->host_int, 0); } while (0); qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } irqreturn_t qla82xx_msix_rsp_q(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; unsigned long flags; uint32_t host_int = 0; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = 
&ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); host_int = rd_reg_dword(&reg->host_int); if (qla2x00_check_reg32_for_disconnect(vha, host_int)) goto out; qla24xx_process_response_queue(vha, rsp); wrt_reg_dword(&reg->host_int, 0); out: spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } void qla82xx_poll(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; uint32_t stat; uint32_t host_int = 0; uint16_t mb[8]; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); host_int = rd_reg_dword(&reg->host_int); if (qla2x00_check_reg32_for_disconnect(vha, host_int)) goto out; if (host_int) { stat = rd_reg_dword(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); break; case 0x12: mb[0] = MSW(stat); mb[1] = rd_reg_word(&reg->mailbox_out[1]); mb[2] = rd_reg_word(&reg->mailbox_out[2]); mb[3] = rd_reg_word(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_p3p, vha, 0xb013, "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } wrt_reg_dword(&reg->host_int, 0); } out: spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qla82xx_enable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_enable(vha); spin_lock_irq(&ha->hardware_lock); if (IS_QLA8044(ha)) qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0); else qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 1; } void qla82xx_disable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (ha->interrupts_on) qla82xx_mbx_intr_disable(vha); spin_lock_irq(&ha->hardware_lock); if (IS_QLA8044(ha)) qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1); else qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 0; } void qla82xx_init_flags(struct qla_hw_data *ha) { struct qla82xx_legacy_intr_set *nx_legacy_intr; /* ISP 8021 initializations */ rwlock_init(&ha->hw_lock); ha->qdr_sn_window = -1; ha->ddr_mn_window = -1; ha->curr_window = 255; ha->portnum = PCI_FUNC(ha->pdev->devfn); nx_legacy_intr = &legacy_intr[ha->portnum]; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } static inline void qla82xx_set_idc_version(scsi_qla_host_t *vha) { int idc_ver; uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); ql_log(ql_log_info, vha, 0xb082, "IDC version updated to %d\n", QLA82XX_IDC_VERSION); } else { idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION); if (idc_ver != QLA82XX_IDC_VERSION) ql_log(ql_log_info, vha, 0xb083, "qla2xxx driver IDC version %d is not compatible " "with IDC version %d of the other drivers\n", QLA82XX_IDC_VERSION, idc_ver); } } inline 
void qla82xx_set_drv_active(scsi_qla_host_t *vha) { uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); /* If reset value is all FF's, initialize DRV_ACTIVE */ if (drv_active == 0xffffffff) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, QLA82XX_DRV_NOT_ACTIVE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); } drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } inline void qla82xx_clear_drv_active(struct qla_hw_data *ha) { uint32_t drv_active; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } static inline int qla82xx_need_reset(struct qla_hw_data *ha) { uint32_t drv_state; int rval; if (ha->flags.nic_core_reset_owner) return 1; else { drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); return rval; } } static inline void qla82xx_set_rst_ready(struct qla_hw_data *ha) { uint32_t drv_state; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); /* If reset value is all FF's, initialize DRV_STATE */ if (drv_state == 0xffffffff) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); ql_dbg(ql_dbg_init, vha, 0x00bb, "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } static inline void qla82xx_clear_rst_ready(struct qla_hw_data *ha) { uint32_t drv_state; drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } static inline void qla82xx_set_qsnt_ready(struct qla_hw_data *ha) { uint32_t qsnt_state; qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t qsnt_state; qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } static int qla82xx_load_fw(scsi_qla_host_t *vha) { int rst; struct fw_blob *blob; struct qla_hw_data *ha = vha->hw; if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x009f, "Error during CRB initialization.\n"); return QLA_FUNCTION_FAILED; } udelay(500); /* Bring QM and CAMRAM out of reset */ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); rst &= ~((1 << 28) | (1 << 24)); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); /* * FW Load priority: * 1) Operational firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). */ if (ql2xfwloadbin == 2) goto try_blob_fw; ql_log(ql_log_info, vha, 0x00a0, "Attempting to load firmware from flash.\n"); if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a1, "Firmware loaded successfully from flash.\n"); return QLA_SUCCESS; } else { ql_log(ql_log_warn, vha, 0x0108, "Firmware load from flash failed.\n"); } try_blob_fw: ql_log(ql_log_info, vha, 0x00a2, "Attempting to load firmware from blob.\n"); /* Load firmware blob. 
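The image is obtained via qla2x00_request_firmware() and validated first as a flash ROM image, falling back to the unified (URI) layout before download.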
*/ blob = ha->hablob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_fatal, vha, 0x00a3, "Firmware image not present.\n"); goto fw_load_failed; } /* Validating firmware blob */ if (qla82xx_validate_firmware_blob(vha, QLA82XX_FLASH_ROMIMAGE)) { /* Fallback to URI format */ if (qla82xx_validate_firmware_blob(vha, QLA82XX_UNIFIED_ROMIMAGE)) { ql_log(ql_log_fatal, vha, 0x00a4, "No valid firmware image found.\n"); return QLA_FUNCTION_FAILED; } } if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a5, "Firmware loaded successfully from binary blob.\n"); return QLA_SUCCESS; } ql_log(ql_log_fatal, vha, 0x00a6, "Firmware load failed for binary blob.\n"); blob->fw = NULL; blob = NULL; fw_load_failed: return QLA_FUNCTION_FAILED; } int qla82xx_start_firmware(scsi_qla_host_t *vha) { uint16_t lnk; struct qla_hw_data *ha = vha->hw; /* scrub dma mask expansion register */ qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); /* Put both the PEG CMD and RCV PEG to default state * of 0 before resetting the hardware */ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); /* Overwrite stale initialization register values */ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); if (qla82xx_load_fw(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00a7, "Error trying to start fw.\n"); return QLA_FUNCTION_FAILED; } /* Handshake with the card before we register the devices. */ if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00aa, "Error during card handshake.\n"); return QLA_FUNCTION_FAILED; } /* Negotiated Link width */ pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; /* Synchronize with Receive peg */ return qla82xx_check_rcvpeg_state(ha); } static __le32 * qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, uint32_t length) { uint32_t i; uint32_t val; struct qla_hw_data *ha = vha->hw; /* Dword reads to flash. 
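Each 32-bit word is fetched through the ROM fast-read interface and stored little-endian; the loop bails out early if any read fails.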
*/ for (i = 0; i < length/4; i++, faddr += 4) { if (qla82xx_rom_fast_read(ha, faddr, &val)) { ql_log(ql_log_warn, vha, 0x0106, "Do ROM fast read failed.\n"); goto done_read; } dwptr[i] = cpu_to_le32(val); } done_read: return dwptr; } static int qla82xx_unprotect_flash(struct qla_hw_data *ha) { int ret; uint32_t val; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb014, "ROM Lock failed.\n"); return ret; } ret = qla82xx_read_status_reg(ha, &val); if (ret < 0) goto done_unprotect; val &= ~(BLOCK_PROTECT_BITS << 2); ret = qla82xx_write_status_reg(ha, val); if (ret < 0) { val |= (BLOCK_PROTECT_BITS << 2); qla82xx_write_status_reg(ha, val); } if (qla82xx_write_disable_flash(ha) != 0) ql_log(ql_log_warn, vha, 0xb015, "Write disable failed.\n"); done_unprotect: qla82xx_rom_unlock(ha); return ret; } static int qla82xx_protect_flash(struct qla_hw_data *ha) { int ret; uint32_t val; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb016, "ROM Lock failed.\n"); return ret; } ret = qla82xx_read_status_reg(ha, &val); if (ret < 0) goto done_protect; val |= (BLOCK_PROTECT_BITS << 2); /* LOCK all sectors */ ret = qla82xx_write_status_reg(ha, val); if (ret < 0) ql_log(ql_log_warn, vha, 0xb017, "Write status register failed.\n"); if (qla82xx_write_disable_flash(ha) != 0) ql_log(ql_log_warn, vha, 0xb018, "Write disable failed.\n"); done_protect: qla82xx_rom_unlock(ha); return ret; } static int qla82xx_erase_sector(struct qla_hw_data *ha, int addr) { int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb019, "ROM Lock failed.\n"); return ret; } qla82xx_flash_set_write_enable(ha); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb01a, "Error waiting for rom done.\n"); ret = -1; goto done; } ret = qla82xx_flash_wait_write_finish(ha); done: qla82xx_rom_unlock(ha); return ret; } /* * Address and length are byte address */ void * qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { scsi_block_requests(vha->host); qla82xx_read_flash_data(vha, buf, offset, length); scsi_unblock_requests(vha->host); return buf; } static int qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr, uint32_t faddr, uint32_t dwords) { int ret; uint32_t liter; uint32_t rest_addr; dma_addr_t optrom_dma; void *optrom = NULL; int page_mode = 0; struct qla_hw_data *ha = vha->hw; ret = -1; /* Prepare burst-capable write on supported ISPs. */ if (page_mode && !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { ql_log(ql_log_warn, vha, 0xb01b, "Unable to allocate memory " "for optrom burst write (%x KB).\n", OPTROM_BURST_SIZE / 1024); } } rest_addr = ha->fdt_block_size - 1; ret = qla82xx_unprotect_flash(ha); if (ret) { ql_log(ql_log_warn, vha, 0xb01c, "Unable to unprotect flash for update.\n"); goto write_done; } for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { /* Are we at the beginning of a sector? 
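rest_addr masks the offset within one fdt_block_size sector, so a zero result marks a sector boundary that must be erased before programming.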
*/ if ((faddr & rest_addr) == 0) { ret = qla82xx_erase_sector(ha, faddr); if (ret) { ql_log(ql_log_warn, vha, 0xb01d, "Unable to erase sector: address=%x.\n", faddr); break; } } /* Go with burst-write. */ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { /* Copy data to DMA'ble buffer. */ memcpy(optrom, dwptr, OPTROM_BURST_SIZE); ret = qla2x00_load_ram(vha, optrom_dma, (ha->flash_data_off | faddr), OPTROM_BURST_DWORDS); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb01e, "Unable to burst-write optrom segment " "(%x/%x/%llx).\n", ret, (ha->flash_data_off | faddr), (unsigned long long)optrom_dma); ql_log(ql_log_warn, vha, 0xb01f, "Reverting to slow-write.\n"); dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); optrom = NULL; } else { liter += OPTROM_BURST_DWORDS - 1; faddr += OPTROM_BURST_DWORDS - 1; dwptr += OPTROM_BURST_DWORDS - 1; continue; } } ret = qla82xx_write_flash_dword(ha, faddr, le32_to_cpu(*dwptr)); if (ret) { ql_dbg(ql_dbg_p3p, vha, 0xb020, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); break; } } ret = qla82xx_protect_flash(ha); if (ret) ql_log(ql_log_warn, vha, 0xb021, "Unable to protect flash after update.\n"); write_done: if (optrom) dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); return ret; } int qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; /* Suspend HBA. */ scsi_block_requests(vha->host); rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2); scsi_unblock_requests(vha->host); /* Convert return ISP82xx to generic */ if (rval) rval = QLA_FUNCTION_FAILED; else rval = QLA_SUCCESS; return rval; } void qla82xx_start_iocbs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; uint32_t dbval; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; dbval = 0x04 | (ha->portnum << 5); dbval = dbval | (req->id << 8) | (req->ring_index << 16); if (ql2xdbwr) qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval); else { wrt_reg_dword(ha->nxdb_wr_ptr, dbval); wmb(); while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { wrt_reg_dword(ha->nxdb_wr_ptr, dbval); wmb(); } } } static void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); uint32_t lock_owner = 0; if (qla82xx_rom_lock(ha)) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); /* Someone else is holding the lock. */ ql_log(ql_log_info, vha, 0xb022, "Resetting rom_lock, Lock Owner %u.\n", lock_owner); } /* * Either we got the lock, or someone * else died while holding it. * In either case, unlock. */ qla82xx_rom_unlock(ha); } /* * qla82xx_device_bootstrap * Initialize device, set DEV_READY, start fw * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ static int qla82xx_device_bootstrap(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; int i; uint32_t old_count, count; struct qla_hw_data *ha = vha->hw; int need_reset = 0; need_reset = qla82xx_need_reset(ha); if (need_reset) { /* We are trying to perform a recovery here. 
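A hung firmware may still hold the ROM lock, so reclaim it before reinitializing.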
*/ if (ha->flags.isp82xx_fw_hung) qla82xx_rom_lock_recovery(ha); } else { old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); for (i = 0; i < 10; i++) { msleep(200); count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); if (count != old_count) { rval = QLA_SUCCESS; goto dev_ready; } } qla82xx_rom_lock_recovery(ha); } /* set to DEV_INITIALIZING */ ql_log(ql_log_info, vha, 0x009e, "HW State: INITIALIZING.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); qla82xx_idc_unlock(ha); rval = qla82xx_start_firmware(vha); qla82xx_idc_lock(ha); if (rval != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00ad, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); return rval; } dev_ready: ql_log(ql_log_info, vha, 0x00ae, "HW State: READY.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); return QLA_SUCCESS; } /* * qla82xx_need_qsnt_handler * Code to start quiescence sequence * * Note: * IDC lock must be held upon entry * * Return: void */ static void qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state, drv_state, drv_active; unsigned long reset_timeout; if (vha->flags.online) { /*Block any further I/O and wait for pending cmnds to complete*/ qla2x00_quiesce_io(vha); } /* Set the quiescence ready bit */ qla82xx_set_qsnt_ready(ha); /*wait for 30 secs for other functions to ack */ reset_timeout = jiffies + (30 * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); /* Its 2 that is written when qsnt is acked, moving one bit */ drv_active = drv_active << 0x01; while (drv_state != drv_active) { if (time_after_eq(jiffies, reset_timeout)) { /* quiescence timeout, other functions didn't ack * changing the state to DEV_READY */ ql_log(ql_log_info, vha, 0xb023, "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d " "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, drv_active, drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); ql_log(ql_log_info, vha, 0xb025, "HW State: DEV_READY.\n"); qla82xx_idc_unlock(ha); qla2x00_perform_loop_resync(vha); qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(vha); return; } qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active = drv_active << 0x01; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); /* everyone acked so set the state to DEV_QUIESCENCE */ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { ql_log(ql_log_info, vha, 0xb026, "HW State: DEV_QUIESCENT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT); } } /* * qla82xx_wait_for_state_change * Wait for device state to change from given current state * * Note: * IDC lock must not be held upon entry * * Return: * Changed device state. 
*/ uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state; do { msleep(1000); qla82xx_idc_lock(ha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); qla82xx_idc_unlock(ha); } while (dev_state == curr_state); return dev_state; } void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Disable the board */ ql_log(ql_log_fatal, vha, 0x00b8, "Disabling the board.\n"); if (IS_QLA82XX(ha)) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } else if (IS_QLA8044(ha)) { qla8044_clear_drv_active(ha); qla8044_idc_unlock(ha); } /* Set DEV_FAILED flag to disable timer */ vha->device_flags |= DFLG_DEV_FAILED; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); qla2x00_mark_all_devices_lost(vha); vha->flags.online = 0; vha->flags.init_done = 0; } /* * qla82xx_need_reset_handler * Code to start reset sequence * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ static void qla82xx_need_reset_handler(scsi_qla_host_t *vha) { uint32_t dev_state, drv_state, drv_active; uint32_t active_mask = 0; unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; if (vha->flags.online) { qla82xx_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); ha->isp_ops->get_flash_version(vha, req->ring); ha->isp_ops->nvram_config(vha); qla82xx_idc_lock(ha); } drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (!ha->flags.nic_core_reset_owner) { ql_dbg(ql_dbg_p3p, vha, 0xb028, "reset_acknowledged by 0x%x\n", ha->portnum); qla82xx_set_rst_ready(ha); } else { active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); drv_active &= active_mask; ql_dbg(ql_dbg_p3p, vha, 0xb029, "active_mask: 0x%08x\n", active_mask); } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); ql_dbg(ql_dbg_p3p, vha, 0xb02a, "drv_state: 0x%08x, drv_active: 0x%08x, " "dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); while (drv_state != drv_active && dev_state != QLA8XXX_DEV_INITIALIZING) { if (time_after_eq(jiffies, reset_timeout)) { ql_log(ql_log_warn, vha, 0x00b5, "Reset timeout.\n"); break; } qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (ha->flags.nic_core_reset_owner) drv_active &= active_mask; dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); } ql_dbg(ql_dbg_p3p, vha, 0xb02b, "drv_state: 0x%08x, drv_active: 0x%08x, " "dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); ql_log(ql_log_info, vha, 0x00b6, "Device state is 0x%x = %s.\n", dev_state, qdev_state(dev_state)); /* Force to DEV_COLD unless someone else is starting a reset */ if (dev_state != QLA8XXX_DEV_INITIALIZING && dev_state != QLA8XXX_DEV_COLD) { ql_log(ql_log_info, vha, 0x00b7, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); qla82xx_set_rst_ready(ha); if (ql2xmdenable) { if (qla82xx_md_collect(vha)) ql_log(ql_log_warn, vha, 0xb02c, "Minidump not collected.\n"); } else ql_log(ql_log_warn, vha, 0xb04f, "Minidump disabled.\n"); } } int qla82xx_check_md_needed(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; 
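/* Snapshot the cached firmware version; if qla2x00_get_fw_version() below reports a different one, or the previous minidump failed, the minidump buffers are released and reallocated. */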
uint16_t fw_major_version, fw_minor_version, fw_subminor_version; int rval = QLA_SUCCESS; fw_major_version = ha->fw_major_version; fw_minor_version = ha->fw_minor_version; fw_subminor_version = ha->fw_subminor_version; rval = qla2x00_get_fw_version(vha); if (rval != QLA_SUCCESS) return rval; if (ql2xmdenable) { if (!ha->fw_dumped) { if ((fw_major_version != ha->fw_major_version || fw_minor_version != ha->fw_minor_version || fw_subminor_version != ha->fw_subminor_version) || (ha->prev_minidump_failed)) { ql_dbg(ql_dbg_p3p, vha, 0xb02d, "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", fw_major_version, fw_minor_version, fw_subminor_version, ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version, ha->prev_minidump_failed); /* Release MiniDump resources */ qla82xx_md_free(vha); /* ALlocate MiniDump resources */ qla82xx_md_prep(vha); } } else ql_log(ql_log_info, vha, 0xb02e, "Firmware dump available to retrieve\n"); } return rval; } static int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { uint32_t fw_heartbeat_counter; int status = 0; fw_heartbeat_counter = qla82xx_rd_32(vha->hw, QLA82XX_PEG_ALIVE_COUNTER); /* all 0xff, assume AER/EEH in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) { ql_dbg(ql_dbg_timer, vha, 0x6003, "FW heartbeat counter is 0xffffffff, " "returning status=%d.\n", status); return status; } if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; status = 1; } } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; if (status) ql_dbg(ql_dbg_timer, vha, 0x6004, "Returning status=%d.\n", status); return status; } /* * qla82xx_device_state_handler * Main state handler * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ int qla82xx_device_state_handler(scsi_qla_host_t *vha) { uint32_t dev_state; uint32_t old_dev_state; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; struct qla_hw_data *ha = vha->hw; int loopcount = 0; qla82xx_idc_lock(ha); if (!vha->flags.init_done) { qla82xx_set_drv_active(vha); qla82xx_set_idc_version(vha); } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); old_dev_state = dev_state; ql_log(ql_log_info, vha, 0x009b, "Device state is 0x%x = %s.\n", dev_state, qdev_state(dev_state)); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { ql_log(ql_log_fatal, vha, 0x009c, "Device init failed.\n"); rval = QLA_FUNCTION_FAILED; break; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (old_dev_state != dev_state) { loopcount = 0; old_dev_state = dev_state; } if (loopcount < 5) { ql_log(ql_log_info, vha, 0x009d, "Device state is 0x%x = %s.\n", dev_state, qdev_state(dev_state)); } switch (dev_state) { case QLA8XXX_DEV_READY: ha->flags.nic_core_reset_owner = 0; goto rel_lock; case QLA8XXX_DEV_COLD: rval = qla82xx_device_bootstrap(vha); break; case QLA8XXX_DEV_INITIALIZING: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); break; case QLA8XXX_DEV_NEED_RESET: if (!ql2xdontresethba) qla82xx_need_reset_handler(vha); else { qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); } dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); 
/* Reset timeout value after quiescence handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_QUIESCENT: /* Owner will exit and other will wait for the state * to get changed */ if (ha->flags.quiesce_owner) goto rel_lock; qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); /* Reset timeout value after quiescence handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_FAILED: qla8xxx_dev_failed_handler(vha); rval = QLA_FUNCTION_FAILED; goto exit; default: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); } loopcount++; } rel_lock: qla82xx_idc_unlock(ha); exit: return rval; } static int qla82xx_check_temp(scsi_qla_host_t *vha) { uint32_t temp, temp_state, temp_val; struct qla_hw_data *ha = vha->hw; temp = qla82xx_rd_32(ha, CRB_TEMP_STATE); temp_state = qla82xx_get_temp_state(temp); temp_val = qla82xx_get_temp_val(temp); if (temp_state == QLA82XX_TEMP_PANIC) { ql_log(ql_log_warn, vha, 0x600e, "Device temperature %d degrees C exceeds " " maximum allowed. Hardware has been shut down.\n", temp_val); return 1; } else if (temp_state == QLA82XX_TEMP_WARN) { ql_log(ql_log_warn, vha, 0x600f, "Device temperature %d degrees C exceeds " "operating range. Immediate action needed.\n", temp_val); } return 0; } int qla82xx_read_temperature(scsi_qla_host_t *vha) { uint32_t temp; temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE); return qla82xx_get_temp_val(temp); } void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; ha->flags.mbox_busy = 0; ql_log(ql_log_warn, vha, 0x6010, "Doing premature completion of mbx command.\n"); if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); } } void qla82xx_watchdog(scsi_qla_host_t *vha) { uint32_t dev_state, halt_status; struct qla_hw_data *ha = vha->hw; /* don't poll if reset is going on */ if (!ha->flags.nic_core_reset_hdlr_active) { dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (qla82xx_check_temp(vha)) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); } else if (dev_state == QLA8XXX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6001, "Adapter reset needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6002, "Quiescent needed.\n"); set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); } else if (dev_state == QLA8XXX_DEV_FAILED && !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) && vha->flags.online == 1) { ql_log(ql_log_warn, vha, 0xb055, "Adapter state is failed. 
Offlining.\n"); set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); } else { if (qla82xx_check_fw_alive(vha)) { ql_dbg(ql_dbg_timer, vha, 0x6011, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1); halt_status = qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); ql_log(ql_log_info, vha, 0x6005, "dumping hw/fw registers:.\n " " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n " " PEG_NET_4_PC: 0x%x.\n", halt_status, qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); if (((halt_status & 0x1fffff00) >> 8) == 0x67) ql_log(ql_log_warn, vha, 0xb052, "Firmware aborted with " "error code 0x00006700. Device is " "being reset.\n"); if (halt_status & HALT_STATUS_UNRECOVERABLE) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); } else { ql_log(ql_log_info, vha, 0x6006, "Detect abort needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } ha->flags.isp82xx_fw_hung = 1; ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } } } } int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval = -1; struct qla_hw_data *ha = vha->hw; if (IS_QLA82XX(ha)) rval = qla82xx_device_state_handler(vha); else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); /* Decide the reset ownership */ qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } return rval; } void qla82xx_set_reset_owner(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state = 0; if (IS_QLA82XX(ha)) dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); else if (IS_QLA8044(ha)) dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); if (dev_state == QLA8XXX_DEV_READY) { ql_log(ql_log_info, vha, 0xb02f, "HW State: NEED RESET\n"); if (IS_QLA82XX(ha)) { qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); ha->flags.nic_core_reset_owner = 1; ql_dbg(ql_dbg_p3p, vha, 0xb030, "reset_owner is 0x%x\n", ha->portnum); } else if (IS_QLA8044(ha)) qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_NEED_RESET); } else ql_log(ql_log_info, vha, 0xb031, "Device state is 0x%x = %s.\n", dev_state, qdev_state(dev_state)); } /* * qla82xx_abort_isp * Resets ISP and aborts all outstanding commands. * * Input: * ha = adapter block pointer. 
* * Returns: * 0 = success */ int qla82xx_abort_isp(scsi_qla_host_t *vha) { int rval = -1; struct qla_hw_data *ha = vha->hw; if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x8024, "Device in failed state, exiting.\n"); return QLA_SUCCESS; } ha->flags.nic_core_reset_hdlr_active = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); if (IS_QLA82XX(ha)) rval = qla82xx_device_state_handler(vha); else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); /* Decide the reset ownership */ qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); qla82xx_idc_unlock(ha); if (rval == QLA_SUCCESS) { ha->flags.isp82xx_fw_hung = 0; ha->flags.nic_core_reset_hdlr_active = 0; qla82xx_restart_isp(vha); } if (rval) { vha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (ha->isp_abort_cnt == 0) { ql_log(ql_log_warn, vha, 0x8027, "ISP error recover failed - board " "disabled.\n"); /* * The next call disables the board * completely. */ ha->isp_ops->reset_adapter(vha); vha->flags.online = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); rval = QLA_SUCCESS; } else { /* schedule another ISP abort */ ha->isp_abort_cnt--; ql_log(ql_log_warn, vha, 0x8036, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); rval = QLA_FUNCTION_FAILED; } } else { ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; ql_dbg(ql_dbg_taskm, vha, 0x8029, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); rval = QLA_FUNCTION_FAILED; } } return rval; } /* * qla82xx_fcoe_ctx_reset * Perform a quick reset and aborts all outstanding commands. * This will only perform an FCoE context reset and avoids a full blown * chip reset. * * Input: * ha = adapter block pointer. * is_reset_path = flag for identifying the reset path. * * Returns: * 0 = success */ int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) { int rval = QLA_FUNCTION_FAILED; if (vha->flags.online) { /* Abort all outstanding commands, so as to be requeued later */ qla2x00_abort_isp_cleanup(vha); } /* Stop currently executing firmware. * This will destroy existing FCoE context at the F/W end. */ qla2x00_try_to_stop_firmware(vha); /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ rval = qla82xx_restart_isp(vha); return rval; } /* * qla2x00_wait_for_fcoe_ctx_reset * Wait till the FCoE context is reset. * * Note: * Does context switching here. * Release SPIN_LOCK (if any) before calling this routine. 
* * Return: * Success (fcoe_ctx reset is done) : 0 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 */ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) { int status = QLA_FUNCTION_FAILED; unsigned long wait_reset; wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && time_before(jiffies, wait_reset)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { status = QLA_SUCCESS; break; } } ql_dbg(ql_dbg_p3p, vha, 0xb027, "%s: status=%d.\n", __func__, status); return status; } void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) { int i, fw_state = 0; unsigned long flags; struct qla_hw_data *ha = vha->hw; /* Check if 82XX firmware is alive or not * We may have arrived here from NEED_RESET * detection only */ if (!ha->flags.isp82xx_fw_hung) { for (i = 0; i < 2; i++) { msleep(1000); if (IS_QLA82XX(ha)) fw_state = qla82xx_check_fw_alive(vha); else if (IS_QLA8044(ha)) fw_state = qla8044_check_fw_alive(vha); if (fw_state) { ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); break; } } } ql_dbg(ql_dbg_init, vha, 0x00b0, "Entered %s fw_hung=%d.\n", __func__, ha->flags.isp82xx_fw_hung); /* Abort all commands gracefully if fw NOT hung */ if (!ha->flags.isp82xx_fw_hung) { int cnt, que; srb_t *sp; struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { if ((!sp->u.scmd.crc_ctx || (sp->flags & SRB_FCP_CMND_DMA_VALID)) && !ha->flags.isp82xx_fw_hung) { spin_unlock_irqrestore( &ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { ql_log(ql_log_info, vha, 0x00b1, "mbx abort failed.\n"); } else { ql_log(ql_log_info, vha, 0x00b2, "mbx abort success.\n"); } spin_lock_irqsave(&ha->hardware_lock, flags); } } } } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for pending cmds (physical and virtual) to complete */ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00b3, "Done wait for " "pending commands.\n"); } else { WARN_ON_ONCE(true); } } } /* Minidump related functions */ static int qla82xx_minidump_process_control(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; struct qla82xx_md_entry_crb *crb_entry; uint32_t read_value, opcode, poll_time; uint32_t addr, index, crb_addr; unsigned long wtime; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t rval = QLA_SUCCESS; int i; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; for (i = 0; i < crb_entry->op_count; i++) { opcode = crb_entry->crb_ctrl.opcode; if (opcode & QLA82XX_DBG_OPCODE_WR) { qla82xx_md_rw_32(ha, crb_addr, crb_entry->value_1, 1); opcode &= ~QLA82XX_DBG_OPCODE_WR; } if (opcode & QLA82XX_DBG_OPCODE_RW) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_RW; } if (opcode & QLA82XX_DBG_OPCODE_AND) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value &= crb_entry->value_2; opcode &= ~QLA82XX_DBG_OPCODE_AND; if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value |= crb_entry->value_3; 
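/* An OR combined with AND is folded in here so the standalone OR handler below is skipped and a single write-back is issued. */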
opcode &= ~QLA82XX_DBG_OPCODE_OR; } qla82xx_md_rw_32(ha, crb_addr, read_value, 1); } if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value |= crb_entry->value_3; qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_OR; } if (opcode & QLA82XX_DBG_OPCODE_POLL) { poll_time = crb_entry->crb_strd.poll_timeout; wtime = jiffies + poll_time; read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); do { if ((read_value & crb_entry->value_2) == crb_entry->value_1) break; else if (time_after_eq(jiffies, wtime)) { /* capturing dump failed */ rval = QLA_FUNCTION_FAILED; break; } else read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); } while (1); opcode &= ~QLA82XX_DBG_OPCODE_POLL; } if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; read_value = qla82xx_md_rw_32(ha, addr, 0, 0); index = crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; } if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; if (crb_entry->crb_ctrl.state_index_v) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else read_value = crb_entry->value_1; qla82xx_md_rw_32(ha, addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; } if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value <<= crb_entry->crb_ctrl.shl; read_value >>= crb_entry->crb_ctrl.shr; if (crb_entry->value_2) read_value &= crb_entry->value_2; read_value |= crb_entry->value_3; read_value += crb_entry->value_1; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; } crb_addr += crb_entry->crb_strd.addr_stride; } return rval; } static void qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_rdocm *ocm_hdr; __le32 *data_ptr = *d_ptr; ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = rd_reg_dword(r_addr + ha->nx_pcibase); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; struct qla82xx_md_entry_mux *mux_hdr; __le32 *data_ptr = *d_ptr; mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, s_value, 1); r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(s_value); *data_ptr++ = cpu_to_le32(r_value); s_value += s_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, 
r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_crb *crb_hdr; __le32 *data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_addr); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; unsigned long p_wait, w_time, p_mask; uint32_t c_value_w, c_value_r; struct qla82xx_md_entry_cache *cache_hdr; int rval = QLA_FUNCTION_FAILED; __le32 *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; p_wait = cache_hdr->cache_ctrl.poll_wait; p_mask = cache_hdr->cache_ctrl.poll_mask; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); if (c_value_w) qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); if (p_mask) { w_time = jiffies + p_wait; do { c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); if ((c_value_r & p_mask) == 0) break; else if (time_after_eq(jiffies, w_time)) { /* capturing dump failed */ ql_dbg(ql_dbg_p3p, vha, 0xb032, "c_value_r: 0x%x, poll_mask: 0x%lx, " "w_time: 0x%lx\n", c_value_r, p_mask, w_time); return rval; } } while (1); } addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; } static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla82xx_md_entry_cache *cache_hdr; __le32 *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla82xx_md_entry_queue *q_hdr; __le32 *data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = q_hdr->rd_strd.read_addr_cnt; r_stride = q_hdr->rd_strd.read_addr_stride; loop_cnt = 
q_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, qid, 1); r_addr = q_hdr->read_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } qid += q_hdr->q_strd.queue_id_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value; uint32_t i, loop_cnt; struct qla82xx_md_entry_rdrom *rom_hdr; __le32 *data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (r_addr & 0xFFFF0000), 1); r_value = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF), 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += sizeof(uint32_t); } *d_ptr = data_ptr; } static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value, r_data; uint32_t i, j, loop_cnt; struct qla82xx_md_entry_rdmem *m_hdr; unsigned long flags; int rval = QLA_FUNCTION_FAILED; __le32 *data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size/16; if (r_addr & 0xf) { ql_log(ql_log_warn, vha, 0xb033, "Read addr 0x%x not 16 bytes aligned\n", r_addr); return rval; } if (m_hdr->read_data_size % 16) { ql_log(ql_log_warn, vha, 0xb034, "Read data[0x%x] not multiple of 16 bytes\n", m_hdr->read_data_size); return rval; } ql_dbg(ql_dbg_p3p, vha, 0xb035, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, m_hdr->read_data_size, loop_cnt); write_lock_irqsave(&ha->hw_lock, flags); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); r_value = 0; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); r_value = MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); for (j = 0; j < MAX_CTL_CHECK; j++) { r_value = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, 0, 0); if ((r_value & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "failed to read through agent\n"); write_unlock_irqrestore(&ha->hw_lock, flags); return rval; } for (j = 0; j < 4; j++) { r_data = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_RDDATA[j], 0, 0); *data_ptr++ = cpu_to_le32(r_data); } r_addr += 16; } write_unlock_irqrestore(&ha->hw_lock, flags); *d_ptr = data_ptr; return QLA_SUCCESS; } int qla82xx_validate_template_chksum(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint64_t chksum = 0; uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; int count = ha->md_template_size/sizeof(uint32_t); while (count-- > 0) chksum += *d_ptr++; while (chksum >> 32) chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32); return ~chksum; } static void qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, int index) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb036, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); } int qla82xx_md_collect(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int no_entry_hdr = 0; 
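/* Validate the downloaded template, then walk its entry headers, dispatching on entry_type and appending captured data to ha->md_dump. */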
qla82xx_md_entry_hdr_t *entry_hdr; struct qla82xx_md_template_hdr *tmplt_hdr; __le32 *data_ptr; uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; int i = 0, rval = QLA_FUNCTION_FAILED; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; data_ptr = ha->md_dump; if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xb037, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); goto md_failed; } ha->fw_dumped = false; if (!ha->md_tmplt_hdr || !ha->md_dump) { ql_log(ql_log_warn, vha, 0xb038, "Memory not allocated for minidump capture\n"); goto md_failed; } if (ha->flags.isp82xx_no_md_cap) { ql_log(ql_log_warn, vha, 0xb054, "Forced reset from application, " "ignore minidump capture\n"); ha->flags.isp82xx_no_md_cap = 0; goto md_failed; } if (qla82xx_validate_template_chksum(vha)) { ql_log(ql_log_info, vha, 0xb039, "Template checksum validation error\n"); goto md_failed; } no_entry_hdr = tmplt_hdr->num_of_entries; ql_dbg(ql_dbg_p3p, vha, 0xb03a, "No of entry headers in Template: 0x%x\n", no_entry_hdr); ql_dbg(ql_dbg_p3p, vha, 0xb03b, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; /* Validate whether required debug level is set */ if ((f_capture_mask & 0x3) != 0x3) { ql_log(ql_log_warn, vha, 0xb03c, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); goto md_failed; } tmplt_hdr->driver_capture_mask = ql2xmdcapmask; tmplt_hdr->driver_info[0] = vha->host_no; tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | QLA_DRIVER_BETA_VER; total_data_size = ha->md_dump_size; ql_dbg(ql_dbg_p3p, vha, 0xb03d, "Total minidump data_size 0x%x to be captured\n", total_data_size); /* Check whether template obtained is valid */ if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { ql_log(ql_log_warn, vha, 0xb04e, "Bad template header entry type: 0x%x obtained\n", tmplt_hdr->entry_type); goto md_failed; } entry_hdr = (qla82xx_md_entry_hdr_t *) (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); /* Walk through the entry headers */ for (i = 0; i < no_entry_hdr; i++) { if (data_collected > total_data_size) { ql_log(ql_log_warn, vha, 0xb03e, "More MiniDump data collected: [0x%x]\n", data_collected); goto md_failed; } if (!(entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask)) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb03f, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", i, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); goto skip_nxt_entry; } ql_dbg(ql_dbg_p3p, vha, 0xb040, "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" "entry_type: 0x%x, capture_mask: 0x%x\n", __func__, i, data_ptr, entry_hdr, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); ql_dbg(ql_dbg_p3p, vha, 0xb041, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, (ha->md_dump_size - data_collected)); /* Decode the entry type and take * required action to capture debug data */ switch (entry_hdr->entry_type) { case QLA82XX_RDEND: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; case QLA82XX_CNTRL: rval = qla82xx_minidump_process_control(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_RDCRB: qla82xx_minidump_process_rdcrb(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMEM: rval = qla82xx_minidump_process_rdmem(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { 
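/* A failed memory capture invalidates the whole dump: mark the entry skipped and abort. */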
qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_BOARD: case QLA82XX_RDROM: qla82xx_minidump_process_rdrom(vha, entry_hdr, &data_ptr); break; case QLA82XX_L2DTG: case QLA82XX_L2ITG: case QLA82XX_L2DAT: case QLA82XX_L2INS: rval = qla82xx_minidump_process_l2tag(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_L1DAT: case QLA82XX_L1INS: qla82xx_minidump_process_l1cache(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDOCM: qla82xx_minidump_process_rdocm(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMUX: qla82xx_minidump_process_rdmux(vha, entry_hdr, &data_ptr); break; case QLA82XX_QUEUE: qla82xx_minidump_process_queue(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDNOP: default: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; } ql_dbg(ql_dbg_p3p, vha, 0xb042, "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->md_dump; skip_nxt_entry: entry_hdr = (qla82xx_md_entry_hdr_t *) (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } if (data_collected != total_data_size) { ql_dbg(ql_dbg_p3p, vha, 0xb043, "MiniDump data mismatch: Data collected: [0x%x]," "total_data_size:[0x%x]\n", data_collected, total_data_size); goto md_failed; } ql_log(ql_log_info, vha, 0xb044, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = true; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); md_failed: return rval; } int qla82xx_md_alloc(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int i, k; struct qla82xx_md_template_hdr *tmplt_hdr; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; ql_log(ql_log_info, vha, 0xb045, "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", ql2xmdcapmask); } for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { if (i & ql2xmdcapmask) ha->md_dump_size += tmplt_hdr->capture_size_array[k]; } if (ha->md_dump) { ql_log(ql_log_warn, vha, 0xb046, "Firmware dump previously allocated.\n"); return 1; } ha->md_dump = vmalloc(ha->md_dump_size); if (ha->md_dump == NULL) { ql_log(ql_log_warn, vha, 0xb047, "Unable to allocate memory for Minidump size " "(0x%x).\n", ha->md_dump_size); return 1; } return 0; } void qla82xx_md_free(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Release the template header allocated */ if (ha->md_tmplt_hdr) { ql_log(ql_log_info, vha, 0xb048, "Free MiniDump template: %p, size (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } /* Release the template data buffer allocated */ if (ha->md_dump) { ql_log(ql_log_info, vha, 0xb049, "Free MiniDump memory: %p, size (%d KB)\n", ha->md_dump, ha->md_dump_size / 1024); vfree(ha->md_dump); ha->md_dump_size = 0; ha->md_dump = NULL; } } void qla82xx_md_prep(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval; /* Get Minidump template size */ rval = qla82xx_md_get_template_size(vha); if (rval == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0xb04a, "MiniDump Template size obtained (%d KB)\n", ha->md_template_size / 1024); /* Get Minidump template */ if (IS_QLA8044(ha)) rval = qla8044_md_get_template(vha); else rval = qla82xx_md_get_template(vha); if (rval == 
QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb04b, "MiniDump Template obtained\n"); /* Allocate memory for minidump */ rval = qla82xx_md_alloc(vha); if (rval == QLA_SUCCESS) ql_log(ql_log_info, vha, 0xb04c, "MiniDump memory allocated (%d KB)\n", ha->md_dump_size / 1024); else { ql_log(ql_log_info, vha, 0xb04d, "Free MiniDump template: %p, size: (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } } } } int qla82xx_beacon_on(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 1); if (rval) { ql_log(ql_log_warn, vha, 0xb050, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 1; exit: qla82xx_idc_unlock(ha); return rval; } int qla82xx_beacon_off(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 0); if (rval) { ql_log(ql_log_warn, vha, 0xb051, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 0; exit: qla82xx_idc_unlock(ha); return rval; } void qla82xx_fw_dump(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (!ha->allow_cna_fw_dump) return; scsi_block_requests(vha->host); ha->flags.isp82xx_no_md_cap = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); }
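Editor's note: the qla82xx_md_alloc() routine above sizes the minidump buffer by walking the capture mask bit by bit and summing the matching entries of the template's capture_size_array. The standalone sketch below is not part of the driver: the EXAMPLE_DEFAULT_CAP_MASK value, the example_minidump_size() helper and the sample sizes are hypothetical stand-ins used only to illustrate that sizing loop in isolation.

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for QLA82XX_DEFAULT_CAP_MASK; the real value lives in the driver headers. */
#define EXAMPLE_DEFAULT_CAP_MASK 0xFF

/* Mirror of the sizing loop in qla82xx_md_alloc(): the bit with value 2^k in
 * the capture mask selects capture_size_array[k], with k starting at 1. */
static uint32_t example_minidump_size(uint32_t cap_mask, const uint32_t *capture_size_array)
{
	uint32_t i, k, total = 0;

	for (i = 0x2, k = 1; (i & EXAMPLE_DEFAULT_CAP_MASK); i <<= 1, k++) {
		if (i & cap_mask)
			total += capture_size_array[k];
	}
	return total;
}

int main(void)
{
	/* Made-up per-level capture sizes; index 0 is never used by the loop. */
	const uint32_t sizes[8] = { 0, 0x1000, 0x2000, 0x4000, 0x8000,
				    0x10000, 0x20000, 0x40000 };

	printf("dump size for mask 0x1F = 0x%x\n",
	       example_minidump_size(0x1F, sizes));
	return 0;
}

Run with a capture mask of 0x1F, the sketch reports 0xf000, the sum of the level 1-4 sizes, which is the same arithmetic the driver performs before vmalloc()ing ha->md_dump.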
linux-master
drivers/scsi/qla2xxx/qla_nx.c
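Editor's note: the qla2x00_nvram_request() routine in the file below clocks an NVRAM command out one bit at a time; its kerneldoc describes the command word as bit 26 = start bit, bits 25-24 = opcode, bits 23-16 = address, bits 15-0 = write data. The standalone sketch here is not driver code, and EXAMPLE_NV_READ_OP with its 2-bit read opcode value is an assumption for illustration; it only shows how such a command word is assembled and which 11 bits end up serialized after the driver's "nv_cmd <<= 5" shift.

#include <stdio.h>
#include <stdint.h>

/* Assumed read opcode: start bit (bit 26) plus a 2-bit opcode of 0b10 in
 * bits 25-24; the driver's real NV_READ_OP definition may differ. */
#define EXAMPLE_NV_READ_OP ((1u << 26) | (0x2u << 24))

int main(void)
{
	uint32_t addr = 0x3A;	/* 8-bit NVRAM word address, placed in bits 23-16 */
	uint32_t nv_cmd = (addr << 16) | EXAMPLE_NV_READ_OP;
	int bit;

	/* As in qla2x00_nvram_request(): shift left by 5 so the start bit
	 * lands in bit 31, then clock out 11 bits (start + opcode + address),
	 * MSB first. The low 16 data bits matter only for write commands. */
	nv_cmd <<= 5;
	printf("serialized command bits: ");
	for (bit = 0; bit < 11; bit++) {
		putchar((nv_cmd & 0x80000000u) ? '1' : '0');
		nv_cmd <<= 1;
	}
	putchar('\n');
	return 0;
}

For address 0x3A the sketch prints 11000111010: the start bit, the assumed read opcode, then the eight address bits, matching the 11-iteration serialization loop in the driver.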
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include <linux/delay.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> /* * NVRAM support routines */ /** * qla2x00_lock_nvram_access() - * @ha: HA context */ static void qla2x00_lock_nvram_access(struct qla_hw_data *ha) { uint16_t data; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { data = rd_reg_word(&reg->nvram); while (data & NVR_BUSY) { udelay(100); data = rd_reg_word(&reg->nvram); } /* Lock resource */ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1); rd_reg_word(&reg->u.isp2300.host_semaphore); udelay(5); data = rd_reg_word(&reg->u.isp2300.host_semaphore); while ((data & BIT_0) == 0) { /* Lock failed */ udelay(100); wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1); rd_reg_word(&reg->u.isp2300.host_semaphore); udelay(5); data = rd_reg_word(&reg->u.isp2300.host_semaphore); } } } /** * qla2x00_unlock_nvram_access() - * @ha: HA context */ static void qla2x00_unlock_nvram_access(struct qla_hw_data *ha) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { wrt_reg_word(&reg->u.isp2300.host_semaphore, 0); rd_reg_word(&reg->u.isp2300.host_semaphore); } } /** * qla2x00_nv_write() - Prepare for NVRAM read/write operation. * @ha: HA context * @data: Serial interface selector */ static void qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_CLOCK | NVR_WRT_ENABLE); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); } /** * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from * NVRAM. * @ha: HA context * @nv_cmd: NVRAM command * * Bit definitions for NVRAM command: * * Bit 26 = start bit * Bit 25, 24 = opcode * Bit 23-16 = address * Bit 15-0 = write data * * Returns the word read from nvram @addr. */ static uint16_t qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd) { uint8_t cnt; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint16_t data = 0; uint16_t reg_data; /* Send command to NVRAM. */ nv_cmd <<= 5; for (cnt = 0; cnt < 11; cnt++) { if (nv_cmd & BIT_31) qla2x00_nv_write(ha, NVR_DATA_OUT); else qla2x00_nv_write(ha, 0); nv_cmd <<= 1; } /* Read data from NVRAM. */ for (cnt = 0; cnt < 16; cnt++) { wrt_reg_word(&reg->nvram, NVR_SELECT | NVR_CLOCK); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); data <<= 1; reg_data = rd_reg_word(&reg->nvram); if (reg_data & NVR_DATA_IN) data |= BIT_0; wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); } /* Deselect chip. */ wrt_reg_word(&reg->nvram, NVR_DESELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); return data; } /** * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the * request routine to get the word from NVRAM. * @ha: HA context * @addr: Address in NVRAM to read * * Returns the word read from nvram @addr. 
*/ static uint16_t qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr) { uint16_t data; uint32_t nv_cmd; nv_cmd = addr << 16; nv_cmd |= NV_READ_OP; data = qla2x00_nvram_request(ha, nv_cmd); return (data); } /** * qla2x00_nv_deselect() - Deselect NVRAM operations. * @ha: HA context */ static void qla2x00_nv_deselect(struct qla_hw_data *ha) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; wrt_reg_word(&reg->nvram, NVR_DESELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ NVRAM_DELAY(); } /** * qla2x00_write_nvram_word() - Write NVRAM data. * @ha: HA context * @addr: Address in NVRAM to write * @data: word to program */ static void qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) { int count; uint16_t word; uint32_t nv_cmd, wait_cnt; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_deselect(ha); /* Write data */ nv_cmd = (addr << 16) | NV_WRITE_OP; nv_cmd |= (__force u16)data; nv_cmd <<= 5; for (count = 0; count < 27; count++) { if (nv_cmd & BIT_31) qla2x00_nv_write(ha, NVR_DATA_OUT); else qla2x00_nv_write(ha, 0); nv_cmd <<= 1; } qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready */ wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { ql_dbg(ql_dbg_user, vha, 0x708d, "NVRAM didn't go ready...\n"); break; } NVRAM_DELAY(); word = rd_reg_word(&reg->nvram); } while ((word & NVR_DATA_IN) == 0); qla2x00_nv_deselect(ha); /* Disable writes */ qla2x00_nv_write(ha, NVR_DATA_OUT); for (count = 0; count < 10; count++) qla2x00_nv_write(ha, 0); qla2x00_nv_deselect(ha); } static int qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr, __le16 data, uint32_t tmo) { int ret, count; uint16_t word; uint32_t nv_cmd; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; ret = QLA_SUCCESS; qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_deselect(ha); /* Write data */ nv_cmd = (addr << 16) | NV_WRITE_OP; nv_cmd |= (__force u16)data; nv_cmd <<= 5; for (count = 0; count < 27; count++) { if (nv_cmd & BIT_31) qla2x00_nv_write(ha, NVR_DATA_OUT); else qla2x00_nv_write(ha, 0); nv_cmd <<= 1; } qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready */ wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ do { NVRAM_DELAY(); word = rd_reg_word(&reg->nvram); if (!--tmo) { ret = QLA_FUNCTION_FAILED; break; } } while ((word & NVR_DATA_IN) == 0); qla2x00_nv_deselect(ha); /* Disable writes */ qla2x00_nv_write(ha, NVR_DATA_OUT); for (count = 0; count < 10; count++) qla2x00_nv_write(ha, 0); qla2x00_nv_deselect(ha); return ret; } /** * qla2x00_clear_nvram_protection() - * @ha: HA context */ static int qla2x00_clear_nvram_protection(struct qla_hw_data *ha) { int ret, stat; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t word, wait_cnt; __le16 wprot, wprot_old; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* Clear NVRAM write protection. 
*/ ret = QLA_FUNCTION_FAILED; wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base, cpu_to_le16(0x1234), 100000); wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) { /* Write enable. */ qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_deselect(ha); /* Enable protection register. */ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); qla2x00_nv_write(ha, NVR_PR_ENABLE); qla2x00_nv_write(ha, NVR_PR_ENABLE); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE); qla2x00_nv_deselect(ha); /* Clear protection register (ffff is cleared). */ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE); qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready. */ wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { ql_dbg(ql_dbg_user, vha, 0x708e, "NVRAM didn't go ready...\n"); break; } NVRAM_DELAY(); word = rd_reg_word(&reg->nvram); } while ((word & NVR_DATA_IN) == 0); if (wait_cnt) ret = QLA_SUCCESS; } else qla2x00_write_nvram_word(ha, ha->nvram_base, wprot_old); return ret; } static void qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t word, wait_cnt; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (stat != QLA_SUCCESS) return; /* Set NVRAM write protection. */ /* Write enable. */ qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_write(ha, 0); qla2x00_nv_write(ha, 0); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_deselect(ha); /* Enable protection register. */ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); qla2x00_nv_write(ha, NVR_PR_ENABLE); qla2x00_nv_write(ha, NVR_PR_ENABLE); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE); qla2x00_nv_deselect(ha); /* Enable protection register. */ qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); qla2x00_nv_write(ha, NVR_PR_ENABLE); qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT); for (word = 0; word < 8; word++) qla2x00_nv_write(ha, NVR_PR_ENABLE); qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready. */ wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); /* PCI Posting. 
*/ wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { ql_dbg(ql_dbg_user, vha, 0x708f, "NVRAM didn't go ready...\n"); break; } NVRAM_DELAY(); word = rd_reg_word(&reg->nvram); } while ((word & NVR_DATA_IN) == 0); } /*****************************************************************************/ /* Flash Manipulation Routines */ /*****************************************************************************/ static inline uint32_t flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr) { return ha->flash_conf_off + faddr; } static inline uint32_t flash_data_addr(struct qla_hw_data *ha, uint32_t faddr) { return ha->flash_data_off + faddr; } static inline uint32_t nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr) { return ha->nvram_conf_off + naddr; } static inline uint32_t nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr) { return ha->nvram_data_off + naddr; } static int qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data) { struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; ulong cnt = 30000; wrt_reg_dword(&reg->flash_addr, addr & ~FARX_DATA_FLAG); while (cnt--) { if (rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG) { *data = rd_reg_dword(&reg->flash_data); return QLA_SUCCESS; } udelay(10); cond_resched(); } ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090, "Flash read dword at %x timeout.\n", addr); *data = 0xDEADDEAD; return QLA_FUNCTION_TIMEOUT; } int qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { ulong i; int ret = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; /* Dword reads to flash. */ faddr = flash_data_addr(ha, faddr); for (i = 0; i < dwords; i++, faddr++, dwptr++) { ret = qla24xx_read_flash_dword(ha, faddr, dwptr); if (ret != QLA_SUCCESS) break; cpu_to_le32s(dwptr); } return ret; } static int qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data) { struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; ulong cnt = 500000; wrt_reg_dword(&reg->flash_data, data); wrt_reg_dword(&reg->flash_addr, addr | FARX_DATA_FLAG); while (cnt--) { if (!(rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG)) return QLA_SUCCESS; udelay(10); cond_resched(); } ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090, "Flash write dword at %x timeout.\n", addr); return QLA_FUNCTION_TIMEOUT; } static void qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id, uint8_t *flash_id) { uint32_t faddr, ids = 0; *man_id = *flash_id = 0; faddr = flash_conf_addr(ha, 0x03ab); if (!qla24xx_read_flash_dword(ha, faddr, &ids)) { *man_id = LSB(ids); *flash_id = MSB(ids); } /* Check if man_id and flash_id are valid. */ if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) { /* Read information using 0x9f opcode * Device ID, Mfg ID would be read in the format: * <Ext Dev Info><Device ID Part2><Device ID Part 1><Mfg ID> * Example: ATMEL 0x00 01 45 1F * Extract MFG and Dev ID from last two bytes. 
*/ faddr = flash_conf_addr(ha, 0x009f); if (!qla24xx_read_flash_dword(ha, faddr, &ids)) { *man_id = LSB(ids); *flash_id = MSB(ids); } } } static int qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) { const char *loc, *locations[] = { "DEF", "PCI" }; uint32_t pcihdr, pcids; uint16_t cnt, chksum; __le16 *wptr; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct qla_flt_location *fltl = (void *)req->ring; uint32_t *dcode = (uint32_t *)req->ring; uint8_t *buf = (void *)req->ring, *bcode, last_image; /* * FLT-location structure resides after the last PCI region. */ /* Begin with sane defaults. */ loc = locations[0]; *start = 0; if (IS_QLA24XX_TYPE(ha)) *start = FA_FLASH_LAYOUT_ADDR_24; else if (IS_QLA25XX(ha)) *start = FA_FLASH_LAYOUT_ADDR; else if (IS_QLA81XX(ha)) *start = FA_FLASH_LAYOUT_ADDR_81; else if (IS_P3P_TYPE(ha)) { *start = FA_FLASH_LAYOUT_ADDR_82; goto end; } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { *start = FA_FLASH_LAYOUT_ADDR_83; goto end; } else if (IS_QLA28XX(ha)) { *start = FA_FLASH_LAYOUT_ADDR_28; goto end; } /* Begin with first PCI expansion ROM header. */ pcihdr = 0; do { /* Verify PCI expansion ROM header. */ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); bcode = buf + (pcihdr % 4); if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) goto end; /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); bcode = buf + (pcihdr % 4); /* Validate signature of PCI data structure. */ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || bcode[0x2] != 'I' || bcode[0x3] != 'R') goto end; last_image = bcode[0x15] & BIT_7; /* Locate next PCI expansion ROM. */ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; } while (!last_image); /* Now verify FLT-location structure. */ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); if (memcmp(fltl->sig, "QFLT", 4)) goto end; wptr = (__force __le16 *)req->ring; cnt = sizeof(*fltl) / sizeof(*wptr); for (chksum = 0; cnt--; wptr++) chksum += le16_to_cpu(*wptr); if (chksum) { ql_log(ql_log_fatal, vha, 0x0045, "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e, fltl, sizeof(*fltl)); return QLA_FUNCTION_FAILED; } /* Good data. Use specified location. 
*/ loc = locations[1]; *start = (le16_to_cpu(fltl->start_hi) << 16 | le16_to_cpu(fltl->start_lo)) >> 2; end: ql_dbg(ql_dbg_init, vha, 0x0046, "FLTL[%s] = 0x%x.\n", loc, *start); return QLA_SUCCESS; } static void qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) { const char *locations[] = { "DEF", "FLT" }, *loc = locations[1]; const uint32_t def_fw[] = { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 }; const uint32_t def_boot[] = { FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 }; const uint32_t def_vpd_nvram[] = { FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 }; const uint32_t def_vpd0[] = { 0, 0, FA_VPD0_ADDR_81 }; const uint32_t def_vpd1[] = { 0, 0, FA_VPD1_ADDR_81 }; const uint32_t def_nvram0[] = { 0, 0, FA_NVRAM0_ADDR_81 }; const uint32_t def_nvram1[] = { 0, 0, FA_NVRAM1_ADDR_81 }; const uint32_t def_fdt[] = { FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR, FA_FLASH_DESCR_ADDR_81 }; const uint32_t def_npiv_conf0[] = { FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR, FA_NPIV_CONF0_ADDR_81 }; const uint32_t def_npiv_conf1[] = { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR, FA_NPIV_CONF1_ADDR_81 }; const uint32_t fcp_prio_cfg0[] = { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25, 0 }; const uint32_t fcp_prio_cfg1[] = { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25, 0 }; struct qla_hw_data *ha = vha->hw; uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0; struct qla_flt_header *flt = ha->flt; struct qla_flt_region *region = &flt->region[0]; __le16 *wptr; uint16_t cnt, chksum; uint32_t start; /* Assign FCP prio region since older adapters may not have FLT, or FCP prio region in it's FLT. */ ha->flt_region_fcp_prio = (ha->port_no == 0) ? fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; ha->flt_region_flt = flt_addr; wptr = (__force __le16 *)ha->flt; ha->isp_ops->read_optrom(vha, flt, flt_addr << 2, (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE)); if (le16_to_cpu(*wptr) == 0xffff) goto no_flash_data; if (flt->version != cpu_to_le16(1)) { ql_log(ql_log_warn, vha, 0x0047, "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), le16_to_cpu(flt->checksum)); goto no_flash_data; } cnt = (sizeof(*flt) + le16_to_cpu(flt->length)) / sizeof(*wptr); for (chksum = 0; cnt--; wptr++) chksum += le16_to_cpu(*wptr); if (chksum) { ql_log(ql_log_fatal, vha, 0x0048, "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), le16_to_cpu(flt->checksum)); goto no_flash_data; } cnt = le16_to_cpu(flt->length) / sizeof(*region); for ( ; cnt; cnt--, region++) { /* Store addresses as DWORD offsets. 
*/ start = le32_to_cpu(region->start) >> 2; ql_dbg(ql_dbg_init, vha, 0x0049, "FLT[%#x]: start=%#x end=%#x size=%#x.\n", le16_to_cpu(region->code), start, le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size) >> 2); if (region->attribute) ql_log(ql_dbg_init, vha, 0xffff, "Region %x is secure\n", region->code); switch (le16_to_cpu(region->code)) { case FLT_REG_FCOE_FW: if (!IS_QLA8031(ha)) break; ha->flt_region_fw = start; break; case FLT_REG_FW: if (IS_QLA8031(ha)) break; ha->flt_region_fw = start; break; case FLT_REG_BOOT_CODE: ha->flt_region_boot = start; break; case FLT_REG_VPD_0: if (IS_QLA8031(ha)) break; ha->flt_region_vpd_nvram = start; if (IS_P3P_TYPE(ha)) break; if (ha->port_no == 0) ha->flt_region_vpd = start; break; case FLT_REG_VPD_1: if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) break; if (ha->port_no == 1) ha->flt_region_vpd = start; break; case FLT_REG_VPD_2: if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 2) ha->flt_region_vpd = start; break; case FLT_REG_VPD_3: if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 3) ha->flt_region_vpd = start; break; case FLT_REG_NVRAM_0: if (IS_QLA8031(ha)) break; if (ha->port_no == 0) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_1: if (IS_QLA8031(ha)) break; if (ha->port_no == 1) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_2: if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 2) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_3: if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 3) ha->flt_region_nvram = start; break; case FLT_REG_FDT: ha->flt_region_fdt = start; break; case FLT_REG_NPIV_CONF_0: if (ha->port_no == 0) ha->flt_region_npiv_conf = start; break; case FLT_REG_NPIV_CONF_1: if (ha->port_no == 1) ha->flt_region_npiv_conf = start; break; case FLT_REG_GOLD_FW: ha->flt_region_gold_fw = start; break; case FLT_REG_FCP_PRIO_0: if (ha->port_no == 0) ha->flt_region_fcp_prio = start; break; case FLT_REG_FCP_PRIO_1: if (ha->port_no == 1) ha->flt_region_fcp_prio = start; break; case FLT_REG_BOOT_CODE_82XX: ha->flt_region_boot = start; break; case FLT_REG_BOOT_CODE_8044: if (IS_QLA8044(ha)) ha->flt_region_boot = start; break; case FLT_REG_FW_82XX: ha->flt_region_fw = start; break; case FLT_REG_CNA_FW: if (IS_CNA_CAPABLE(ha)) ha->flt_region_fw = start; break; case FLT_REG_GOLD_FW_82XX: ha->flt_region_gold_fw = start; break; case FLT_REG_BOOTLOAD_82XX: ha->flt_region_bootload = start; break; case FLT_REG_VPD_8XXX: if (IS_CNA_CAPABLE(ha)) ha->flt_region_vpd = start; break; case FLT_REG_FCOE_NVRAM_0: if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) break; if (ha->port_no == 0) ha->flt_region_nvram = start; break; case FLT_REG_FCOE_NVRAM_1: if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) break; if (ha->port_no == 1) ha->flt_region_nvram = start; break; case FLT_REG_IMG_PRI_27XX: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_sec = start; break; case FLT_REG_FW_SEC_27XX: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_fw_sec = start; break; case FLT_REG_BOOTLOAD_SEC_27XX: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_boot_sec = start; break; case FLT_REG_AUX_IMG_PRI_28XX: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_aux_img_status_pri = start; break; case FLT_REG_AUX_IMG_SEC_28XX: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_aux_img_status_sec = start; break; case FLT_REG_NVRAM_SEC_28XX_0: if (IS_QLA27XX(ha) || 
IS_QLA28XX(ha)) if (ha->port_no == 0) ha->flt_region_nvram_sec = start; break; case FLT_REG_NVRAM_SEC_28XX_1: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) if (ha->port_no == 1) ha->flt_region_nvram_sec = start; break; case FLT_REG_NVRAM_SEC_28XX_2: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) if (ha->port_no == 2) ha->flt_region_nvram_sec = start; break; case FLT_REG_NVRAM_SEC_28XX_3: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) if (ha->port_no == 3) ha->flt_region_nvram_sec = start; break; case FLT_REG_VPD_SEC_27XX_0: case FLT_REG_VPD_SEC_28XX_0: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->flt_region_vpd_nvram_sec = start; if (ha->port_no == 0) ha->flt_region_vpd_sec = start; } break; case FLT_REG_VPD_SEC_27XX_1: case FLT_REG_VPD_SEC_28XX_1: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) if (ha->port_no == 1) ha->flt_region_vpd_sec = start; break; case FLT_REG_VPD_SEC_27XX_2: case FLT_REG_VPD_SEC_28XX_2: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) if (ha->port_no == 2) ha->flt_region_vpd_sec = start; break; case FLT_REG_VPD_SEC_27XX_3: case FLT_REG_VPD_SEC_28XX_3: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) if (ha->port_no == 3) ha->flt_region_vpd_sec = start; break; } } goto done; no_flash_data: /* Use hardcoded defaults. */ loc = locations[0]; ha->flt_region_fw = def_fw[def]; ha->flt_region_boot = def_boot[def]; ha->flt_region_vpd_nvram = def_vpd_nvram[def]; ha->flt_region_vpd = (ha->port_no == 0) ? def_vpd0[def] : def_vpd1[def]; ha->flt_region_nvram = (ha->port_no == 0) ? def_nvram0[def] : def_nvram1[def]; ha->flt_region_fdt = def_fdt[def]; ha->flt_region_npiv_conf = (ha->port_no == 0) ? def_npiv_conf0[def] : def_npiv_conf1[def]; done: ql_dbg(ql_dbg_init, vha, 0x004a, "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x " "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", loc, ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf, ha->flt_region_fcp_prio); } static void qla2xxx_get_fdt_info(scsi_qla_host_t *vha) { #define FLASH_BLK_SIZE_4K 0x1000 #define FLASH_BLK_SIZE_32K 0x8000 #define FLASH_BLK_SIZE_64K 0x10000 const char *loc, *locations[] = { "MID", "FDT" }; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; uint16_t cnt, chksum; __le16 *wptr = (__force __le16 *)req->ring; struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring; uint8_t man_id, flash_id; uint16_t mid = 0, fid = 0; ha->isp_ops->read_optrom(vha, fdt, ha->flt_region_fdt << 2, OPTROM_BURST_DWORDS); if (le16_to_cpu(*wptr) == 0xffff) goto no_flash_data; if (memcmp(fdt->sig, "QLID", 4)) goto no_flash_data; for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++) chksum += le16_to_cpu(*wptr); if (chksum) { ql_dbg(ql_dbg_init, vha, 0x004c, "Inconsistent FDT detected:" " checksum=0x%x id=%c version0x%x.\n", chksum, fdt->sig[0], le16_to_cpu(fdt->version)); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113, fdt, sizeof(*fdt)); goto no_flash_data; } loc = locations[1]; mid = le16_to_cpu(fdt->man_id); fid = le16_to_cpu(fdt->id); ha->fdt_wrt_disable = fdt->wrt_disable_bits; ha->fdt_wrt_enable = fdt->wrt_enable_bits; ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd; if (IS_QLA8044(ha)) ha->fdt_erase_cmd = fdt->erase_cmd; else ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); ha->fdt_block_size = le32_to_cpu(fdt->block_size); if (fdt->unprotect_sec_cmd) { ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 | fdt->unprotect_sec_cmd); ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? 
flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd) : flash_conf_addr(ha, 0x0336); } goto done; no_flash_data: loc = locations[0]; if (IS_P3P_TYPE(ha)) { ha->fdt_block_size = FLASH_BLK_SIZE_64K; goto done; } qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); mid = man_id; fid = flash_id; ha->fdt_wrt_disable = 0x9c; ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8); switch (man_id) { case 0xbf: /* STT flash. */ if (flash_id == 0x8e) ha->fdt_block_size = FLASH_BLK_SIZE_64K; else ha->fdt_block_size = FLASH_BLK_SIZE_32K; if (flash_id == 0x80) ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352); break; case 0x13: /* ST M25P80. */ ha->fdt_block_size = FLASH_BLK_SIZE_64K; break; case 0x1f: /* Atmel 26DF081A. */ ha->fdt_block_size = FLASH_BLK_SIZE_4K; ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320); ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339); ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336); break; default: /* Default to 64 kb sector size. */ ha->fdt_block_size = FLASH_BLK_SIZE_64K; break; } done: ql_dbg(ql_dbg_init, vha, 0x004d, "FDT[%s]: (0x%x/0x%x) erase=0x%x " "pr=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, ha->fdt_wrt_disable, ha->fdt_block_size); } static void qla2xxx_get_idc_param(scsi_qla_host_t *vha) { #define QLA82XX_IDC_PARAM_ADDR 0x003e885c __le32 *wptr; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; if (!(IS_P3P_TYPE(ha))) return; wptr = (__force __le32 *)req->ring; ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8); if (*wptr == cpu_to_le32(0xffffffff)) { ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; } else { ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr); wptr++; ha->fcoe_reset_timeout = le32_to_cpu(*wptr); } ql_dbg(ql_dbg_init, vha, 0x004e, "fcoe_dev_init_timeout=%d " "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout, ha->fcoe_reset_timeout); return; } int qla2xxx_get_flash_info(scsi_qla_host_t *vha) { int ret; uint32_t flt_addr; struct qla_hw_data *ha = vha->hw; if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_SUCCESS; ret = qla2xxx_find_flt_start(vha, &flt_addr); if (ret != QLA_SUCCESS) return ret; qla2xxx_get_flt_info(vha, flt_addr); qla2xxx_get_fdt_info(vha); qla2xxx_get_idc_param(vha); return QLA_SUCCESS; } void qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) { #define NPIV_CONFIG_SIZE (16*1024) void *data; __le16 *wptr; uint16_t cnt, chksum; int i; struct qla_npiv_header hdr; struct qla_npiv_entry *entry; struct qla_hw_data *ha = vha->hw; if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) return; if (ha->flags.nic_core_reset_hdlr_active) return; if (IS_QLA8044(ha)) return; ha->isp_ops->read_optrom(vha, &hdr, ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); if (hdr.version == cpu_to_le16(0xffff)) return; if (hdr.version != cpu_to_le16(1)) { ql_dbg(ql_dbg_user, vha, 0x7090, "Unsupported NPIV-Config " "detected: version=0x%x entries=0x%x checksum=0x%x.\n", le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), le16_to_cpu(hdr.checksum)); return; } data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); if (!data) { ql_log(ql_log_warn, vha, 0x7091, "Unable to allocate memory for data.\n"); return; } ha->isp_ops->read_optrom(vha, data, ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1; for (wptr = data, chksum = 0; 
cnt--; wptr++) chksum += le16_to_cpu(*wptr); if (chksum) { ql_dbg(ql_dbg_user, vha, 0x7092, "Inconsistent NPIV-Config " "detected: version=0x%x entries=0x%x checksum=0x%x.\n", le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), le16_to_cpu(hdr.checksum)); goto done; } entry = data + sizeof(struct qla_npiv_header); cnt = le16_to_cpu(hdr.entries); for (i = 0; cnt; cnt--, entry++, i++) { uint16_t flags; struct fc_vport_identifiers vid; struct fc_vport *vport; memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); flags = le16_to_cpu(entry->flags); if (flags == 0xffff) continue; if ((flags & BIT_0) == 0) continue; memset(&vid, 0, sizeof(vid)); vid.roles = FC_PORT_ROLE_FCP_INITIATOR; vid.vport_type = FC_PORTTYPE_NPIV; vid.disable = false; vid.port_name = wwn_to_u64(entry->port_name); vid.node_name = wwn_to_u64(entry->node_name); ql_dbg(ql_dbg_user, vha, 0x7093, "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=%#x Q_qos=%#x F_qos=%#x.\n", cnt, vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), entry->q_qos, entry->f_qos); if (i < QLA_PRECONFIG_VPORTS) { vport = fc_vport_create(vha->host, 0, &vid); if (!vport) ql_log(ql_log_warn, vha, 0x7094, "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt, vid.port_name, vid.node_name); } } done: kfree(data); } static int qla24xx_unprotect_flash(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (ha->flags.fac_supported) return qla81xx_fac_do_write_enable(vha, 1); /* Enable flash write. */ wrt_reg_dword(&reg->ctrl_status, rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE); rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */ if (!ha->fdt_wrt_disable) goto done; /* Disable flash write-protection, first clear SR protection bit */ qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); /* Then write zero again to clear remaining SR bits.*/ qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); done: return QLA_SUCCESS; } static int qla24xx_protect_flash(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; ulong cnt = 300; uint32_t faddr, dword; if (ha->flags.fac_supported) return qla81xx_fac_do_write_enable(vha, 0); if (!ha->fdt_wrt_disable) goto skip_wrt_protect; /* Enable flash write-protection and wait for completion. */ faddr = flash_conf_addr(ha, 0x101); qla24xx_write_flash_dword(ha, faddr, ha->fdt_wrt_disable); faddr = flash_conf_addr(ha, 0x5); while (cnt--) { if (!qla24xx_read_flash_dword(ha, faddr, &dword)) { if (!(dword & BIT_0)) break; } udelay(10); } skip_wrt_protect: /* Disable flash write. 
*/ wrt_reg_dword(&reg->ctrl_status, rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); return QLA_SUCCESS; } static int qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata) { struct qla_hw_data *ha = vha->hw; uint32_t start, finish; if (ha->flags.fac_supported) { start = fdata >> 2; finish = start + (ha->fdt_block_size >> 2) - 1; return qla81xx_fac_erase_sector(vha, flash_data_addr(ha, start), flash_data_addr(ha, finish)); } return qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd, (fdata & 0xff00) | ((fdata << 16) & 0xff0000) | ((fdata >> 16) & 0xff)); } static int qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, uint32_t dwords) { int ret; ulong liter; ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */ uint32_t sec_mask, rest_addr, fdata; dma_addr_t optrom_dma; void *optrom = NULL; struct qla_hw_data *ha = vha->hw; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto next; /* Allocate dma buffer for burst write */ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { ql_log(ql_log_warn, vha, 0x7095, "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE); } next: ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Unprotect flash...\n"); ret = qla24xx_unprotect_flash(vha); if (ret) { ql_log(ql_log_warn, vha, 0x7096, "Failed to unprotect flash.\n"); goto done; } rest_addr = (ha->fdt_block_size >> 2) - 1; sec_mask = ~rest_addr; for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { fdata = (faddr & sec_mask) << 2; /* Are we at the beginning of a sector? */ if (!(faddr & rest_addr)) { ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Erase sector %#x...\n", faddr); ret = qla24xx_erase_sector(vha, fdata); if (ret) { ql_dbg(ql_dbg_user, vha, 0x7007, "Failed to erase sector %x.\n", faddr); break; } } if (optrom) { /* If smaller than a burst remaining */ if (dwords - liter < dburst) dburst = dwords - liter; /* Copy to dma buffer */ memcpy(optrom, dwptr, dburst << 2); /* Burst write */ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Write burst (%#lx dwords)...\n", dburst); ret = qla2x00_load_ram(vha, optrom_dma, flash_data_addr(ha, faddr), dburst); if (!ret) { liter += dburst - 1; faddr += dburst - 1; dwptr += dburst - 1; continue; } ql_log(ql_log_warn, vha, 0x7097, "Failed burst-write at %x (%p/%#llx)....\n", flash_data_addr(ha, faddr), optrom, (u64)optrom_dma); dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); optrom = NULL; if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) break; ql_log(ql_log_warn, vha, 0x7098, "Reverting to slow write...\n"); } /* Slow write */ ret = qla24xx_write_flash_dword(ha, flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); if (ret) { ql_dbg(ql_dbg_user, vha, 0x7006, "Failed slow write %x (%x)\n", faddr, *dwptr); break; } } ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Protect flash...\n"); ret = qla24xx_protect_flash(vha); if (ret) ql_log(ql_log_warn, vha, 0x7099, "Failed to protect flash\n"); done: if (optrom) dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); return ret; } uint8_t * qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { uint32_t i; __le16 *wptr; struct qla_hw_data *ha = vha->hw; /* Word reads to NVRAM via registers. 
*/ wptr = buf; qla2x00_lock_nvram_access(ha); for (i = 0; i < bytes >> 1; i++, naddr++) wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha, naddr)); qla2x00_unlock_nvram_access(ha); return buf; } uint8_t * qla24xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { struct qla_hw_data *ha = vha->hw; uint32_t *dwptr = buf; uint32_t i; if (IS_P3P_TYPE(ha)) return buf; /* Dword reads to flash. */ naddr = nvram_data_addr(ha, naddr); bytes >>= 2; for (i = 0; i < bytes; i++, naddr++, dwptr++) { if (qla24xx_read_flash_dword(ha, naddr, dwptr)) break; cpu_to_le32s(dwptr); } return buf; } int qla2x00_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { int ret, stat; uint32_t i; uint16_t *wptr; unsigned long flags; struct qla_hw_data *ha = vha->hw; ret = QLA_SUCCESS; spin_lock_irqsave(&ha->hardware_lock, flags); qla2x00_lock_nvram_access(ha); /* Disable NVRAM write-protection. */ stat = qla2x00_clear_nvram_protection(ha); wptr = (uint16_t *)buf; for (i = 0; i < bytes >> 1; i++, naddr++) { qla2x00_write_nvram_word(ha, naddr, cpu_to_le16(*wptr)); wptr++; } /* Enable NVRAM write-protection. */ qla2x00_set_nvram_protection(ha, stat); qla2x00_unlock_nvram_access(ha); spin_unlock_irqrestore(&ha->hardware_lock, flags); return ret; } int qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; __le32 *dwptr = buf; uint32_t i; int ret; ret = QLA_SUCCESS; if (IS_P3P_TYPE(ha)) return ret; /* Enable flash write. */ wrt_reg_dword(&reg->ctrl_status, rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE); rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */ /* Disable NVRAM write-protection. */ qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); /* Dword writes to flash. */ naddr = nvram_data_addr(ha, naddr); bytes >>= 2; for (i = 0; i < bytes; i++, naddr++, dwptr++) { if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) { ql_dbg(ql_dbg_user, vha, 0x709a, "Unable to program nvram address=%x data=%x.\n", naddr, *dwptr); break; } } /* Enable NVRAM write-protection. */ qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c); /* Disable flash write. */ wrt_reg_dword(&reg->ctrl_status, rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */ return ret; } uint8_t * qla25xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { struct qla_hw_data *ha = vha->hw; uint32_t *dwptr = buf; uint32_t i; /* Dword reads to flash. */ naddr = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr); bytes >>= 2; for (i = 0; i < bytes; i++, naddr++, dwptr++) { if (qla24xx_read_flash_dword(ha, naddr, dwptr)) break; cpu_to_le32s(dwptr); } return buf; } #define RMW_BUFFER_SIZE (64 * 1024) int qla25xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { struct qla_hw_data *ha = vha->hw; uint8_t *dbuf = vmalloc(RMW_BUFFER_SIZE); if (!dbuf) return QLA_MEMORY_ALLOC_FAILED; ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2, RMW_BUFFER_SIZE); memcpy(dbuf + (naddr << 2), buf, bytes); ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2, RMW_BUFFER_SIZE); vfree(dbuf); return QLA_SUCCESS; } static inline void qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags) { if (IS_QLA2322(ha)) { /* Flip all colors. 
*/ if (ha->beacon_color_state == QLA_LED_ALL_ON) { /* Turn off. */ ha->beacon_color_state = 0; *pflags = GPIO_LED_ALL_OFF; } else { /* Turn on. */ ha->beacon_color_state = QLA_LED_ALL_ON; *pflags = GPIO_LED_RGA_ON; } } else { /* Flip green led only. */ if (ha->beacon_color_state == QLA_LED_GRN_ON) { /* Turn off. */ ha->beacon_color_state = 0; *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF; } else { /* Turn on. */ ha->beacon_color_state = QLA_LED_GRN_ON; *pflags = GPIO_LED_GREEN_ON_AMBER_OFF; } } } #define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) void qla2x00_beacon_blink(struct scsi_qla_host *vha) { uint16_t gpio_enable; uint16_t gpio_data; uint16_t led_color = 0; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; if (IS_P3P_TYPE(ha)) return; spin_lock_irqsave(&ha->hardware_lock, flags); /* Save the Original GPIOE. */ if (ha->pio_address) { gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); } else { gpio_enable = rd_reg_word(&reg->gpioe); gpio_data = rd_reg_word(&reg->gpiod); } /* Set the modified gpio_enable values */ gpio_enable |= GPIO_LED_MASK; if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); } else { wrt_reg_word(&reg->gpioe, gpio_enable); rd_reg_word(&reg->gpioe); } qla2x00_flip_colors(ha, &led_color); /* Clear out any previously set LED color. */ gpio_data &= ~GPIO_LED_MASK; /* Set the new input LED color to GPIOD. */ gpio_data |= led_color; /* Set the modified gpio_data values */ if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); } else { wrt_reg_word(&reg->gpiod, gpio_data); rd_reg_word(&reg->gpiod); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } int qla2x00_beacon_on(struct scsi_qla_host *vha) { uint16_t gpio_enable; uint16_t gpio_data; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x709b, "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } /* Turn off LEDs. */ spin_lock_irqsave(&ha->hardware_lock, flags); if (ha->pio_address) { gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); } else { gpio_enable = rd_reg_word(&reg->gpioe); gpio_data = rd_reg_word(&reg->gpiod); } gpio_enable |= GPIO_LED_MASK; /* Set the modified gpio_enable values. */ if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); } else { wrt_reg_word(&reg->gpioe, gpio_enable); rd_reg_word(&reg->gpioe); } /* Clear out previously set LED colour. */ gpio_data &= ~GPIO_LED_MASK; if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); } else { wrt_reg_word(&reg->gpiod, gpio_data); rd_reg_word(&reg->gpiod); } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* * Let the per HBA timer kick off the blinking process based on * the following flags. No need to do anything else now. */ ha->beacon_blink_led = 1; ha->beacon_color_state = 0; return QLA_SUCCESS; } int qla2x00_beacon_off(struct scsi_qla_host *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; ha->beacon_blink_led = 0; /* Set the on flag so when it gets flipped it will be off. 
*/ if (IS_QLA2322(ha)) ha->beacon_color_state = QLA_LED_ALL_ON; else ha->beacon_color_state = QLA_LED_GRN_ON; ha->isp_ops->beacon_blink(vha); /* This turns green LED off */ ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; rval = qla2x00_set_fw_options(vha, ha->fw_options); if (rval != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x709c, "Unable to update fw options (beacon off).\n"); return rval; } static inline void qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags) { /* Flip all colors. */ if (ha->beacon_color_state == QLA_LED_ALL_ON) { /* Turn off. */ ha->beacon_color_state = 0; *pflags = 0; } else { /* Turn on. */ ha->beacon_color_state = QLA_LED_ALL_ON; *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON; } } void qla24xx_beacon_blink(struct scsi_qla_host *vha) { uint16_t led_color = 0; uint32_t gpio_data; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* Save the Original GPIOD. */ spin_lock_irqsave(&ha->hardware_lock, flags); gpio_data = rd_reg_dword(&reg->gpiod); /* Enable the gpio_data reg for update. */ gpio_data |= GPDX_LED_UPDATE_MASK; wrt_reg_dword(&reg->gpiod, gpio_data); gpio_data = rd_reg_dword(&reg->gpiod); /* Set the color bits. */ qla24xx_flip_colors(ha, &led_color); /* Clear out any previously set LED color. */ gpio_data &= ~GPDX_LED_COLOR_MASK; /* Set the new input LED color to GPIOD. */ gpio_data |= led_color; /* Set the modified gpio_data values. */ wrt_reg_dword(&reg->gpiod, gpio_data); gpio_data = rd_reg_dword(&reg->gpiod); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static uint32_t qla83xx_select_led_port(struct qla_hw_data *ha) { uint32_t led_select_value = 0; if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; if (ha->port_no == 0) led_select_value = QLA83XX_LED_PORT0; else led_select_value = QLA83XX_LED_PORT1; out: return led_select_value; } void qla83xx_beacon_blink(struct scsi_qla_host *vha) { uint32_t led_select_value; struct qla_hw_data *ha = vha->hw; uint16_t led_cfg[6]; uint16_t orig_led_cfg[6]; uint32_t led_10_value, led_43_value; if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return; if (!ha->beacon_blink_led) return; if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { qla2x00_write_ram_word(vha, 0x1003, 0x40000230); qla2x00_write_ram_word(vha, 0x1004, 0x40000230); } else if (IS_QLA2031(ha)) { led_select_value = qla83xx_select_led_port(ha); qla83xx_wr_reg(vha, led_select_value, 0x40000230); qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230); } else if (IS_QLA8031(ha)) { led_select_value = qla83xx_select_led_port(ha); qla83xx_rd_reg(vha, led_select_value, &led_10_value); qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value); qla83xx_wr_reg(vha, led_select_value, 0x01f44000); msleep(500); qla83xx_wr_reg(vha, led_select_value, 0x400001f4); msleep(1000); qla83xx_wr_reg(vha, led_select_value, led_10_value); qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value); } else if (IS_QLA81XX(ha)) { int rval; /* Save Current */ rval = qla81xx_get_led_config(vha, orig_led_cfg); /* Do the blink */ if (rval == QLA_SUCCESS) { if (IS_QLA81XX(ha)) { led_cfg[0] = 0x4000; led_cfg[1] = 0x2000; led_cfg[2] = 0; led_cfg[3] = 0; led_cfg[4] = 0; led_cfg[5] = 0; } else { led_cfg[0] = 0x4000; led_cfg[1] = 0x4000; led_cfg[2] = 0x4000; led_cfg[3] = 0x2000; led_cfg[4] = 0; led_cfg[5] = 0x2000; } rval = qla81xx_set_led_config(vha, led_cfg); msleep(1000); if (IS_QLA81XX(ha)) { led_cfg[0] = 0x4000; led_cfg[1] = 0x2000; 
led_cfg[2] = 0; } else { led_cfg[0] = 0x4000; led_cfg[1] = 0x2000; led_cfg[2] = 0x4000; led_cfg[3] = 0x4000; led_cfg[4] = 0; led_cfg[5] = 0x2000; } rval = qla81xx_set_led_config(vha, led_cfg); } /* On exit, restore original (presumes no status change) */ qla81xx_set_led_config(vha, orig_led_cfg); } } int qla24xx_beacon_on(struct scsi_qla_host *vha) { uint32_t gpio_data; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; if (IS_QLA8031(ha) || IS_QLA81XX(ha)) goto skip_gpio; /* let blink handle it */ if (ha->beacon_blink_led == 0) { /* Enable firmware for update */ ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7009, "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) goto skip_gpio; spin_lock_irqsave(&ha->hardware_lock, flags); gpio_data = rd_reg_dword(&reg->gpiod); /* Enable the gpio_data reg for update. */ gpio_data |= GPDX_LED_UPDATE_MASK; wrt_reg_dword(&reg->gpiod, gpio_data); rd_reg_dword(&reg->gpiod); spin_unlock_irqrestore(&ha->hardware_lock, flags); } /* So all colors blink together. */ ha->beacon_color_state = 0; skip_gpio: /* Let the per HBA timer kick off the blinking process. */ ha->beacon_blink_led = 1; return QLA_SUCCESS; } int qla24xx_beacon_off(struct scsi_qla_host *vha) { uint32_t gpio_data; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; if (!ha->flags.fw_started) return QLA_SUCCESS; ha->beacon_blink_led = 0; if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) goto set_fw_options; if (IS_QLA8031(ha) || IS_QLA81XX(ha)) return QLA_SUCCESS; ha->beacon_color_state = QLA_LED_ALL_ON; ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */ /* Give control back to firmware. */ spin_lock_irqsave(&ha->hardware_lock, flags); gpio_data = rd_reg_dword(&reg->gpiod); /* Disable the gpio_data reg for update. */ gpio_data &= ~GPDX_LED_UPDATE_MASK; wrt_reg_dword(&reg->gpiod, gpio_data); rd_reg_dword(&reg->gpiod); spin_unlock_irqrestore(&ha->hardware_lock, flags); set_fw_options: ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x704d, "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x704e, "Unable to update fw options (beacon on).\n"); return QLA_FUNCTION_FAILED; } return QLA_SUCCESS; } /* * Flash support routines */ /** * qla2x00_flash_enable() - Setup flash for reading and writing. * @ha: HA context */ static void qla2x00_flash_enable(struct qla_hw_data *ha) { uint16_t data; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; data = rd_reg_word(&reg->ctrl_status); data |= CSR_FLASH_ENABLE; wrt_reg_word(&reg->ctrl_status, data); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ } /** * qla2x00_flash_disable() - Disable flash and allow RISC to run. 
* @ha: HA context */ static void qla2x00_flash_disable(struct qla_hw_data *ha) { uint16_t data; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; data = rd_reg_word(&reg->ctrl_status); data &= ~(CSR_FLASH_ENABLE); wrt_reg_word(&reg->ctrl_status, data); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ } /** * qla2x00_read_flash_byte() - Reads a byte from flash * @ha: HA context * @addr: Address in flash to read * * A word is read from the chip, but, only the lower byte is valid. * * Returns the byte read from flash @addr. */ static uint8_t qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr) { uint16_t data; uint16_t bank_select; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; bank_select = rd_reg_word(&reg->ctrl_status); if (IS_QLA2322(ha) || IS_QLA6322(ha)) { /* Specify 64K address range: */ /* clear out Module Select and Flash Address bits [19:16]. */ bank_select &= ~0xf8; bank_select |= addr >> 12 & 0xf0; bank_select |= CSR_FLASH_64K_BANK; wrt_reg_word(&reg->ctrl_status, bank_select); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ wrt_reg_word(&reg->flash_address, (uint16_t)addr); data = rd_reg_word(&reg->flash_data); return (uint8_t)data; } /* Setup bit 16 of flash address. */ if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { bank_select |= CSR_FLASH_64K_BANK; wrt_reg_word(&reg->ctrl_status, bank_select); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ } else if (((addr & BIT_16) == 0) && (bank_select & CSR_FLASH_64K_BANK)) { bank_select &= ~(CSR_FLASH_64K_BANK); wrt_reg_word(&reg->ctrl_status, bank_select); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ } /* Always perform IO mapped accesses to the FLASH registers. */ if (ha->pio_address) { uint16_t data2; WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr); do { data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data)); barrier(); cpu_relax(); data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data)); } while (data != data2); } else { wrt_reg_word(&reg->flash_address, (uint16_t)addr); data = qla2x00_debounce_register(&reg->flash_data); } return (uint8_t)data; } /** * qla2x00_write_flash_byte() - Write a byte to flash * @ha: HA context * @addr: Address in flash to write * @data: Data to write */ static void qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data) { uint16_t bank_select; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; bank_select = rd_reg_word(&reg->ctrl_status); if (IS_QLA2322(ha) || IS_QLA6322(ha)) { /* Specify 64K address range: */ /* clear out Module Select and Flash Address bits [19:16]. */ bank_select &= ~0xf8; bank_select |= addr >> 12 & 0xf0; bank_select |= CSR_FLASH_64K_BANK; wrt_reg_word(&reg->ctrl_status, bank_select); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ wrt_reg_word(&reg->flash_address, (uint16_t)addr); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ wrt_reg_word(&reg->flash_data, (uint16_t)data); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ return; } /* Setup bit 16 of flash address. */ if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { bank_select |= CSR_FLASH_64K_BANK; wrt_reg_word(&reg->ctrl_status, bank_select); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ } else if (((addr & BIT_16) == 0) && (bank_select & CSR_FLASH_64K_BANK)) { bank_select &= ~(CSR_FLASH_64K_BANK); wrt_reg_word(&reg->ctrl_status, bank_select); rd_reg_word(&reg->ctrl_status); /* PCI Posting. */ } /* Always perform IO mapped accesses to the FLASH registers. 
 */
	if (ha->pio_address) {
		WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
		WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
	} else {
		wrt_reg_word(&reg->flash_address, (uint16_t)addr);
		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
		wrt_reg_word(&reg->flash_data, (uint16_t)data);
		rd_reg_word(&reg->ctrl_status);	/* PCI Posting. */
	}
}

/**
 * qla2x00_poll_flash() - Polls flash for completion.
 * @ha: HA context
 * @addr: Address in flash to poll
 * @poll_data: Data to be polled
 * @man_id: Flash manufacturer ID
 * @flash_id: Flash ID
 *
 * This function polls the device until bit 7 of what is read matches data
 * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
 * out (a fatal error). The flash book recommends reading bit 7 again after
 * reading bit 5 as a 1.
 *
 * Returns 0 on success, else non-zero.
 */
static int
qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
    uint8_t man_id, uint8_t flash_id)
{
	int status;
	uint8_t flash_data;
	uint32_t cnt;

	status = 1;

	/* Wait for 30 seconds for command to finish. */
	poll_data &= BIT_7;
	for (cnt = 3000000; cnt; cnt--) {
		flash_data = qla2x00_read_flash_byte(ha, addr);
		if ((flash_data & BIT_7) == poll_data) {
			status = 0;
			break;
		}

		if (man_id != 0x40 && man_id != 0xda) {
			if ((flash_data & BIT_5) && cnt > 2)
				cnt = 2;
		}
		udelay(10);
		barrier();
		cond_resched();
	}
	return status;
}

/**
 * qla2x00_program_flash_address() - Programs a flash address
 * @ha: HA context
 * @addr: Address in flash to program
 * @data: Data to be written in flash
 * @man_id: Flash manufacturer ID
 * @flash_id: Flash ID
 *
 * Returns 0 on success, else non-zero.
 */
static int
qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
    uint8_t data, uint8_t man_id, uint8_t flash_id)
{
	/* Write Program Command Sequence. */
	if (IS_OEM_001(ha)) {
		qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
		qla2x00_write_flash_byte(ha, 0x555, 0x55);
		qla2x00_write_flash_byte(ha, 0xaaa, 0xa0);
		qla2x00_write_flash_byte(ha, addr, data);
	} else {
		if (man_id == 0xda && flash_id == 0xc1) {
			qla2x00_write_flash_byte(ha, addr, data);
			if (addr & 0x7e)
				return 0;
		} else {
			qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
			qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
			qla2x00_write_flash_byte(ha, 0x5555, 0xa0);
			qla2x00_write_flash_byte(ha, addr, data);
		}
	}

	udelay(150);

	/* Wait for write to complete. */
	return qla2x00_poll_flash(ha, addr, data, man_id, flash_id);
}

/**
 * qla2x00_erase_flash() - Erase the flash.
 * @ha: HA context
 * @man_id: Flash manufacturer ID
 * @flash_id: Flash ID
 *
 * Returns 0 on success, else non-zero.
 */
static int
qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
{
	/* Individual Sector Erase Command Sequence */
	if (IS_OEM_001(ha)) {
		qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
		qla2x00_write_flash_byte(ha, 0x555, 0x55);
		qla2x00_write_flash_byte(ha, 0xaaa, 0x80);
		qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
		qla2x00_write_flash_byte(ha, 0x555, 0x55);
		qla2x00_write_flash_byte(ha, 0xaaa, 0x10);
	} else {
		qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
		qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
		qla2x00_write_flash_byte(ha, 0x5555, 0x80);
		qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
		qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
		qla2x00_write_flash_byte(ha, 0x5555, 0x10);
	}

	udelay(150);

	/* Wait for erase to complete. */
	return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id);
}

/**
 * qla2x00_erase_flash_sector() - Erase a flash sector.
* @ha: HA context * @addr: Flash sector to erase * @sec_mask: Sector address mask * @man_id: Flash manufacturer ID * @flash_id: Flash ID * * Returns 0 on success, else non-zero. */ static int qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr, uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) { /* Individual Sector Erase Command Sequence */ qla2x00_write_flash_byte(ha, 0x5555, 0xaa); qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); qla2x00_write_flash_byte(ha, 0x5555, 0x80); qla2x00_write_flash_byte(ha, 0x5555, 0xaa); qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); if (man_id == 0x1f && flash_id == 0x13) qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10); else qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30); udelay(150); /* Wait for erase to complete. */ return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id); } /** * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. * @ha: host adapter * @man_id: Flash manufacturer ID * @flash_id: Flash ID */ static void qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id, uint8_t *flash_id) { qla2x00_write_flash_byte(ha, 0x5555, 0xaa); qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); qla2x00_write_flash_byte(ha, 0x5555, 0x90); *man_id = qla2x00_read_flash_byte(ha, 0x0000); *flash_id = qla2x00_read_flash_byte(ha, 0x0001); qla2x00_write_flash_byte(ha, 0x5555, 0xaa); qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); qla2x00_write_flash_byte(ha, 0x5555, 0xf0); } static void qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf, uint32_t saddr, uint32_t length) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t midpoint, ilength; uint8_t data; midpoint = length / 2; wrt_reg_word(&reg->nvram, 0); rd_reg_word(&reg->nvram); for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) { if (ilength == midpoint) { wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); } data = qla2x00_read_flash_byte(ha, saddr); if (saddr % 100) udelay(10); *tmp_buf = data; cond_resched(); } } static inline void qla2x00_suspend_hba(struct scsi_qla_host *vha) { int cnt; unsigned long flags; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Suspend HBA. */ scsi_block_requests(vha->host); ha->isp_ops->disable_intrs(ha); set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Pause RISC. */ spin_lock_irqsave(&ha->hardware_lock, flags); wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC); rd_reg_word(&reg->hccr); if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0) break; udelay(100); } } else { udelay(10); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } static inline void qla2x00_resume_hba(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; /* Resume HBA. */ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); } void * qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { uint32_t addr, midpoint; uint8_t *data; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Suspend HBA. */ qla2x00_suspend_hba(vha); /* Go with read. */ midpoint = ha->optrom_size / 2; qla2x00_flash_enable(ha); wrt_reg_word(&reg->nvram, 0); rd_reg_word(&reg->nvram); /* PCI Posting. 
*/ for (addr = offset, data = buf; addr < length; addr++, data++) { if (addr == midpoint) { wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); /* PCI Posting. */ } *data = qla2x00_read_flash_byte(ha, addr); } qla2x00_flash_disable(ha); /* Resume HBA. */ qla2x00_resume_hba(vha); return buf; } int qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; uint8_t man_id, flash_id, sec_number, *data; uint16_t wd; uint32_t addr, liter, sec_mask, rest_addr; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Suspend HBA. */ qla2x00_suspend_hba(vha); rval = QLA_SUCCESS; sec_number = 0; /* Reset ISP chip. */ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); /* Go with write. */ qla2x00_flash_enable(ha); do { /* Loop once to provide quick error exit */ /* Structure of flash memory based on manufacturer */ if (IS_OEM_001(ha)) { /* OEM variant with special flash part. */ man_id = flash_id = 0; rest_addr = 0xffff; sec_mask = 0x10000; goto update_flash; } qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id); switch (man_id) { case 0x20: /* ST flash. */ if (flash_id == 0xd2 || flash_id == 0xe3) { /* * ST m29w008at part - 64kb sector size with * 32kb,8kb,8kb,16kb sectors at memory address * 0xf0000. */ rest_addr = 0xffff; sec_mask = 0x10000; break; } /* * ST m29w010b part - 16kb sector size * Default to 16kb sectors */ rest_addr = 0x3fff; sec_mask = 0x1c000; break; case 0x40: /* Mostel flash. */ /* Mostel v29c51001 part - 512 byte sector size. */ rest_addr = 0x1ff; sec_mask = 0x1fe00; break; case 0xbf: /* SST flash. */ /* SST39sf10 part - 4kb sector size. */ rest_addr = 0xfff; sec_mask = 0x1f000; break; case 0xda: /* Winbond flash. */ /* Winbond W29EE011 part - 256 byte sector size. */ rest_addr = 0x7f; sec_mask = 0x1ff80; break; case 0xc2: /* Macronix flash. */ /* 64k sector size. */ if (flash_id == 0x38 || flash_id == 0x4f) { rest_addr = 0xffff; sec_mask = 0x10000; break; } fallthrough; case 0x1f: /* Atmel flash. */ /* 512k sector size. */ if (flash_id == 0x13) { rest_addr = 0x7fffffff; sec_mask = 0x80000000; break; } fallthrough; case 0x01: /* AMD flash. */ if (flash_id == 0x38 || flash_id == 0x40 || flash_id == 0x4f) { /* Am29LV081 part - 64kb sector size. */ /* Am29LV002BT part - 64kb sector size. */ rest_addr = 0xffff; sec_mask = 0x10000; break; } else if (flash_id == 0x3e) { /* * Am29LV008b part - 64kb sector size with * 32kb,8kb,8kb,16kb sector at memory address * h0xf0000. */ rest_addr = 0xffff; sec_mask = 0x10000; break; } else if (flash_id == 0x20 || flash_id == 0x6e) { /* * Am29LV010 part or AM29f010 - 16kb sector * size. */ rest_addr = 0x3fff; sec_mask = 0x1c000; break; } else if (flash_id == 0x6d) { /* Am29LV001 part - 8kb sector size. */ rest_addr = 0x1fff; sec_mask = 0x1e000; break; } fallthrough; default: /* Default to 16 kb sector size. */ rest_addr = 0x3fff; sec_mask = 0x1c000; break; } update_flash: if (IS_QLA2322(ha) || IS_QLA6322(ha)) { if (qla2x00_erase_flash(ha, man_id, flash_id)) { rval = QLA_FUNCTION_FAILED; break; } } for (addr = offset, liter = 0; liter < length; liter++, addr++) { data = buf + liter; /* Are we at the beginning of a sector? 
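 * (a new sector begins when the low address bits covered by rest_addr are all zero,
 *  i.e. addr sits on a sector boundary for the detected flash part)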
*/ if ((addr & rest_addr) == 0) { if (IS_QLA2322(ha) || IS_QLA6322(ha)) { if (addr >= 0x10000UL) { if (((addr >> 12) & 0xf0) && ((man_id == 0x01 && flash_id == 0x3e) || (man_id == 0x20 && flash_id == 0xd2))) { sec_number++; if (sec_number == 1) { rest_addr = 0x7fff; sec_mask = 0x18000; } else if ( sec_number == 2 || sec_number == 3) { rest_addr = 0x1fff; sec_mask = 0x1e000; } else if ( sec_number == 4) { rest_addr = 0x3fff; sec_mask = 0x1c000; } } } } else if (addr == ha->optrom_size / 2) { wrt_reg_word(&reg->nvram, NVR_SELECT); rd_reg_word(&reg->nvram); } if (flash_id == 0xda && man_id == 0xc1) { qla2x00_write_flash_byte(ha, 0x5555, 0xaa); qla2x00_write_flash_byte(ha, 0x2aaa, 0x55); qla2x00_write_flash_byte(ha, 0x5555, 0xa0); } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) { /* Then erase it */ if (qla2x00_erase_flash_sector(ha, addr, sec_mask, man_id, flash_id)) { rval = QLA_FUNCTION_FAILED; break; } if (man_id == 0x01 && flash_id == 0x6d) sec_number++; } } if (man_id == 0x01 && flash_id == 0x6d) { if (sec_number == 1 && addr == (rest_addr - 1)) { rest_addr = 0x0fff; sec_mask = 0x1f000; } else if (sec_number == 3 && (addr & 0x7ffe)) { rest_addr = 0x3fff; sec_mask = 0x1c000; } } if (qla2x00_program_flash_address(ha, addr, *data, man_id, flash_id)) { rval = QLA_FUNCTION_FAILED; break; } cond_resched(); } } while (0); qla2x00_flash_disable(ha); /* Resume HBA. */ qla2x00_resume_hba(vha); return rval; } void * qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { struct qla_hw_data *ha = vha->hw; /* Suspend HBA. */ scsi_block_requests(vha->host); set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with read. */ qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); /* Resume HBA. */ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); scsi_unblock_requests(vha->host); return buf; } static int qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, __le32 *buf, uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf) { uint32_t check_sum = 0; __le32 *p; int i; p = buf + buf_size_without_sfub; /* Extract SFUB from end of file */ memcpy(sfub_buf, (uint8_t *)p, sizeof(struct secure_flash_update_block)); for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++) check_sum += le32_to_cpu(p[i]); check_sum = (~check_sum) + 1; if (check_sum != le32_to_cpu(p[i])) { ql_log(ql_log_warn, vha, 0x7097, "SFUB checksum failed, 0x%x, 0x%x\n", check_sum, le32_to_cpu(p[i])); return QLA_COMMAND_ERROR; } return QLA_SUCCESS; } static int qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, struct qla_flt_region *region) { struct qla_hw_data *ha = vha->hw; struct qla_flt_header *flt = ha->flt; struct qla_flt_region *flt_reg = &flt->region[0]; uint16_t cnt; int rval = QLA_FUNCTION_FAILED; if (!ha->flt) return QLA_FUNCTION_FAILED; cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); for (; cnt; cnt--, flt_reg++) { if (le32_to_cpu(flt_reg->start) == start) { memcpy((uint8_t *)region, flt_reg, sizeof(struct qla_flt_region)); rval = QLA_SUCCESS; break; } } return rval; } static int qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { struct qla_hw_data *ha = vha->hw; ulong liter; ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */ uint32_t sec_mask, rest_addr, fdata; void *optrom = NULL; dma_addr_t optrom_dma; int rval, ret; struct secure_flash_update_block *sfub; dma_addr_t sfub_dma; uint32_t offset = faddr << 2; uint32_t buf_size_without_sfub = 0; struct 
qla_flt_region region; bool reset_to_rom = false; uint32_t risc_size, risc_attr = 0; __be32 *fw_array = NULL; /* Retrieve region info - must be a start address passed in */ rval = qla28xx_get_flash_region(vha, offset, &region); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Invalid address %x - not a region start address\n", offset); goto done; } /* Allocate dma buffer for burst write */ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { ql_log(ql_log_warn, vha, 0x7095, "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE); rval = QLA_COMMAND_ERROR; goto done; } /* * If adapter supports secure flash and region is secure * extract secure flash update block (SFUB) and verify */ if (ha->flags.secure_adapter && region.attribute) { ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Region %x is secure\n", le16_to_cpu(region.code)); switch (le16_to_cpu(region.code)) { case FLT_REG_FW: case FLT_REG_FW_SEC_27XX: case FLT_REG_MPI_PRI_28XX: case FLT_REG_MPI_SEC_28XX: fw_array = (__force __be32 *)dwptr; /* 1st fw array */ risc_size = be32_to_cpu(fw_array[3]); risc_attr = be32_to_cpu(fw_array[9]); buf_size_without_sfub = risc_size; fw_array += risc_size; /* 2nd fw array */ risc_size = be32_to_cpu(fw_array[3]); buf_size_without_sfub += risc_size; fw_array += risc_size; /* 1st dump template */ risc_size = be32_to_cpu(fw_array[2]); /* skip header and ignore checksum */ buf_size_without_sfub += risc_size; fw_array += risc_size; if (risc_attr & BIT_9) { /* 2nd dump template */ risc_size = be32_to_cpu(fw_array[2]); /* skip header and ignore checksum */ buf_size_without_sfub += risc_size; fw_array += risc_size; } break; case FLT_REG_PEP_PRI_28XX: case FLT_REG_PEP_SEC_28XX: fw_array = (__force __be32 *)dwptr; /* 1st fw array */ risc_size = be32_to_cpu(fw_array[3]); risc_attr = be32_to_cpu(fw_array[9]); buf_size_without_sfub = risc_size; fw_array += risc_size; break; default: ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Secure region %x not supported\n", le16_to_cpu(region.code)); rval = QLA_COMMAND_ERROR; goto done; } sfub = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct secure_flash_update_block), &sfub_dma, GFP_KERNEL); if (!sfub) { ql_log(ql_log_warn, vha, 0xffff, "Unable to allocate memory for SFUB\n"); rval = QLA_COMMAND_ERROR; goto done; } rval = qla28xx_extract_sfub_and_verify(vha, (__le32 *)dwptr, dwords, buf_size_without_sfub, (uint8_t *)sfub); if (rval != QLA_SUCCESS) goto done; ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "SFUB extract and verify successful\n"); } rest_addr = (ha->fdt_block_size >> 2) - 1; sec_mask = ~rest_addr; /* Lock semaphore */ rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Unable to lock flash semaphore."); goto done; } ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Unprotect flash...\n"); rval = qla24xx_unprotect_flash(vha); if (rval) { qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); ql_log(ql_log_warn, vha, 0x7096, "Failed unprotect flash\n"); goto done; } for (liter = 0; liter < dwords; liter++, faddr++) { fdata = (faddr & sec_mask) << 2; /* If start of sector */ if (!(faddr & rest_addr)) { ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Erase sector %#x...\n", faddr); rval = qla24xx_erase_sector(vha, fdata); if (rval) { ql_dbg(ql_dbg_user, vha, 0x7007, "Failed erase sector %#x\n", faddr); goto write_protect; } } } if (ha->flags.secure_adapter) { /* * If adapter supports secure flash but FW doesn't, * 
disable write protect, release semaphore and reset * chip to execute ROM code in order to update region securely */ if (!ha->flags.secure_fw) { ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Disable Write and Release Semaphore."); rval = qla24xx_protect_flash(vha); if (rval != QLA_SUCCESS) { qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); ql_log(ql_log_warn, vha, 0xffff, "Unable to protect flash."); goto done; } ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Reset chip to ROM."); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags); qla2xxx_wake_dpc(vha); rval = qla2x00_wait_for_chip_reset(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Unable to reset to ROM code."); goto done; } reset_to_rom = true; ha->flags.fac_supported = 0; ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Lock Semaphore"); rval = qla2xxx_write_remote_register(vha, FLASH_SEMAPHORE_REGISTER_ADDR, 0x00020002); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Unable to lock flash semaphore."); goto done; } /* Unprotect flash */ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Enable Write."); rval = qla2x00_write_ram_word(vha, 0x7ffd0101, 0); if (rval) { ql_log(ql_log_warn, vha, 0x7096, "Failed unprotect flash\n"); goto done; } } /* If region is secure, send Secure Flash MB Cmd */ if (region.attribute && buf_size_without_sfub) { ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Sending Secure Flash MB Cmd\n"); rval = qla28xx_secure_flash_update(vha, 0, le16_to_cpu(region.code), buf_size_without_sfub, sfub_dma, sizeof(struct secure_flash_update_block) >> 2); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Secure Flash MB Cmd failed %x.", rval); goto write_protect; } } } /* re-init flash offset */ faddr = offset >> 2; for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { fdata = (faddr & sec_mask) << 2; /* If smaller than a burst remaining */ if (dwords - liter < dburst) dburst = dwords - liter; /* Copy to dma buffer */ memcpy(optrom, dwptr, dburst << 2); /* Burst write */ ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Write burst (%#lx dwords)...\n", dburst); rval = qla2x00_load_ram(vha, optrom_dma, flash_data_addr(ha, faddr), dburst); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7097, "Failed burst write at %x (%p/%#llx)...\n", flash_data_addr(ha, faddr), optrom, (u64)optrom_dma); break; } liter += dburst - 1; faddr += dburst - 1; dwptr += dburst - 1; } write_protect: ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, "Protect flash...\n"); ret = qla24xx_protect_flash(vha); if (ret) { qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); ql_log(ql_log_warn, vha, 0x7099, "Failed protect flash\n"); rval = QLA_COMMAND_ERROR; } if (reset_to_rom == true) { /* Schedule DPC to restart the RISC */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); ret = qla2x00_wait_for_hba_online(vha); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Adapter did not come out of reset\n"); rval = QLA_COMMAND_ERROR; } } done: if (optrom) dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); return rval; } int qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; struct qla_hw_data *ha = vha->hw; /* Suspend HBA. */ scsi_block_requests(vha->host); set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with write. 
*/ if (IS_QLA28XX(ha)) rval = qla28xx_write_flash_data(vha, buf, offset >> 2, length >> 2); else rval = qla24xx_write_flash_data(vha, buf, offset >> 2, length >> 2); clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); scsi_unblock_requests(vha->host); return rval; } void * qla25xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; dma_addr_t optrom_dma; void *optrom; uint8_t *pbuf; uint32_t faddr, left, burst; struct qla_hw_data *ha = vha->hw; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) goto try_fast; if (offset & 0xfff) goto slow_read; if (length < OPTROM_BURST_SIZE) goto slow_read; try_fast: if (offset & 0xff) goto slow_read; optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { ql_log(ql_log_warn, vha, 0x00cc, "Unable to allocate memory for optrom burst read (%x KB).\n", OPTROM_BURST_SIZE / 1024); goto slow_read; } pbuf = buf; faddr = offset >> 2; left = length >> 2; burst = OPTROM_BURST_DWORDS; while (left != 0) { if (burst > left) burst = left; rval = qla2x00_dump_ram(vha, optrom_dma, flash_data_addr(ha, faddr), burst); if (rval) { ql_log(ql_log_warn, vha, 0x00f5, "Unable to burst-read optrom segment (%x/%x/%llx).\n", rval, flash_data_addr(ha, faddr), (unsigned long long)optrom_dma); ql_log(ql_log_warn, vha, 0x00f6, "Reverting to slow-read.\n"); dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); goto slow_read; } memcpy(pbuf, optrom, burst * 4); left -= burst; faddr += burst; pbuf += burst * 4; } dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); return buf; slow_read: return qla24xx_read_optrom_data(vha, buf, offset, length); } /** * qla2x00_get_fcode_version() - Determine an FCODE image's version. * @ha: HA context * @pcids: Pointer to the FCODE PCI data structure * * The process of retrieving the FCODE version information is at best * described as interesting. * * Within the first 100h bytes of the image an ASCII string is present * which contains several pieces of information including the FCODE * version. Unfortunately it seems the only reliable way to retrieve * the version is by scanning for another sentinel within the string, * the FCODE build date: * * ... 2.00.02 10/17/02 ... * * Returns QLA_SUCCESS on successful retrieval of version. */ static void qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids) { int ret = QLA_FUNCTION_FAILED; uint32_t istart, iend, iter, vend; uint8_t do_next, rbyte, *vbyte; memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); /* Skip the PCI data structure. */ istart = pcids + ((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) | qla2x00_read_flash_byte(ha, pcids + 0x0A)); iend = istart + 0x100; do { /* Scan for the sentinel date string...eeewww. */ do_next = 0; iter = istart; while ((iter < iend) && !do_next) { iter++; if (qla2x00_read_flash_byte(ha, iter) == '/') { if (qla2x00_read_flash_byte(ha, iter + 2) == '/') do_next++; else if (qla2x00_read_flash_byte(ha, iter + 3) == '/') do_next++; } } if (!do_next) break; /* Backtrack to previous ' ' (space). */ do_next = 0; while ((iter > istart) && !do_next) { iter--; if (qla2x00_read_flash_byte(ha, iter) == ' ') do_next++; } if (!do_next) break; /* * Mark end of version tag, and find previous ' ' (space) or * string length (recent FCODE images -- major hack ahead!!!). 
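 * (the backward scan below stops at a space, a CR (0x0d) or a DLE (0x10) byte,
 *  whichever delimits the version string)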
*/ vend = iter - 1; do_next = 0; while ((iter > istart) && !do_next) { iter--; rbyte = qla2x00_read_flash_byte(ha, iter); if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10) do_next++; } if (!do_next) break; /* Mark beginning of version tag, and copy data. */ iter++; if ((vend - iter) && ((vend - iter) < sizeof(ha->fcode_revision))) { vbyte = ha->fcode_revision; while (iter <= vend) { *vbyte++ = qla2x00_read_flash_byte(ha, iter); iter++; } ret = QLA_SUCCESS; } } while (0); if (ret != QLA_SUCCESS) memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); } int qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; uint8_t code_type, last_image; uint32_t pcihdr, pcids; uint8_t *dbyte; uint16_t *dcode; struct qla_hw_data *ha = vha->hw; if (!ha->pio_address || !mbuf) return QLA_FUNCTION_FAILED; memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); qla2x00_flash_enable(ha); /* Begin with first PCI expansion ROM header. */ pcihdr = 0; last_image = 1; do { /* Verify PCI expansion ROM header. */ if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0050, "No matching ROM signature.\n"); ret = QLA_FUNCTION_FAILED; break; } /* Locate PCI data structure. */ pcids = pcihdr + ((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) | qla2x00_read_flash_byte(ha, pcihdr + 0x18)); /* Validate signature of PCI data structure. */ if (qla2x00_read_flash_byte(ha, pcids) != 'P' || qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' || qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { /* Incorrect header. */ ql_log(ql_log_fatal, vha, 0x0051, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = QLA_FUNCTION_FAILED; break; } /* Read version */ code_type = qla2x00_read_flash_byte(ha, pcids + 0x14); switch (code_type) { case ROM_CODE_TYPE_BIOS: /* Intel x86, PC-AT compatible. */ ha->bios_revision[0] = qla2x00_read_flash_byte(ha, pcids + 0x12); ha->bios_revision[1] = qla2x00_read_flash_byte(ha, pcids + 0x13); ql_dbg(ql_dbg_init, vha, 0x0052, "Read BIOS %d.%d.\n", ha->bios_revision[1], ha->bios_revision[0]); break; case ROM_CODE_TYPE_FCODE: /* Open Firmware standard for PCI (FCode). */ /* Eeeewww... */ qla2x00_get_fcode_version(ha, pcids); break; case ROM_CODE_TYPE_EFI: /* Extensible Firmware Interface (EFI). */ ha->efi_revision[0] = qla2x00_read_flash_byte(ha, pcids + 0x12); ha->efi_revision[1] = qla2x00_read_flash_byte(ha, pcids + 0x13); ql_dbg(ql_dbg_init, vha, 0x0053, "Read EFI %d.%d.\n", ha->efi_revision[1], ha->efi_revision[0]); break; default: ql_log(ql_log_warn, vha, 0x0054, "Unrecognized code type %x at pcids %x.\n", code_type, pcids); break; } last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7; /* Locate next PCI expansion ROM. */ pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) | qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512; } while (!last_image); if (IS_QLA2322(ha)) { /* Read firmware image information. 
*/ memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); dbyte = mbuf; memset(dbyte, 0, 8); dcode = (uint16_t *)dbyte; qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 8); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a, "Dumping fw " "ver from flash:.\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b, dbyte, 32); if ((dcode[0] == 0xffff && dcode[1] == 0xffff && dcode[2] == 0xffff && dcode[3] == 0xffff) || (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && dcode[3] == 0)) { ql_log(ql_log_warn, vha, 0x0057, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4); } else { /* values are in big endian */ ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; ql_dbg(ql_dbg_init, vha, 0x0058, "FW Version: " "%d.%d.%d.\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2]); } } qla2x00_flash_disable(ha); return ret; } int qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; uint32_t pcihdr, pcids; uint32_t *dcode = mbuf; uint8_t *bcode = mbuf; uint8_t code_type, last_image; struct qla_hw_data *ha = vha->hw; if (!mbuf) return QLA_FUNCTION_FAILED; memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); /* Begin with first PCI expansion ROM header. */ pcihdr = ha->flt_region_boot << 2; last_image = 1; do { /* Verify PCI expansion ROM header. */ ha->isp_ops->read_optrom(vha, dcode, pcihdr, 0x20 * 4); bcode = mbuf + (pcihdr % 4); if (memcmp(bcode, "\x55\xaa", 2)) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0154, "No matching ROM signature.\n"); ret = QLA_FUNCTION_FAILED; break; } /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); ha->isp_ops->read_optrom(vha, dcode, pcids, 0x20 * 4); bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. */ if (memcmp(bcode, "PCIR", 4)) { /* Incorrect header. */ ql_log(ql_log_fatal, vha, 0x0155, "PCI data struct not found pcir_adr=%x.\n", pcids); ret = QLA_FUNCTION_FAILED; break; } /* Read version */ code_type = bcode[0x14]; switch (code_type) { case ROM_CODE_TYPE_BIOS: /* Intel x86, PC-AT compatible. */ ha->bios_revision[0] = bcode[0x12]; ha->bios_revision[1] = bcode[0x13]; ql_dbg(ql_dbg_init, vha, 0x0156, "Read BIOS %d.%d.\n", ha->bios_revision[1], ha->bios_revision[0]); break; case ROM_CODE_TYPE_FCODE: /* Open Firmware standard for PCI (FCode). */ ha->fcode_revision[0] = bcode[0x12]; ha->fcode_revision[1] = bcode[0x13]; ql_dbg(ql_dbg_init, vha, 0x0157, "Read FCODE %d.%d.\n", ha->fcode_revision[1], ha->fcode_revision[0]); break; case ROM_CODE_TYPE_EFI: /* Extensible Firmware Interface (EFI). */ ha->efi_revision[0] = bcode[0x12]; ha->efi_revision[1] = bcode[0x13]; ql_dbg(ql_dbg_init, vha, 0x0158, "Read EFI %d.%d.\n", ha->efi_revision[1], ha->efi_revision[0]); break; default: ql_log(ql_log_warn, vha, 0x0159, "Unrecognized code type %x at pcids %x.\n", code_type, pcids); break; } last_image = bcode[0x15] & BIT_7; /* Locate next PCI expansion ROM. */ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; } while (!last_image); /* Read firmware image information. */ memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); dcode = mbuf; ha->isp_ops->read_optrom(vha, dcode, ha->flt_region_fw << 2, 0x20); bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. 
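 * (the check below matches the leading bytes of the firmware image,
 *  03 00 40 40, rather than a "PCIR" signature)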
*/ if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 && bcode[0x2] == 0x40 && bcode[0x3] == 0x40) { ha->fw_revision[0] = bcode[0x4]; ha->fw_revision[1] = bcode[0x5]; ha->fw_revision[2] = bcode[0x6]; ql_dbg(ql_dbg_init, vha, 0x0153, "Firmware revision %d.%d.%d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2]); } return ret; } int qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; uint32_t pcihdr = 0, pcids = 0; uint32_t *dcode = mbuf; uint8_t *bcode = mbuf; uint8_t code_type, last_image; int i; struct qla_hw_data *ha = vha->hw; uint32_t faddr = 0; struct active_regions active_regions = { }; if (IS_P3P_TYPE(ha)) return ret; if (!mbuf) return QLA_FUNCTION_FAILED; memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); pcihdr = ha->flt_region_boot << 2; if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { qla27xx_get_active_image(vha, &active_regions); if (active_regions.global == QLA27XX_SECONDARY_IMAGE) { pcihdr = ha->flt_region_boot_sec << 2; } } do { /* Verify PCI expansion ROM header. */ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); bcode = mbuf + (pcihdr % 4); if (memcmp(bcode, "\x55\xaa", 2)) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0059, "No matching ROM signature.\n"); ret = QLA_FUNCTION_FAILED; break; } /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. */ if (memcmp(bcode, "PCIR", 4)) { /* Incorrect header. */ ql_log(ql_log_fatal, vha, 0x005a, "PCI data struct not found pcir_adr=%x.\n", pcids); ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32); ret = QLA_FUNCTION_FAILED; break; } /* Read version */ code_type = bcode[0x14]; switch (code_type) { case ROM_CODE_TYPE_BIOS: /* Intel x86, PC-AT compatible. */ ha->bios_revision[0] = bcode[0x12]; ha->bios_revision[1] = bcode[0x13]; ql_dbg(ql_dbg_init, vha, 0x005b, "Read BIOS %d.%d.\n", ha->bios_revision[1], ha->bios_revision[0]); break; case ROM_CODE_TYPE_FCODE: /* Open Firmware standard for PCI (FCode). */ ha->fcode_revision[0] = bcode[0x12]; ha->fcode_revision[1] = bcode[0x13]; ql_dbg(ql_dbg_init, vha, 0x005c, "Read FCODE %d.%d.\n", ha->fcode_revision[1], ha->fcode_revision[0]); break; case ROM_CODE_TYPE_EFI: /* Extensible Firmware Interface (EFI). */ ha->efi_revision[0] = bcode[0x12]; ha->efi_revision[1] = bcode[0x13]; ql_dbg(ql_dbg_init, vha, 0x005d, "Read EFI %d.%d.\n", ha->efi_revision[1], ha->efi_revision[0]); break; default: ql_log(ql_log_warn, vha, 0x005e, "Unrecognized code type %x at pcids %x.\n", code_type, pcids); break; } last_image = bcode[0x15] & BIT_7; /* Locate next PCI expansion ROM. */ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; } while (!last_image); /* Read firmware image information. 
*/ memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); faddr = ha->flt_region_fw; if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { qla27xx_get_active_image(vha, &active_regions); if (active_regions.global == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_fw_sec; } qla24xx_read_flash_data(vha, dcode, faddr, 8); if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_warn, vha, 0x005f, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4); ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); } else { for (i = 0; i < 4; i++) ha->fw_revision[i] = be32_to_cpu((__force __be32)dcode[4+i]); ql_dbg(ql_dbg_init, vha, 0x0060, "Firmware revision (flash) %u.%u.%u (%x).\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } /* Check for golden firmware and get version if available */ if (!IS_QLA81XX(ha)) { /* Golden firmware is not present in non 81XX adapters */ return ret; } memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); faddr = ha->flt_region_gold_fw; qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_warn, vha, 0x0056, "Unrecognized golden fw at %#x.\n", faddr); ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); return ret; } for (i = 0; i < 4; i++) ha->gold_fw_version[i] = be32_to_cpu((__force __be32)dcode[4+i]); return ret; } static int qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end) { if (pos >= end || *pos != 0x82) return 0; pos += 3 + pos[1]; if (pos >= end || *pos != 0x90) return 0; pos += 3 + pos[1]; if (pos >= end || *pos != 0x78) return 0; return 1; } int qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size) { struct qla_hw_data *ha = vha->hw; uint8_t *pos = ha->vpd; uint8_t *end = pos + ha->vpd_size; int len = 0; if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end)) return 0; while (pos < end && *pos != 0x78) { len = (*pos == 0x82) ? pos[1] : pos[2]; if (!strncmp(pos, key, strlen(key))) break; if (*pos != 0x90 && *pos != 0x91) pos += len; pos += 3; } if (pos < end - len && *pos != 0x78) return scnprintf(str, size, "%.*s", len, pos + 3); return 0; } int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) { int len, max_len; uint32_t fcp_prio_addr; struct qla_hw_data *ha = vha->hw; if (!ha->fcp_prio_cfg) { ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); if (!ha->fcp_prio_cfg) { ql_log(ql_log_warn, vha, 0x00d5, "Unable to allocate memory for fcp priority data (%x).\n", FCP_PRIO_CFG_SIZE); return QLA_FUNCTION_FAILED; } } memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); fcp_prio_addr = ha->flt_region_fcp_prio; /* first read the fcp priority data header from flash */ ha->isp_ops->read_optrom(vha, ha->fcp_prio_cfg, fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0)) goto fail; /* read remaining FCP CMD config data from flash */ fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2); len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry); max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE; ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0], fcp_prio_addr << 2, (len < max_len ? len : max_len)); /* revalidate the entire FCP priority config data, including entries */ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) goto fail; ha->flags.fcp_prio_enabled = 1; return QLA_SUCCESS; fail: vfree(ha->fcp_prio_cfg); ha->fcp_prio_cfg = NULL; return QLA_FUNCTION_FAILED; }
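
/*
 * Illustrative sketch (not part of the driver): the version-probe routines
 * above -- qla2x00_get_flash_version(), qla82xx_get_flash_version() and
 * qla24xx_get_flash_version() -- all walk the chain of PCI expansion ROM
 * images the same way: check the 55 AA signature, follow the 16-bit pointer
 * at offset 0x18 to the "PCIR" data structure, read the code type at 0x14,
 * and advance by the image length at 0x10 (in 512-byte units) until the
 * last-image bit (offset 0x15, bit 7) is set.  The self-contained userspace
 * helper below shows that walk over an option-ROM dump already read into
 * memory; rom_walk(), dump and len are names invented for this sketch.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void rom_walk(const uint8_t *dump, size_t len)
{
	size_t pcihdr = 0;

	for (;;) {
		size_t pcids;
		uint8_t code_type, last_image;

		/* Expansion ROM header must start with 55 AA. */
		if (pcihdr + 0x1a > len ||
		    dump[pcihdr] != 0x55 || dump[pcihdr + 0x01] != 0xaa) {
			printf("no ROM signature at %#zx\n", pcihdr);
			return;
		}

		/* Little-endian pointer to the PCI data structure. */
		pcids = pcihdr +
		    ((dump[pcihdr + 0x19] << 8) | dump[pcihdr + 0x18]);
		if (pcids + 0x16 > len || dump[pcids] != 'P' ||
		    dump[pcids + 1] != 'C' || dump[pcids + 2] != 'I' ||
		    dump[pcids + 3] != 'R') {
			printf("no PCIR structure at %#zx\n", pcids);
			return;
		}

		code_type = dump[pcids + 0x14];	/* 0=BIOS, 1=FCode, 3=EFI */
		last_image = dump[pcids + 0x15] & 0x80;
		printf("image at %#zx: code type %d%s\n", pcihdr, code_type,
		    last_image ? " (last)" : "");
		if (last_image)
			return;

		/* Image length at 0x10 is counted in 512-byte blocks. */
		pcihdr += ((dump[pcids + 0x11] << 8) | dump[pcids + 0x10]) * 512;
	}
}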
linux-master
drivers/scsi/qla2xxx/qla_sup.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_target.h" #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_tcq.h> static int qla_start_scsi_type6(srb_t *sp); /** * qla2x00_get_cmd_direction() - Determine control_flag data direction. * @sp: SCSI command * * Returns the proper CF_* direction based on CDB. */ static inline uint16_t qla2x00_get_cmd_direction(srb_t *sp) { uint16_t cflags; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->vha; cflags = 0; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cflags = CF_WRITE; vha->qla_stats.output_bytes += scsi_bufflen(cmd); vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cflags = CF_READ; vha->qla_stats.input_bytes += scsi_bufflen(cmd); vha->qla_stats.input_requests++; } return (cflags); } /** * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and * Continuation Type 0 IOCBs to allocate. * * @dsds: number of data segment descriptors needed * * Returns the number of IOCB entries needed to store @dsds. */ uint16_t qla2x00_calc_iocbs_32(uint16_t dsds) { uint16_t iocbs; iocbs = 1; if (dsds > 3) { iocbs += (dsds - 3) / 7; if ((dsds - 3) % 7) iocbs++; } return (iocbs); } /** * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and * Continuation Type 1 IOCBs to allocate. * * @dsds: number of data segment descriptors needed * * Returns the number of IOCB entries needed to store @dsds. */ uint16_t qla2x00_calc_iocbs_64(uint16_t dsds) { uint16_t iocbs; iocbs = 1; if (dsds > 2) { iocbs += (dsds - 2) / 5; if ((dsds - 2) % 5) iocbs++; } return (iocbs); } /** * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB. * @vha: HA context * * Returns a pointer to the Continuation Type 0 IOCB packet. */ static inline cont_entry_t * qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) { cont_entry_t *cont_pkt; struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } cont_pkt = (cont_entry_t *)req->ring_ptr; /* Load packet defaults. */ put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type); return (cont_pkt); } /** * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB. * @vha: HA context * @req: request queue * * Returns a pointer to the continuation type 1 IOCB packet. */ cont_a64_entry_t * qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) { cont_a64_entry_t *cont_pkt; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; /* Load packet defaults. */ put_unaligned_le32(IS_QLAFX00(vha->hw) ? 
CONTINUE_A64_TYPE_FX00 : CONTINUE_A64_TYPE, &cont_pkt->entry_type); return (cont_pkt); } inline int qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); /* We always use DIFF Bundling for best performance */ *fw_prot_opts = 0; /* Translate SCSI opcode to a protection opcode */ switch (scsi_get_prot_op(cmd)) { case SCSI_PROT_READ_STRIP: *fw_prot_opts |= PO_MODE_DIF_REMOVE; break; case SCSI_PROT_WRITE_INSERT: *fw_prot_opts |= PO_MODE_DIF_INSERT; break; case SCSI_PROT_READ_INSERT: *fw_prot_opts |= PO_MODE_DIF_INSERT; break; case SCSI_PROT_WRITE_STRIP: *fw_prot_opts |= PO_MODE_DIF_REMOVE; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM) *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; else *fw_prot_opts |= PO_MODE_DIF_PASS; break; default: /* Normal Request */ *fw_prot_opts |= PO_MODE_DIF_PASS; break; } if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK)) *fw_prot_opts |= PO_DISABLE_GUARD_CHECK; return scsi_prot_sg_count(cmd); } /* * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit * capable IOCB types. * * @sp: SRB command to process * @cmd_pkt: Command type 2 IOCB * @tot_dsds: Total number of segments to transfer */ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, uint16_t tot_dsds) { uint16_t avail_dsds; struct dsd32 *cur_dsd; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; struct scatterlist *sg; int i; cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 2 IOCB */ put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { cmd_pkt->byte_count = cpu_to_le32(0); return; } vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 2 IOCB */ avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32); cur_dsd = cmd_pkt->dsd32; /* Load data segments */ scsi_for_each_sg(cmd, sg, tot_dsds, i) { cont_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Seven DSDs are available in the Continuation * Type 0 IOCB. */ cont_pkt = qla2x00_prep_cont_type0_iocb(vha); cur_dsd = cont_pkt->dsd; avail_dsds = ARRAY_SIZE(cont_pkt->dsd); } append_dsd32(&cur_dsd, sg); avail_dsds--; } } /** * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit * capable IOCB types. * * @sp: SRB command to process * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer */ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, uint16_t tot_dsds) { uint16_t avail_dsds; struct dsd64 *cur_dsd; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; struct scatterlist *sg; int i; cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 3 IOCB */ put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { cmd_pkt->byte_count = cpu_to_le32(0); return; } vha = sp->vha; cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are available in the Command Type 3 IOCB */ avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); cur_dsd = cmd_pkt->dsd64; /* Load data segments */ scsi_for_each_sg(cmd, sg, tot_dsds, i) { cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Continuation * Type 1 IOCB. 
*/ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = cont_pkt->dsd; avail_dsds = ARRAY_SIZE(cont_pkt->dsd); } append_dsd64(&cur_dsd, sg); avail_dsds--; } } /* * Find the first handle that is not in use, starting from * req->current_outstanding_cmd + 1. The caller must hold the lock that is * associated with @req. */ uint32_t qla2xxx_get_next_handle(struct req_que *req) { uint32_t index, handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { handle++; if (handle == req->num_outstanding_cmds) handle = 1; if (!req->outstanding_cmds[handle]) return handle; } return 0; } /** * qla2x00_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. */ int qla2x00_start_scsi(srb_t *sp) { int nseg; unsigned long flags; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; uint32_t *clr_ptr; uint32_t handle; cmd_entry_t *cmd_pkt; uint16_t cnt; uint16_t req_cnt; uint16_t tot_dsds; struct device_reg_2xxx __iomem *reg; struct qla_hw_data *ha; struct req_que *req; struct rsp_que *rsp; /* Setup device pointers. */ vha = sp->vha; ha = vha->hw; reg = &ha->iobase->isp; cmd = GET_CMD_SP(sp); req = ha->req_q_map[0]; rsp = ha->rsp_q_map[0]; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { return (QLA_FUNCTION_FAILED); } vha->marker_needed = 0; } /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else nseg = 0; tot_dsds = nseg; /* Calculate the number of request entries needed. */ req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); if (req->cnt < (req_cnt + 2)) { cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); /* If still no head room then bail out */ if (req->cnt < (req_cnt + 2)) goto queuing_error; } /* Build command packet */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; cmd_pkt = (cmd_entry_t *)req->ring_ptr; cmd_pkt->handle = handle; /* Zero out remaining portion of packet. */ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Set target ID and LUN number*/ SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); cmd_pkt->lun = cpu_to_le16(cmd->device->lun); cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG); /* Load SCSI command packet. */ memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. 
*/ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla2x00_process_response_queue(rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (QLA_SUCCESS); queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (QLA_FUNCTION_FAILED); } /** * qla2x00_start_iocbs() - Execute the IOCB command * @vha: HA context * @req: request queue */ void qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) { struct qla_hw_data *ha = vha->hw; device_reg_t *reg = ISP_QUE_REG(ha, req->id); if (IS_P3P_TYPE(ha)) { qla82xx_start_iocbs(vha); } else { /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; /* Set chip new ring index. */ if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { wrt_reg_dword(req->req_q_in, req->ring_index); } else if (IS_QLA83XX(ha)) { wrt_reg_dword(req->req_q_in, req->ring_index); rd_reg_dword_relaxed(&ha->iobase->isp24.hccr); } else if (IS_QLAFX00(ha)) { wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index); rd_reg_dword_relaxed(&reg->ispfx00.req_q_in); QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); } else if (IS_FWI2_CAPABLE(ha)) { wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index); rd_reg_dword_relaxed(&reg->isp24.req_q_in); } else { wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index); rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp)); } } } /** * __qla2x00_marker() - Send a marker IOCB to the firmware. * @vha: HA context * @qpair: queue pair pointer * @loop_id: loop ID * @lun: LUN * @type: marker modifier * * Can be called from both normal and interrupt context. * * Returns non-zero if a failure occurred, else zero. */ static int __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, uint16_t loop_id, uint64_t lun, uint8_t type) { mrk_entry_t *mrk; struct mrk_entry_24xx *mrk24 = NULL; struct req_que *req = qpair->req; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (mrk == NULL) { ql_log(ql_log_warn, base_vha, 0x3026, "Failed to allocate Marker IOCB.\n"); return (QLA_FUNCTION_FAILED); } mrk24 = (struct mrk_entry_24xx *)mrk; mrk->entry_type = MARKER_TYPE; mrk->modifier = type; if (type != MK_SYNC_ALL) { if (IS_FWI2_CAPABLE(ha)) { mrk24->nport_handle = cpu_to_le16(loop_id); int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16((uint16_t)lun); } } if (IS_FWI2_CAPABLE(ha)) mrk24->handle = QLA_SKIP_HANDLE; wmb(); qla2x00_start_iocbs(vha, req); return (QLA_SUCCESS); } int qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, uint16_t loop_id, uint64_t lun, uint8_t type) { int ret; unsigned long flags = 0; spin_lock_irqsave(qpair->qp_lock_ptr, flags); ret = __qla2x00_marker(vha, qpair, loop_id, lun, type); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return (ret); } /* * qla2x00_issue_marker * * Issue marker * Caller CAN have hardware lock held as specified by ha_locked parameter. * Might release it, then reaquire. 
*/ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) { if (ha_locked) { if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; } else { if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; return QLA_SUCCESS; } static inline int qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, uint16_t tot_dsds) { struct dsd64 *cur_dsd = NULL, *next_dsd; struct scsi_cmnd *cmd; struct scatterlist *cur_seg; uint8_t avail_dsds; uint8_t first_iocb = 1; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; struct ct6_dsd *ctx; struct qla_qpair *qpair = sp->qpair; cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 3 IOCB */ put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE || tot_dsds == 0) { cmd_pkt->byte_count = cpu_to_le32(0); return 0; } /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); qpair->counters.output_bytes += scsi_bufflen(cmd); qpair->counters.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); qpair->counters.input_bytes += scsi_bufflen(cmd); qpair->counters.input_requests++; } cur_seg = scsi_sglist(cmd); ctx = &sp->u.scmd.ct6_ctx; while (tot_dsds) { avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? QLA_DSDS_PER_IOCB : tot_dsds; tot_dsds -= avail_dsds; dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list); next_dsd = dsd_ptr->dsd_addr; list_del(&dsd_ptr->list); qpair->dsd_avail--; list_add_tail(&dsd_ptr->list, &ctx->dsd_list); ctx->dsd_use_cnt++; qpair->dsd_inuse++; if (first_iocb) { first_iocb = 0; put_unaligned_le64(dsd_ptr->dsd_list_dma, &cmd_pkt->fcp_dsd.address); cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len); } else { put_unaligned_le64(dsd_ptr->dsd_list_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(dsd_list_len); cur_dsd++; } cur_dsd = next_dsd; while (avail_dsds) { append_dsd64(&cur_dsd, cur_seg); cur_seg = sg_next(cur_seg); avail_dsds--; } } /* Null termination */ cur_dsd->address = 0; cur_dsd->length = 0; cur_dsd++; cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); return 0; } /* * qla24xx_calc_dsd_lists() - Determine number of DSD list required * for Command Type 6. * * @dsds: number of data segment descriptors needed * * Returns the number of dsd list needed to store @dsds. */ static inline uint16_t qla24xx_calc_dsd_lists(uint16_t dsds) { uint16_t dsd_lists = 0; dsd_lists = (dsds/QLA_DSDS_PER_IOCB); if (dsds % QLA_DSDS_PER_IOCB) dsd_lists++; return dsd_lists; } /** * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 * IOCB types. 
* * @sp: SRB command to process * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer * @req: pointer to request queue */ inline void qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, uint16_t tot_dsds, struct req_que *req) { uint16_t avail_dsds; struct dsd64 *cur_dsd; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; struct scatterlist *sg; int i; struct qla_qpair *qpair = sp->qpair; cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 3 IOCB */ put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { cmd_pkt->byte_count = cpu_to_le32(0); return; } vha = sp->vha; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA); qpair->counters.output_bytes += scsi_bufflen(cmd); qpair->counters.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA); qpair->counters.input_bytes += scsi_bufflen(cmd); qpair->counters.input_requests++; } /* One DSD is available in the Command Type 3 IOCB */ avail_dsds = 1; cur_dsd = &cmd_pkt->dsd; /* Load data segments */ scsi_for_each_sg(cmd, sg, tot_dsds, i) { cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Continuation * Type 1 IOCB. */ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); cur_dsd = cont_pkt->dsd; avail_dsds = ARRAY_SIZE(cont_pkt->dsd); } append_dsd64(&cur_dsd, sg); avail_dsds--; } } struct fw_dif_context { __le32 ref_tag; __le16 app_tag; uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ }; /* * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command * */ static inline void qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, unsigned int protcnt) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd)); if (cmd->prot_flags & SCSI_PROT_REF_CHECK && qla2x00_hba_err_chk_enabled(sp)) { pkt->ref_tag_mask[0] = 0xff; pkt->ref_tag_mask[1] = 0xff; pkt->ref_tag_mask[2] = 0xff; pkt->ref_tag_mask[3] = 0xff; } pkt->app_tag = cpu_to_le16(0); pkt->app_tag_mask[0] = 0x0; pkt->app_tag_mask[1] = 0x0; } int qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, uint32_t *partial) { struct scatterlist *sg; uint32_t cumulative_partial, sg_len; dma_addr_t sg_dma_addr; if (sgx->num_bytes == sgx->tot_bytes) return 0; sg = sgx->cur_sg; cumulative_partial = sgx->tot_partial; sg_dma_addr = sg_dma_address(sg); sg_len = sg_dma_len(sg); sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { sgx->dma_len = (blk_sz - cumulative_partial); sgx->tot_partial = 0; sgx->num_bytes += blk_sz; *partial = 0; } else { sgx->dma_len = sg_len - sgx->bytes_consumed; sgx->tot_partial += sgx->dma_len; *partial = 1; } sgx->bytes_consumed += sgx->dma_len; if (sg_len == sgx->bytes_consumed) { sg = sg_next(sg); sgx->num_sg++; sgx->cur_sg = sg; sgx->bytes_consumed = 0; } return 1; } int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; struct scatterlist *sg_prot; struct dsd64 *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; uint32_t prot_int; /* protection 
interval */ uint32_t partial; struct qla2_sgx sgx; dma_addr_t sle_dma; uint32_t sle_dma_len, tot_prot_dma_len = 0; struct scsi_cmnd *cmd; memset(&sgx, 0, sizeof(struct qla2_sgx)); if (sp) { cmd = GET_CMD_SP(sp); prot_int = scsi_prot_interval(cmd); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; sg_prot = scsi_prot_sglist(cmd); } else if (tc) { prot_int = tc->blk_sz; sgx.tot_bytes = tc->bufflen; sgx.cur_sg = tc->sg; sg_prot = tc->prot_sg; } else { BUG(); return 1; } while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { sle_dma = sgx.dma_addr; sle_dma_len = sgx.dma_len; alloc_and_fill: /* Allocate additional continuation packets? */ if (avail_dsds == 0) { avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? QLA_DSDS_PER_IOCB : used_dsds; dsd_list_len = (avail_dsds + 1) * 12; used_dsds -= avail_dsds; /* allocate tracking DS */ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); if (!dsd_ptr) return 1; /* allocate new list */ dsd_ptr->dsd_addr = next_dsd = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!next_dsd) { /* * Need to cleanup only this dsd_ptr, rest * will be done by sp_free_dma() */ kfree(dsd_ptr); return 1; } if (sp) { list_add_tail(&dsd_ptr->list, &sp->u.scmd.crc_ctx->dsd_list); sp->flags |= SRB_CRC_CTX_DSD_VALID; } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ put_unaligned_le64(dsd_ptr->dsd_list_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(dsd_list_len); cur_dsd = next_dsd; } put_unaligned_le64(sle_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(sle_dma_len); cur_dsd++; avail_dsds--; if (partial == 0) { /* Got a full protection interval */ sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; sle_dma_len = 8; tot_prot_dma_len += sle_dma_len; if (tot_prot_dma_len == sg_dma_len(sg_prot)) { tot_prot_dma_len = 0; sg_prot = sg_next(sg_prot); } partial = 1; /* So as to not re-enter this block */ goto alloc_and_fill; } } /* Null termination */ cur_dsd->address = 0; cur_dsd->length = 0; cur_dsd++; return 0; } int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) { void *next_dsd; uint8_t avail_dsds = 0; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; struct scatterlist *sg, *sgl; struct dsd64 *cur_dsd = dsd; int i; uint16_t used_dsds = tot_dsds; struct scsi_cmnd *cmd; if (sp) { cmd = GET_CMD_SP(sp); sgl = scsi_sglist(cmd); } else if (tc) { sgl = tc->sg; } else { BUG(); return 1; } for_each_sg(sgl, sg, tot_dsds, i) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
QLA_DSDS_PER_IOCB : used_dsds; dsd_list_len = (avail_dsds + 1) * 12; used_dsds -= avail_dsds; /* allocate tracking DS */ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); if (!dsd_ptr) return 1; /* allocate new list */ dsd_ptr->dsd_addr = next_dsd = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!next_dsd) { /* * Need to cleanup only this dsd_ptr, rest * will be done by sp_free_dma() */ kfree(dsd_ptr); return 1; } if (sp) { list_add_tail(&dsd_ptr->list, &sp->u.scmd.crc_ctx->dsd_list); sp->flags |= SRB_CRC_CTX_DSD_VALID; } else { list_add_tail(&dsd_ptr->list, &(tc->ctx->dsd_list)); *tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ put_unaligned_le64(dsd_ptr->dsd_list_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(dsd_list_len); cur_dsd = next_dsd; } append_dsd64(&cur_dsd, sg); avail_dsds--; } /* Null termination */ cur_dsd->address = 0; cur_dsd->length = 0; cur_dsd++; return 0; } int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) { struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; struct scatterlist *sg, *sgl; struct crc_context *difctx = NULL; struct scsi_qla_host *vha; uint dsd_list_len; uint avail_dsds = 0; uint used_dsds = tot_dsds; bool dif_local_dma_alloc = false; bool direction_to_device = false; int i; if (sp) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); sgl = scsi_prot_sglist(cmd); vha = sp->vha; difctx = sp->u.scmd.crc_ctx; direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", __func__, cmd, difctx, sp); } else if (tc) { vha = tc->vha; sgl = tc->prot_sg; difctx = tc->ctx; direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; } else { BUG(); return 1; } ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, "%s: enter (write=%u)\n", __func__, direction_to_device); /* if initiator doing write or target doing read */ if (direction_to_device) { for_each_sg(sgl, sg, tot_dsds, i) { u64 sle_phys = sg_phys(sg); /* If SGE addr + len flips bits in upper 32-bits */ if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, "%s: page boundary crossing (phys=%llx len=%x)\n", __func__, sle_phys, sg->length); if (difctx) { ha->dif_bundle_crossed_pages++; dif_local_dma_alloc = true; } else { ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, "%s: difctx pointer is NULL\n", __func__); } break; } } ha->dif_bundle_writes++; } else { ha->dif_bundle_reads++; } if (ql2xdifbundlinginternalbuffers) dif_local_dma_alloc = direction_to_device; if (dif_local_dma_alloc) { u32 track_difbundl_buf = 0; u32 ldma_sg_len = 0; u8 ldma_needed = 1; difctx->no_dif_bundl = 0; difctx->dif_bundl_len = 0; /* Track DSD buffers */ INIT_LIST_HEAD(&difctx->ldif_dsd_list); /* Track local DMA buffers */ INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); for_each_sg(sgl, sg, tot_dsds, i) { u32 sglen = sg_dma_len(sg); ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, difctx->dif_bundl_len, ldma_needed); while (sglen) { u32 xfrlen = 0; if (ldma_needed) { /* * Allocate list item to store * the DMA buffers */ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); if (!dsd_ptr) { ql_dbg(ql_dbg_tgt, vha, 0xe024, "%s: failed alloc dsd_ptr\n", __func__); return 1; } ha->dif_bundle_kallocs++; /* allocate dma buffer */ 
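/* Each bounce buffer below comes from dif_bundl_pool and holds up to DIF_BUNDLING_DMA_POOL_SIZE bytes of protection data copied out of the scatterlist by sg_pcopy_to_buffer(). */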
dsd_ptr->dsd_addr = dma_pool_alloc (ha->dif_bundl_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!dsd_ptr->dsd_addr) { ql_dbg(ql_dbg_tgt, vha, 0xe024, "%s: failed alloc ->dsd_ptr\n", __func__); /* * need to cleanup only this * dsd_ptr rest will be done * by sp_free_dma() */ kfree(dsd_ptr); ha->dif_bundle_kallocs--; return 1; } ha->dif_bundle_dma_allocs++; ldma_needed = 0; difctx->no_dif_bundl++; list_add_tail(&dsd_ptr->list, &difctx->ldif_dma_hndl_list); } /* xfrlen is min of dma pool size and sglen */ xfrlen = (sglen > (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : sglen; /* replace with local allocated dma buffer */ sg_pcopy_to_buffer(sgl, sg_nents(sgl), dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, difctx->dif_bundl_len); difctx->dif_bundl_len += xfrlen; sglen -= xfrlen; ldma_sg_len += xfrlen; if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || sg_is_last(sg)) { ldma_needed = 1; ldma_sg_len = 0; } } } track_difbundl_buf = used_dsds = difctx->no_dif_bundl; ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", difctx->dif_bundl_len, difctx->no_dif_bundl, track_difbundl_buf); if (sp) sp->flags |= SRB_DIF_BUNDL_DMA_VALID; else tc->prot_flags = DIF_BUNDL_DMA_VALID; list_for_each_entry_safe(dif_dsd, nxt_dsd, &difctx->ldif_dma_hndl_list, list) { u32 sglen = (difctx->dif_bundl_len > DIF_BUNDLING_DMA_POOL_SIZE) ? DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; BUG_ON(track_difbundl_buf == 0); /* Allocate additional continuation packets? */ if (avail_dsds == 0) { ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe024, "%s: adding continuation iocb's\n", __func__); avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? QLA_DSDS_PER_IOCB : used_dsds; dsd_list_len = (avail_dsds + 1) * 12; used_dsds -= avail_dsds; /* allocate tracking DS */ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); if (!dsd_ptr) { ql_dbg(ql_dbg_tgt, vha, 0xe026, "%s: failed alloc dsd_ptr\n", __func__); return 1; } ha->dif_bundle_kallocs++; difctx->no_ldif_dsd++; /* allocate new list */ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!dsd_ptr->dsd_addr) { ql_dbg(ql_dbg_tgt, vha, 0xe026, "%s: failed alloc ->dsd_addr\n", __func__); /* * need to cleanup only this dsd_ptr * rest will be done by sp_free_dma() */ kfree(dsd_ptr); ha->dif_bundle_kallocs--; return 1; } ha->dif_bundle_dma_allocs++; if (sp) { list_add_tail(&dsd_ptr->list, &difctx->ldif_dsd_list); sp->flags |= SRB_CRC_CTX_DSD_VALID; } else { list_add_tail(&dsd_ptr->list, &difctx->ldif_dsd_list); tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ put_unaligned_le64(dsd_ptr->dsd_list_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(dsd_list_len); cur_dsd = dsd_ptr->dsd_addr; } put_unaligned_le64(dif_dsd->dsd_list_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(sglen); cur_dsd++; avail_dsds--; difctx->dif_bundl_len -= sglen; track_difbundl_buf--; } ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, difctx->no_ldif_dsd, difctx->no_dif_bundl); } else { for_each_sg(sgl, sg, tot_dsds, i) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
QLA_DSDS_PER_IOCB : used_dsds; dsd_list_len = (avail_dsds + 1) * 12; used_dsds -= avail_dsds; /* allocate tracking DS */ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); if (!dsd_ptr) { ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe027, "%s: failed alloc dsd_dma...\n", __func__); return 1; } /* allocate new list */ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!dsd_ptr->dsd_addr) { /* need to cleanup only this dsd_ptr */ /* rest will be done by sp_free_dma() */ kfree(dsd_ptr); return 1; } if (sp) { list_add_tail(&dsd_ptr->list, &difctx->dsd_list); sp->flags |= SRB_CRC_CTX_DSD_VALID; } else { list_add_tail(&dsd_ptr->list, &difctx->dsd_list); tc->ctx_dsd_alloced = 1; } /* add new list to cmd iocb or last list */ put_unaligned_le64(dsd_ptr->dsd_list_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(dsd_list_len); cur_dsd = dsd_ptr->dsd_addr; } append_dsd64(&cur_dsd, sg); avail_dsds--; } } /* Null termination */ cur_dsd->address = 0; cur_dsd->length = 0; cur_dsd++; return 0; } /** * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command * Type 6 IOCB types. * * @sp: SRB command to process * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer * @tot_prot_dsds: Total number of segments with protection information * @fw_prot_opts: Protection options to be passed to firmware */ static inline int qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) { struct dsd64 *cur_dsd; __be32 *fcp_dl; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; uint32_t total_bytes = 0; uint32_t data_bytes; uint32_t dif_bytes; uint8_t bundling = 1; uint16_t blk_size; struct crc_context *crc_ctx_pkt = NULL; struct qla_hw_data *ha; uint8_t additional_fcpcdb_len; uint16_t fcp_cmnd_len; struct fcp_cmnd *fcp_cmnd; dma_addr_t crc_ctx_dma; cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type CRC_2 IOCB */ put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type); vha = sp->vha; ha = vha->hw; /* No data transfer */ data_bytes = scsi_bufflen(cmd); if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { cmd_pkt->byte_count = cpu_to_le32(0); return QLA_SUCCESS; } cmd_pkt->vp_index = sp->vha->vp_idx; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); } if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) bundling = 0; /* Allocate CRC context from global pool */ crc_ctx_pkt = sp->u.scmd.crc_ctx = dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); if (!crc_ctx_pkt) goto crc_queuing_error; crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; sp->flags |= SRB_CRC_CTX_DMA_VALID; /* Set handle */ crc_ctx_pkt->handle = cmd_pkt->handle; INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) &crc_ctx_pkt->ref_tag, tot_prot_dsds); put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); /* Determine SCSI command length -- align to 4 byte boundary */ if (cmd->cmd_len > 16) { additional_fcpcdb_len = cmd->cmd_len - 16; if ((cmd->cmd_len % 4) != 0) { /* SCSI cmd > 16 bytes must be multiple of 4 */ goto 
crc_queuing_error; } fcp_cmnd_len = 12 + cmd->cmd_len + 4; } else { additional_fcpcdb_len = 0; fcp_cmnd_len = 12 + 16 + 4; } fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; if (cmd->sc_data_direction == DMA_TO_DEVICE) fcp_cmnd->additional_cdb_len |= 1; else if (cmd->sc_data_direction == DMA_FROM_DEVICE) fcp_cmnd->additional_cdb_len |= 2; int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF, &cmd_pkt->fcp_cmnd_dseg_address); fcp_cmnd->task_management = 0; fcp_cmnd->task_attribute = TSK_SIMPLE; cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ /* Compute dif len and adjust data len to include protection */ dif_bytes = 0; blk_size = cmd->device->sector_size; dif_bytes = (data_bytes / blk_size) * 8; switch (scsi_get_prot_op(GET_CMD_SP(sp))) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: total_bytes = data_bytes; data_bytes += dif_bytes; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: total_bytes = data_bytes + dif_bytes; break; default: BUG(); } if (!qla2x00_hba_err_chk_enabled(sp)) fw_prot_opts |= 0x10; /* Disable Guard tag checking */ /* HBA error checking enabled */ else if (IS_PI_UNINIT_CAPABLE(ha)) { if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1) || (scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE2)) fw_prot_opts |= BIT_10; else if (scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE3) fw_prot_opts |= BIT_11; } if (!bundling) { cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; } else { /* * Configure Bundling if we need to fetch interleaving * protection PCI accesses */ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - tot_prot_dsds); cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; } /* Finish the common fields of CRC pkt */ crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); crc_ctx_pkt->guard_seed = cpu_to_le16(0); /* Fibre channel byte count */ cmd_pkt->byte_count = cpu_to_le32(total_bytes); fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + additional_fcpcdb_len); *fcp_dl = htonl(total_bytes); if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { cmd_pkt->byte_count = cpu_to_le32(0); return QLA_SUCCESS; } /* Walks data segments */ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); if (!bundling && tot_prot_dsds) { if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, cur_dsd, tot_dsds, NULL)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, (tot_dsds - tot_prot_dsds), NULL)) goto crc_queuing_error; if (bundling && tot_prot_dsds) { /* Walks dif segments */ cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, tot_prot_dsds, NULL)) goto crc_queuing_error; } return QLA_SUCCESS; crc_queuing_error: /* Cleanup will be performed by the caller */ return QLA_FUNCTION_FAILED; } /** * qla24xx_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. 
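 *
 * Added summary: maps the data scatterlist, reserves IOCB/exchange
 * resources, builds a Command Type 7 IOCB and updates the request
 * queue in-pointer under the hardware lock.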
*/ int qla24xx_start_scsi(srb_t *sp) { int nseg; unsigned long flags; uint32_t *clr_ptr; uint32_t handle; struct cmd_type_7 *cmd_pkt; uint16_t cnt; uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; struct rsp_que *rsp; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) return qla28xx_start_scsi_edif(sp); /* Setup device pointers. */ req = vha->req; rsp = req->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; vha->marker_needed = 0; } /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else nseg = 0; tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) goto queuing_error; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); cmd_pkt->task = TSK_SIMPLE; /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->qpair->cmd_cnt++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ wrt_reg_dword(req->req_q_in, req->ring_index); /* Manage unprocessed RIO/ZIO commands in response queue. 
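 * Added note: with interrupt reduction (RIO/ZIO) enabled the firmware
 * can post completions without raising an interrupt, so the response
 * ring is polled here before the hardware lock is dropped.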
*/ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; } /** * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. */ int qla24xx_dif_start_scsi(srb_t *sp) { int nseg; unsigned long flags; uint32_t *clr_ptr; uint32_t handle; uint16_t cnt; uint16_t req_cnt = 0; uint16_t tot_dsds; uint16_t tot_prot_dsds; uint16_t fw_prot_opts = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_crc_2 *cmd_pkt; uint32_t status = 0; #define QDSS_GOT_Q_SPACE BIT_0 /* Only process protection or >16 cdb in this routine */ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { if (cmd->cmd_len <= 16) return qla24xx_start_scsi(sp); else return qla_start_scsi_type6(sp); } /* Setup device pointers. */ req = vha->req; rsp = req->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; vha->marker_needed = 0; } /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Compute number of required data segments */ /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; else sp->flags |= SRB_DMA_VALID; if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { struct qla2_sgx sgx; uint32_t partial; memset(&sgx, 0, sizeof(struct qla2_sgx)); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; nseg = 0; while (qla24xx_get_one_block_sg( cmd->device->sector_size, &sgx, &partial)) nseg++; } } else nseg = 0; /* number of required data segments */ tot_dsds = nseg; /* Compute number of required protection segments */ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), scsi_prot_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; else sp->flags |= SRB_CRC_PROT_DMA_VALID; if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { nseg = scsi_bufflen(cmd) / cmd->device->sector_size; } } else { nseg = 0; } req_cnt = 1; /* Total Data and protection sg segment(s) */ tot_prot_dsds = nseg; tot_dsds += nseg; sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) goto queuing_error; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < 
(req_cnt + 2)) goto queuing_error; } status |= QDSS_GOT_Q_SPACE; /* Build header part of command packet (excluding the OPCODE). */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; /* Fill-in common area */ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* Total Data and protection segment(s) */ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Build IOCB segments and adjust for data protection segments */ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != QLA_SUCCESS) goto queuing_error; cmd_pkt->entry_count = (uint8_t)req_cnt; /* Specify response queue number where completion should happen */ cmd_pkt->entry_status = (uint8_t) rsp->id; cmd_pkt->timeout = cpu_to_le16(0); wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->qpair->cmd_cnt++; /* Set chip new ring index. */ wrt_reg_dword(req->req_q_in, req->ring_index); /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; queuing_error: if (status & QDSS_GOT_Q_SPACE) { req->outstanding_cmds[handle] = NULL; req->cnt += req_cnt; } /* Cleanup will be performed by the caller (queuecommand) */ qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; } /** * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. 
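 *
 * Added summary: multi-queue variant of qla24xx_start_scsi(); the IOCB
 * is built the same way, but submission uses the qpair's request and
 * response queues and is serialized with the qpair lock instead of the
 * hardware lock.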
*/ static int qla2xxx_start_scsi_mq(srb_t *sp) { int nseg; unsigned long flags; uint32_t *clr_ptr; uint32_t handle; struct cmd_type_7 *cmd_pkt; uint16_t cnt; uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; struct rsp_que *rsp; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) return qla28xx_start_scsi_edif(sp); /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); /* Setup qpair pointers */ req = qpair->req; rsp = qpair->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; } handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else nseg = 0; tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) goto queuing_error; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->fcport->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); cmd_pkt->task = TSK_SIMPLE; /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->qpair->cmd_cnt++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. 
*/ wrt_reg_dword(req->req_q_in, req->ring_index); /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_SUCCESS; queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; } /** * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. */ int qla2xxx_dif_start_scsi_mq(srb_t *sp) { int nseg; unsigned long flags; uint32_t *clr_ptr; uint32_t handle; uint16_t cnt; uint16_t req_cnt = 0; uint16_t tot_dsds; uint16_t tot_prot_dsds; uint16_t fw_prot_opts = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_crc_2 *cmd_pkt; uint32_t status = 0; struct qla_qpair *qpair = sp->qpair; #define QDSS_GOT_Q_SPACE BIT_0 /* Check for host side state */ if (!qpair->online) { cmd->result = DID_NO_CONNECT << 16; return QLA_INTERFACE_ERROR; } if (!qpair->difdix_supported && scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { cmd->result = DID_NO_CONNECT << 16; return QLA_INTERFACE_ERROR; } /* Only process protection or >16 cdb in this routine */ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { if (cmd->cmd_len <= 16) return qla2xxx_start_scsi_mq(sp); else return qla_start_scsi_type6(sp); } spin_lock_irqsave(&qpair->qp_lock, flags); /* Setup qpair pointers */ rsp = qpair->rsp; req = qpair->req; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; } handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Compute number of required data segments */ /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; else sp->flags |= SRB_DMA_VALID; if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { struct qla2_sgx sgx; uint32_t partial; memset(&sgx, 0, sizeof(struct qla2_sgx)); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); sgx.sp = sp; nseg = 0; while (qla24xx_get_one_block_sg( cmd->device->sector_size, &sgx, &partial)) nseg++; } } else nseg = 0; /* number of required data segments */ tot_dsds = nseg; /* Compute number of required protection segments */ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), scsi_prot_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; else sp->flags |= SRB_CRC_PROT_DMA_VALID; if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { nseg = scsi_bufflen(cmd) / cmd->device->sector_size; } } else { nseg = 0; } req_cnt = 1; /* Total Data and protection sg segment(s) */ tot_prot_dsds = nseg; tot_dsds += nseg; sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = 
qla24xx_calc_iocbs(vha, tot_dsds); if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) goto queuing_error; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } status |= QDSS_GOT_Q_SPACE; /* Build header part of command packet (excluding the OPCODE). */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; /* Fill-in common area */ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* Total Data and protection segment(s) */ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Build IOCB segments and adjust for data protection segments */ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != QLA_SUCCESS) goto queuing_error; cmd_pkt->entry_count = (uint8_t)req_cnt; cmd_pkt->timeout = cpu_to_le16(0); wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->qpair->cmd_cnt++; /* Set chip new ring index. */ wrt_reg_dword(req->req_q_in, req->ring_index); /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_SUCCESS; queuing_error: if (status & QDSS_GOT_Q_SPACE) { req->outstanding_cmds[handle] = NULL; req->cnt += req_cnt; } /* Cleanup will be performed by the caller (queuecommand) */ qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; } /* Generic Control-SRB manipulation functions. */ /* hardware_lock assumed to be held. */ void * __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) { scsi_qla_host_t *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req = qpair->req; device_reg_t *reg = ISP_QUE_REG(ha, req->id); uint32_t handle; request_t *pkt; uint16_t cnt, req_cnt; pkt = NULL; req_cnt = 1; handle = 0; if (sp && (sp->type != SRB_SCSI_CMD)) { /* Adjust entry-counts as needed. */ req_cnt = sp->iocbs; } /* Check for room on request queue. 
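 * Added note: free slots are derived from the firmware's out-pointer,
 * read either from the shadow register area or from the ISP-specific
 * chip register, and compared against the current ring index.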
*/ if (req->cnt < req_cnt + 2) { if (qpair->use_shadow_reg) cnt = *req->out_ptr; else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) cnt = rd_reg_dword(&reg->isp25mq.req_q_out); else if (IS_P3P_TYPE(ha)) cnt = rd_reg_dword(reg->isp82.req_q_out); else if (IS_FWI2_CAPABLE(ha)) cnt = rd_reg_dword(&reg->isp24.req_q_out); else if (IS_QLAFX00(ha)) cnt = rd_reg_dword(&reg->ispfx00.req_q_out); else cnt = qla2x00_debounce_register( ISP_REQ_Q_OUT(ha, &reg->isp)); if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) { qla_schedule_eeh_work(vha); return NULL; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); } if (req->cnt < req_cnt + 2) goto queuing_error; if (sp) { handle = qla2xxx_get_next_handle(req); if (handle == 0) { ql_log(ql_log_warn, vha, 0x700b, "No room on outstanding cmd array.\n"); goto queuing_error; } /* Prep command array. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; } /* Prep packet */ req->cnt -= req_cnt; pkt = req->ring_ptr; memset(pkt, 0, REQUEST_ENTRY_SIZE); if (IS_QLAFX00(ha)) { wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); } else { pkt->entry_count = req_cnt; pkt->handle = handle; } return pkt; queuing_error: qpair->tgt_counters.num_alloc_iocb_failed++; return pkt; } void * qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) { scsi_qla_host_t *vha = qpair->vha; if (qla2x00_reset_active(vha)) return NULL; return __qla2x00_alloc_iocbs(qpair, sp); } void * qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) { return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); } static void qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) { struct srb_iocb *lio = &sp->u.iocb_cmd; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); if (sp->vha->flags.nvme_first_burst) logio->io_parameter[0] = cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); if (sp->vha->flags.nvme2_enabled) { /* Set service parameter BIT_7 for NVME CONF support */ logio->io_parameter[0] |= cpu_to_le32(NVME_PRLI_SP_CONF); /* Set service parameter BIT_8 for SLER support */ logio->io_parameter[0] |= cpu_to_le32(NVME_PRLI_SP_SLER); /* Set service parameter BIT_9 for PI control support */ logio->io_parameter[0] |= cpu_to_le32(NVME_PRLI_SP_PI_CTRL); } } logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; logio->vp_index = sp->vha->vp_idx; } static void qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) { struct srb_iocb *lio = &sp->u.iocb_cmd; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); } else { logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); if (lio->u.logio.flags & SRB_LOGIN_FCSP) { logio->control_flags |= cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI); logio->io_parameter[0] = cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO); } } 
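/* Common login IOCB fields: N_Port handle, destination port ID and VP index. */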
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; logio->vp_index = sp->vha->vp_idx; } static void qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) { struct qla_hw_data *ha = sp->vha->hw; struct srb_iocb *lio = &sp->u.iocb_cmd; uint16_t opts; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; if (HAS_EXTENDED_IDS(ha)) { mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); mbx->mb10 = cpu_to_le16(opts); } else { mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); } mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) { u16 control_flags = LCF_COMMAND_LOGO; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; if (sp->fcport->explicit_logout) { control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; } else { control_flags |= LCF_IMPL_LOGO; if (!sp->fcport->keep_nport_handle) control_flags |= LCF_FREE_NPORT; } logio->control_flags = cpu_to_le16(control_flags); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; logio->vp_index = sp->vha->vp_idx; } static void qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) { struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); mbx->mb1 = HAS_EXTENDED_IDS(ha) ? cpu_to_le16(sp->fcport->loop_id) : cpu_to_le16(sp->fcport->loop_id << 8); mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | sp->fcport->d_id.b.al_pa); mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); /* Implicit: mbx->mbx10 = 0. 
*/ } static void qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) { logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->vp_index = sp->vha->vp_idx; } static void qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) { struct qla_hw_data *ha = sp->vha->hw; mbx->entry_type = MBX_IOCB_TYPE; SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); if (HAS_EXTENDED_IDS(ha)) { mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); mbx->mb10 = cpu_to_le16(BIT_0); } else { mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); } mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); } static void qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) { uint32_t flags; uint64_t lun; struct fc_port *fcport = sp->fcport; scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; struct srb_iocb *iocb = &sp->u.iocb_cmd; struct req_que *req = sp->qpair->req; flags = iocb->u.tmf.flags; lun = iocb->u.tmf.lun; tsk->entry_type = TSK_MGMT_IOCB_TYPE; tsk->entry_count = 1; tsk->handle = make_handle(req->id, tsk->handle); tsk->nport_handle = cpu_to_le16(fcport->loop_id); tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->control_flags = cpu_to_le32(flags); tsk->port_id[0] = fcport->d_id.b.al_pa; tsk->port_id[1] = fcport->d_id.b.area; tsk->port_id[2] = fcport->d_id.b.domain; tsk->vp_index = fcport->vha->vp_idx; if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET| TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { int_to_scsilun(lun, &tsk->lun); host_to_fcp_swap((uint8_t *)&tsk->lun, sizeof(tsk->lun)); } } static void qla2x00_async_done(struct srb *sp, int res) { if (del_timer(&sp->u.iocb_cmd.timer)) { /* * Successfully cancelled the timeout handler * ref: TMR */ if (kref_put(&sp->cmd_kref, qla2x00_sp_release)) return; } sp->async_done(sp, res); } void qla2x00_sp_release(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); sp->free(sp); } void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, void (*done)(struct srb *sp, int res)) { timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); sp->done = qla2x00_async_done; sp->async_done = done; sp->free = qla2x00_sp_free; sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); sp->start_timer = 1; } static void qla2x00_els_dcmd_sp_free(srb_t *sp) { struct srb_iocb *elsio = &sp->u.iocb_cmd; kfree(sp->fcport); if (elsio->u.els_logo.els_logo_pyld) dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, elsio->u.els_logo.els_logo_pyld, elsio->u.els_logo.els_logo_pyld_dma); del_timer(&elsio->timer); qla2x00_rel_sp(sp); } static void qla2x00_els_dcmd_iocb_timeout(void *data) { srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; unsigned long flags = 0; int res, h; ql_dbg(ql_dbg_io, vha, 0x3069, "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); /* Abort the exchange */ res = qla24xx_async_abort_cmd(sp, false); if (res) { ql_dbg(ql_dbg_io, vha, 0x3070, "mbx abort_command failed.\n"); 
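/*
 * Added note: the firmware abort failed, so no completion will arrive
 * for this exchange; pull the handle off the outstanding command array
 * by hand and wake up the waiter blocked on els_logo.comp.
 */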
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); complete(&lio->u.els_logo.comp); } else { ql_dbg(ql_dbg_io, vha, 0x3071, "mbx abort_command success.\n"); } } static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) { fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; ql_dbg(ql_dbg_io, vha, 0x3072, "%s hdl=%x, portid=%02x%02x%02x done\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); complete(&lio->u.els_logo.comp); } int qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, port_id_t remote_did) { srb_t *sp; fc_port_t *fcport = NULL; struct srb_iocb *elsio = NULL; struct qla_hw_data *ha = vha->hw; struct els_logo_payload logo_pyld; int rval = QLA_SUCCESS; fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); return -ENOMEM; } /* Alloc SRB structure * ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { kfree(fcport); ql_log(ql_log_info, vha, 0x70e6, "SRB allocation failed\n"); return -ENOMEM; } elsio = &sp->u.iocb_cmd; fcport->loop_id = 0xFFFF; fcport->d_id.b.domain = remote_did.b.domain; fcport->d_id.b.area = remote_did.b.area; fcport->d_id.b.al_pa = remote_did.b.al_pa; ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT, qla2x00_els_dcmd_sp_done); sp->free = qla2x00_els_dcmd_sp_free; sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout; init_completion(&sp->u.iocb_cmd.u.els_logo.comp); elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, GFP_KERNEL); if (!elsio->u.els_logo.els_logo_pyld) { /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return QLA_FUNCTION_FAILED; } memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); elsio->u.els_logo.els_cmd = els_opcode; logo_pyld.opcode = els_opcode; logo_pyld.s_id[0] = vha->d_id.b.al_pa; logo_pyld.s_id[1] = vha->d_id.b.area; logo_pyld.s_id[2] = vha->d_id.b.domain; host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, sizeof(struct els_logo_payload)); ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, elsio->u.els_logo.els_logo_pyld, sizeof(*elsio->u.els_logo.els_logo_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_io, vha, 0x3074, "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); wait_for_completion(&elsio->u.els_logo.comp); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } static void qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { scsi_qla_host_t *vha = sp->vha; struct srb_iocb *elsio = &sp->u.iocb_cmd; els_iocb->entry_type = ELS_IOCB_TYPE; els_iocb->entry_count = 1; els_iocb->sys_define = 0; els_iocb->entry_status = 0; 
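/*
 * Added note: this routine services both the driver-initiated PLOGI
 * and LOGO ELS pass-through requests; the payload setup below branches
 * on the stored ELS command code.
 */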
els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->tx_dsd_count = cpu_to_le16(1); els_iocb->vp_index = vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = 0; els_iocb->opcode = elsio->u.els_logo.els_cmd; els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; els_iocb->d_id[1] = sp->fcport->d_id.b.area; els_iocb->d_id[2] = sp->fcport->d_id.b.domain; /* For SID the byte order is different than DID */ els_iocb->s_id[1] = vha->d_id.b.al_pa; els_iocb->s_id[2] = vha->d_id.b.area; els_iocb->s_id[0] = vha->d_id.b.domain; if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { if (vha->hw->flags.edif_enabled) els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN); else els_iocb->control_flags = 0; els_iocb->tx_byte_count = els_iocb->tx_len = cpu_to_le32(sizeof(struct els_plogi_payload)); put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, &els_iocb->tx_address); els_iocb->rx_dsd_count = cpu_to_le16(1); els_iocb->rx_byte_count = els_iocb->rx_len = cpu_to_le32(sizeof(struct els_plogi_payload)); put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, &els_iocb->rx_address); ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI ELS IOCB:\n"); ql_dump_buffer(ql_log_info, vha, 0x0109, (uint8_t *)els_iocb, sizeof(*els_iocb)); } else { els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_logo_payload)); put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, &els_iocb->tx_address); els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); els_iocb->rx_byte_count = 0; els_iocb->rx_address = 0; els_iocb->rx_len = 0; ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, "LOGO ELS IOCB:"); ql_dump_buffer(ql_log_info, vha, 0x010b, els_iocb, sizeof(*els_iocb)); } sp->vha->qla_stats.control_requests++; } void qla2x00_els_dcmd2_iocb_timeout(void *data) { srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; unsigned long flags = 0; int res, h; ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); /* Abort the exchange */ res = qla24xx_async_abort_cmd(sp, false); ql_dbg(ql_dbg_io, vha, 0x3070, "mbx abort_command %s\n", (res == QLA_SUCCESS) ? 
"successful" : "failed"); if (res) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); sp->done(sp, QLA_FUNCTION_TIMEOUT); } } void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) { if (els_plogi->els_plogi_pyld) dma_free_coherent(&vha->hw->pdev->dev, els_plogi->tx_size, els_plogi->els_plogi_pyld, els_plogi->els_plogi_pyld_dma); if (els_plogi->els_resp_pyld) dma_free_coherent(&vha->hw->pdev->dev, els_plogi->rx_size, els_plogi->els_resp_pyld, els_plogi->els_resp_pyld_dma); } static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) { fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct qla_work_evt *e; struct fc_port *conflict_fcport; port_id_t cid; /* conflict Nport id */ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; u16 lid; ql_dbg(ql_dbg_disc, vha, 0x3072, "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/ fcport->logout_on_delete = 1; fcport->chip_reset = vha->hw->base_qpair->chip_reset; if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&lio->u.els_plogi.comp); else { switch (le32_to_cpu(fw_status[0])) { case CS_DATA_UNDERRUN: case CS_COMPLETE: memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; ea.rc = res; qla_handle_els_plogi_done(vha, &ea); break; case CS_IOCB_ERROR: switch (le32_to_cpu(fw_status[1])) { case LSC_SCODE_PORTID_USED: lid = le32_to_cpu(fw_status[2]) & 0xffff; qlt_find_sess_invalidate_other(vha, wwn_to_u64(fcport->port_name), fcport->d_id, lid, &conflict_fcport); if (conflict_fcport) { /* * Another fcport shares the same * loop_id & nport id; conflict * fcport needs to finish cleanup * before this fcport can proceed * to login. */ conflict_fcport->conflict = fcport; fcport->login_pause = 1; ql_dbg(ql_dbg_disc, vha, 0x20ed, "%s %d %8phC pid %06x inuse with lid %#x.\n", __func__, __LINE__, fcport->port_name, fcport->d_id.b24, lid); } else { ql_dbg(ql_dbg_disc, vha, 0x20ed, "%s %d %8phC pid %06x inuse with lid %#x sched del\n", __func__, __LINE__, fcport->port_name, fcport->d_id.b24, lid); qla2x00_clear_loop_id(fcport); set_bit(lid, vha->hw->loop_id_map); fcport->loop_id = lid; fcport->keep_nport_handle = 0; qlt_schedule_sess_for_deletion(fcport); } break; case LSC_SCODE_NPORT_USED: cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) & 0xff; cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) & 0xff; cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; cid.b.rsvd_1 = 0; ql_dbg(ql_dbg_disc, vha, 0x20ec, "%s %d %8phC lid %#x in use with pid %06x post gnl\n", __func__, __LINE__, fcport->port_name, fcport->loop_id, cid.b24); set_bit(fcport->loop_id, vha->hw->loop_id_map); fcport->loop_id = FC_NO_LOOP_ID; qla24xx_post_gnl_work(vha, fcport); break; case LSC_SCODE_NOXCB: vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { ql_log(ql_log_warn, vha, 0xd046, "Exchange starvation. 
Resetting RISC\n"); vha->hw->exch_starvation = 0; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); break; } fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", __func__, sp->fcport->port_name, fw_status[0], fw_status[1], fw_status[2]); fcport->flags &= ~FCF_ASYNC_SENT; qlt_schedule_sess_for_deletion(fcport); break; } break; default: ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", __func__, sp->fcport->port_name, fw_status[0], fw_status[1], fw_status[2]); sp->fcport->flags &= ~FCF_ASYNC_SENT; qlt_schedule_sess_for_deletion(fcport); break; } e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); if (!e) { struct srb_iocb *elsio = &sp->u.iocb_cmd; qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } e->u.iosb.sp = sp; qla2x00_post_work(vha, e); } } int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, fc_port_t *fcport, bool wait) { srb_t *sp; struct srb_iocb *elsio = NULL; struct qla_hw_data *ha = vha->hw; int rval = QLA_SUCCESS; void *ptr, *resp_ptr; /* Alloc SRB structure * ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { ql_log(ql_log_info, vha, 0x70e6, "SRB allocation failed\n"); fcport->flags &= ~FCF_ASYNC_ACTIVE; return -ENOMEM; } fcport->flags |= FCF_ASYNC_SENT; qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); elsio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_io, vha, 0x3073, "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24); if (wait) sp->flags = SRB_WAKEUP_ON_COMP; sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2, qla2x00_els_dcmd2_sp_done); sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; ptr = elsio->u.els_plogi.els_plogi_pyld = dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); if (!elsio->u.els_plogi.els_plogi_pyld) { rval = QLA_FUNCTION_FAILED; goto out; } resp_ptr = elsio->u.els_plogi.els_resp_pyld = dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); if (!elsio->u.els_plogi.els_resp_pyld) { rval = QLA_FUNCTION_FAILED; goto out; } ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); memset(ptr, 0, sizeof(struct els_plogi_payload)); memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); memcpy(elsio->u.els_plogi.els_plogi_pyld->data, (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp), sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp)); elsio->u.els_plogi.els_cmd = els_opcode; elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { struct fc_els_flogi *p = ptr; p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); } ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, sizeof(*elsio->u.els_plogi.els_plogi_pyld)); init_completion(&elsio->u.els_plogi.comp); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x3074, "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24, vha->d_id.b24); } if (wait) { wait_for_completion(&elsio->u.els_plogi.comp); if 
(elsio->u.els_plogi.comp_status != CS_COMPLETE) rval = QLA_FUNCTION_FAILED; } else { goto done; } out: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } /* it is assumed the qpair lock is held */ void qla_els_pt_iocb(struct scsi_qla_host *vha, struct els_entry_24xx *els_iocb, struct qla_els_pt_arg *a) { els_iocb->entry_type = ELS_IOCB_TYPE; els_iocb->entry_count = 1; els_iocb->sys_define = 0; els_iocb->entry_status = 0; els_iocb->handle = QLA_SKIP_HANDLE; els_iocb->nport_handle = a->nport_handle; els_iocb->rx_xchg_address = a->rx_xchg_address; els_iocb->tx_dsd_count = cpu_to_le16(1); els_iocb->vp_index = a->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = cpu_to_le16(0); els_iocb->opcode = a->els_opcode; els_iocb->d_id[0] = a->did.b.al_pa; els_iocb->d_id[1] = a->did.b.area; els_iocb->d_id[2] = a->did.b.domain; /* For SID the byte order is different than DID */ els_iocb->s_id[1] = vha->d_id.b.al_pa; els_iocb->s_id[2] = vha->d_id.b.area; els_iocb->s_id[0] = vha->d_id.b.domain; els_iocb->control_flags = cpu_to_le16(a->control_flags); els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); els_iocb->tx_len = cpu_to_le32(a->tx_len); put_unaligned_le64(a->tx_addr, &els_iocb->tx_address); els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count); els_iocb->rx_len = cpu_to_le32(a->rx_len); put_unaligned_le64(a->rx_addr, &els_iocb->rx_address); } static void qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; els_iocb->entry_type = ELS_IOCB_TYPE; els_iocb->entry_count = 1; els_iocb->sys_define = 0; els_iocb->entry_status = 0; els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); els_iocb->vp_index = sp->vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); els_iocb->opcode = sp->type == SRB_ELS_CMD_RPT ? 
bsg_request->rqst_data.r_els.els_code : bsg_request->rqst_data.h_els.command_code; els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; els_iocb->d_id[1] = sp->fcport->d_id.b.area; els_iocb->d_id[2] = sp->fcport->d_id.b.domain; els_iocb->control_flags = 0; els_iocb->rx_byte_count = cpu_to_le32(bsg_job->reply_payload.payload_len); els_iocb->tx_byte_count = cpu_to_le32(bsg_job->request_payload.payload_len); put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), &els_iocb->tx_address); els_iocb->tx_len = cpu_to_le32(sg_dma_len (bsg_job->request_payload.sg_list)); put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), &els_iocb->rx_address); els_iocb->rx_len = cpu_to_le32(sg_dma_len (bsg_job->reply_payload.sg_list)); sp->vha->qla_stats.control_requests++; } static void qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) { uint16_t avail_dsds; struct dsd64 *cur_dsd; struct scatterlist *sg; int index; uint16_t tot_dsds; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int entry_count = 1; memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); ct_iocb->entry_type = CT_IOCB_TYPE; ct_iocb->entry_status = 0; ct_iocb->handle1 = sp->handle; SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); ct_iocb->status = cpu_to_le16(0); ct_iocb->control_flags = cpu_to_le16(0); ct_iocb->timeout = 0; ct_iocb->cmd_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); ct_iocb->total_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); ct_iocb->req_bytecount = cpu_to_le32(bsg_job->request_payload.payload_len); ct_iocb->rsp_bytecount = cpu_to_le32(bsg_job->reply_payload.payload_len); put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), &ct_iocb->req_dsd.address); ct_iocb->req_dsd.length = ct_iocb->req_bytecount; put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), &ct_iocb->rsp_dsd.address); ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; avail_dsds = 1; cur_dsd = &ct_iocb->rsp_dsd; index = 0; tot_dsds = bsg_job->reply_payload.sg_cnt; for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Cont. * Type 1 IOCB. 
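 * Added note: each continuation entry takes another slot on the
 * request ring, so entry_count is bumped for every one that is added.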
*/ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->hw->req_q_map[0]); cur_dsd = cont_pkt->dsd; avail_dsds = 5; entry_count++; } append_dsd64(&cur_dsd, sg); avail_dsds--; } ct_iocb->entry_count = entry_count; sp->vha->qla_stats.control_requests++; } static void qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) { uint16_t avail_dsds; struct dsd64 *cur_dsd; struct scatterlist *sg; int index; uint16_t cmd_dsds, rsp_dsds; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; int entry_count = 1; cont_a64_entry_t *cont_pkt = NULL; ct_iocb->entry_type = CT_IOCB_TYPE; ct_iocb->entry_status = 0; ct_iocb->sys_define = 0; ct_iocb->handle = sp->handle; ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); ct_iocb->vp_index = sp->vha->vp_idx; ct_iocb->comp_status = cpu_to_le16(0); cmd_dsds = bsg_job->request_payload.sg_cnt; rsp_dsds = bsg_job->reply_payload.sg_cnt; ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); ct_iocb->timeout = 0; ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); ct_iocb->cmd_byte_count = cpu_to_le32(bsg_job->request_payload.payload_len); avail_dsds = 2; cur_dsd = ct_iocb->dsd; index = 0; for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Cont. * Type 1 IOCB. */ cont_pkt = qla2x00_prep_cont_type1_iocb( vha, ha->req_q_map[0]); cur_dsd = cont_pkt->dsd; avail_dsds = 5; entry_count++; } append_dsd64(&cur_dsd, sg); avail_dsds--; } index = 0; for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Cont. * Type 1 IOCB. */ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, ha->req_q_map[0]); cur_dsd = cont_pkt->dsd; avail_dsds = 5; entry_count++; } append_dsd64(&cur_dsd, sg); avail_dsds--; } ct_iocb->entry_count = entry_count; } /* * qla82xx_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. */ int qla82xx_start_scsi(srb_t *sp) { int nseg; unsigned long flags; struct scsi_cmnd *cmd; uint32_t *clr_ptr; uint32_t handle; uint16_t cnt; uint16_t req_cnt; uint16_t tot_dsds; struct device_reg_82xx __iomem *reg; uint32_t dbval; __be32 *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_qpair *qpair = sp->qpair; /* Setup device pointers. 
*/ reg = &ha->iobase->isp82; cmd = GET_CMD_SP(sp); req = vha->req; rsp = ha->rsp_q_map[0]; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; dbval = 0x04 | (ha->portnum << 5); /* Send marker if required */ if (vha->marker_needed != 0) { if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x300c, "qla2x00_marker failed for cmd=%p.\n", cmd); return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; } /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else nseg = 0; tot_dsds = nseg; if (tot_dsds > ql2xshiftctondsd) { struct cmd_type_6 *cmd_pkt; uint16_t more_dsd_lists = 0; struct dsd_dma *dsd_ptr; uint16_t i; more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) { ql_dbg(ql_dbg_io, vha, 0x300d, "Num of DSD list %d is than %d for cmd=%p.\n", more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd); goto queuing_error; } if (more_dsd_lists <= qpair->dsd_avail) goto sufficient_dsds; else more_dsd_lists -= qpair->dsd_avail; for (i = 0; i < more_dsd_lists; i++) { dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); if (!dsd_ptr) { ql_log(ql_log_fatal, vha, 0x300e, "Failed to allocate memory for dsd_dma " "for cmd=%p.\n", cmd); goto queuing_error; } dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!dsd_ptr->dsd_addr) { kfree(dsd_ptr); ql_log(ql_log_fatal, vha, 0x300f, "Failed to allocate memory for dsd_addr " "for cmd=%p.\n", cmd); goto queuing_error; } list_add_tail(&dsd_ptr->list, &qpair->dsd_list); qpair->dsd_avail++; } sufficient_dsds: req_cnt = 1; if (req->cnt < (req_cnt + 2)) { cnt = (uint16_t)rd_reg_dword_relaxed( &reg->req_q_out[0]); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } ctx = &sp->u.scmd.ct6_ctx; memset(ctx, 0, sizeof(struct ct6_dsd)); ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &ctx->fcp_cmnd_dma); if (!ctx->fcp_cmnd) { ql_log(ql_log_fatal, vha, 0x3011, "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error; } /* Initialize the DSD list and dma handle */ INIT_LIST_HEAD(&ctx->dsd_list); ctx->dsd_use_cnt = 0; if (cmd->cmd_len > 16) { additional_cdb_len = cmd->cmd_len - 16; if ((cmd->cmd_len % 4) != 0) { /* SCSI command bigger than 16 bytes must be * multiple of 4 */ ql_log(ql_log_warn, vha, 0x3012, "scsi cmd len %d not multiple of 4 " "for cmd=%p.\n", cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; } else { additional_cdb_len = 0; ctx->fcp_cmnd_len = 12 + 16 + 4; } cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). 
*/ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->vha->vp_idx; /* Build IOCB segments */ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) goto queuing_error_fcp_cmnd; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* build FCP_CMND IU */ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; if (cmd->sc_data_direction == DMA_TO_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 1; else if (cmd->sc_data_direction == DMA_FROM_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 2; /* Populate the FCP_PRIO. */ if (ha->flags.fcp_prio_enabled) ctx->fcp_cmnd->task_attribute |= sp->fcport->fcp_prio << 3; memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + additional_cdb_len); *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address); sp->flags |= SRB_FCP_CMND_DMA_VALID; cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; /* Specify response queue number where * completion should happen */ cmd_pkt->entry_status = (uint8_t) rsp->id; } else { struct cmd_type_7 *cmd_pkt; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { cnt = (uint16_t)rd_reg_dword_relaxed( &reg->req_q_out[0]); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); } if (req->cnt < (req_cnt + 2)) goto queuing_error; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->vha->vp_idx; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* Populate the FCP_PRIO. */ if (ha->flags.fcp_prio_enabled) cmd_pkt->task |= sp->fcport->fcp_prio << 3; /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; /* Specify response queue number where * completion should happen. */ cmd_pkt->entry_status = (uint8_t) rsp->id; } /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; wmb(); /* Adjust ring index. 
*/ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ /* write, read and verify logic */ dbval = dbval | (req->id << 8) | (req->ring_index << 16); if (ql2xdbwr) qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); else { wrt_reg_dword(ha->nxdb_wr_ptr, dbval); wmb(); while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { wrt_reg_dword(ha->nxdb_wr_ptr, dbval); wmb(); } } /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; queuing_error_fcp_cmnd: dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); if (sp->u.scmd.crc_ctx) { mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); sp->u.scmd.crc_ctx = NULL; } spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; } static void qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) { struct srb_iocb *aio = &sp->u.iocb_cmd; scsi_qla_host_t *vha = sp->vha; struct req_que *req = sp->qpair->req; srb_t *orig_sp = sp->cmd_sp; memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); abt_iocb->entry_type = ABORT_IOCB_TYPE; abt_iocb->entry_count = 1; abt_iocb->handle = make_handle(req->id, sp->handle); if (sp->fcport) { abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; abt_iocb->port_id[1] = sp->fcport->d_id.b.area; abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; } abt_iocb->handle_to_abort = make_handle(le16_to_cpu(aio->u.abt.req_que_no), aio->u.abt.cmd_hndl); abt_iocb->vp_index = vha->vp_idx; abt_iocb->req_que_no = aio->u.abt.req_que_no; /* need to pass original sp */ if (orig_sp) qla_nvme_abort_set_option(abt_iocb, orig_sp); /* Send the command to the firmware */ wmb(); } static void qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) { int i, sz; mbx->entry_type = MBX_IOCB_TYPE; mbx->handle = sp->handle; sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); for (i = 0; i < sz; i++) mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; } static void qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) { sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); ct_pkt->handle = sp->handle; } static void qla2x00_send_notify_ack_iocb(srb_t *sp, struct nack_to_isp *nack) { struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; nack->entry_type = NOTIFY_ACK_TYPE; nack->entry_count = 1; nack->ox_id = ntfy->ox_id; nack->u.isp24.handle = sp->handle; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; nack->u.isp24.srr_flags = 0; nack->u.isp24.srr_reject_code = 0; nack->u.isp24.srr_reject_code_expl = 0; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; if (ntfy->u.isp24.status_subcode 
== ELS_PLOGI && (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) && sp->vha->hw->flags.edif_enabled) { ql_dbg(ql_dbg_disc, sp->vha, 0x3074, "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n", sp->name, sp->handle, sp->fcport->loop_id, sp->fcport->d_id.b24); nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); } } /* * Build NVME LS request */ static void qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) { struct srb_iocb *nvme; nvme = &sp->u.iocb_cmd; cmd_pkt->entry_type = PT_LS4_REQUEST; cmd_pkt->entry_count = 1; cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); cmd_pkt->vp_index = sp->fcport->vha->vp_idx; if (sp->unsol_rsp) { cmd_pkt->control_flags = cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT); cmd_pkt->nport_handle = nvme->u.nvme.nport_handle; cmd_pkt->exchange_address = nvme->u.nvme.exchange_address; } else { cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->rx_dseg_count = cpu_to_le16(1); cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); } cmd_pkt->tx_dseg_count = cpu_to_le16(1); cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); } static void qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) { int map, pos; vce->entry_type = VP_CTRL_IOCB_TYPE; vce->handle = sp->handle; vce->entry_count = 1; vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); vce->vp_count = cpu_to_le16(1); /* * index map in firmware starts with 1; decrement index * this is ok as we never use index 0 */ map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; vce->vp_idx_map[map] |= 1 << pos; } static void qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) { logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; logio->port_id[2] = sp->fcport->d_id.b.domain; logio->vp_index = sp->fcport->vha->vp_idx; } static int qla_get_iocbs_resource(struct srb *sp) { bool get_exch; bool push_it_through = false; if (!ql2xenforce_iocb_limit) { sp->iores.res_type = RESOURCE_NONE; return 0; } sp->iores.res_type = RESOURCE_NONE; switch (sp->type) { case SRB_TM_CMD: case SRB_PRLI_CMD: case SRB_ADISC_CMD: push_it_through = true; fallthrough; case SRB_LOGIN_CMD: case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: case SRB_ELS_CMD_HST_NOLOGIN: case SRB_CT_CMD: case SRB_NVME_LS: case SRB_ELS_DCMD: get_exch = true; break; case SRB_FXIOCB_DCMD: case SRB_FXIOCB_BCMD: sp->iores.res_type = RESOURCE_NONE; return 0; case SRB_SA_UPDATE: case SRB_SA_REPLACE: case SRB_MB_IOCB: case SRB_ABT_CMD: case SRB_NACK_PLOGI: case SRB_NACK_PRLI: case SRB_NACK_LOGO: case SRB_LOGOUT_CMD: case SRB_CTRL_VP: case SRB_MARKER: default: push_it_through = true; get_exch = false; } sp->iores.res_type |= RESOURCE_IOCB; sp->iores.iocb_cnt = 1; if (get_exch) { sp->iores.res_type |= RESOURCE_EXCH; sp->iores.exch_cnt = 1; } if (push_it_through) sp->iores.res_type |= RESOURCE_FORCE; return qla_get_fw_resources(sp->qpair, &sp->iores); } static void qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) { mrk->entry_type = MARKER_TYPE; mrk->modifier = 
sp->u.iocb_cmd.u.tmf.modifier; mrk->handle = make_handle(sp->qpair->req->id, sp->handle); if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; } } int qla2x00_start_sp(srb_t *sp) { int rval = QLA_SUCCESS; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qp = sp->qpair; void *pkt; unsigned long flags; if (vha->hw->flags.eeh_busy) return -EIO; spin_lock_irqsave(qp->qp_lock_ptr, flags); rval = qla_get_iocbs_resource(sp); if (rval) { spin_unlock_irqrestore(qp->qp_lock_ptr, flags); return -EAGAIN; } pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); if (!pkt) { rval = -EAGAIN; ql_log(ql_log_warn, vha, 0x700c, "qla2x00_alloc_iocbs failed.\n"); goto done; } switch (sp->type) { case SRB_LOGIN_CMD: IS_FWI2_CAPABLE(ha) ? qla24xx_login_iocb(sp, pkt) : qla2x00_login_iocb(sp, pkt); break; case SRB_PRLI_CMD: qla24xx_prli_iocb(sp, pkt); break; case SRB_LOGOUT_CMD: IS_FWI2_CAPABLE(ha) ? qla24xx_logout_iocb(sp, pkt) : qla2x00_logout_iocb(sp, pkt); break; case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: qla24xx_els_iocb(sp, pkt); break; case SRB_ELS_CMD_HST_NOLOGIN: qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg); ((struct els_entry_24xx *)pkt)->handle = sp->handle; break; case SRB_CT_CMD: IS_FWI2_CAPABLE(ha) ? qla24xx_ct_iocb(sp, pkt) : qla2x00_ct_iocb(sp, pkt); break; case SRB_ADISC_CMD: IS_FWI2_CAPABLE(ha) ? qla24xx_adisc_iocb(sp, pkt) : qla2x00_adisc_iocb(sp, pkt); break; case SRB_TM_CMD: IS_QLAFX00(ha) ? qlafx00_tm_iocb(sp, pkt) : qla24xx_tm_iocb(sp, pkt); break; case SRB_FXIOCB_DCMD: case SRB_FXIOCB_BCMD: qlafx00_fxdisc_iocb(sp, pkt); break; case SRB_NVME_LS: qla_nvme_ls(sp, pkt); break; case SRB_ABT_CMD: IS_QLAFX00(ha) ? qlafx00_abort_iocb(sp, pkt) : qla24xx_abort_iocb(sp, pkt); break; case SRB_ELS_DCMD: qla24xx_els_logo_iocb(sp, pkt); break; case SRB_CT_PTHRU_CMD: qla2x00_ctpthru_cmd_iocb(sp, pkt); break; case SRB_MB_IOCB: qla2x00_mb_iocb(sp, pkt); break; case SRB_NACK_PLOGI: case SRB_NACK_PRLI: case SRB_NACK_LOGO: qla2x00_send_notify_ack_iocb(sp, pkt); break; case SRB_CTRL_VP: qla25xx_ctrlvp_iocb(sp, pkt); break; case SRB_PRLO_CMD: qla24xx_prlo_iocb(sp, pkt); break; case SRB_SA_UPDATE: qla24xx_sa_update_iocb(sp, pkt); break; case SRB_SA_REPLACE: qla24xx_sa_replace_iocb(sp, pkt); break; case SRB_MARKER: qla_marker_iocb(sp, pkt); break; default: break; } if (sp->start_timer) { /* ref: TMR timer ref * this code should be just before start_iocbs function * This will make sure that caller function don't to do * kref_put even on failure */ kref_get(&sp->cmd_kref); add_timer(&sp->u.iocb_cmd.timer); } wmb(); qla2x00_start_iocbs(vha, qp->req); done: if (rval) qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(qp->qp_lock_ptr, flags); return rval; } static void qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) { uint16_t avail_dsds; struct dsd64 *cur_dsd; uint32_t req_data_len = 0; uint32_t rsp_data_len = 0; struct scatterlist *sg; int index; int entry_count = 1; struct bsg_job *bsg_job = sp->u.bsg_job; /*Update entry type to indicate bidir command */ put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); /* Set the transfer direction, in this set both flags * Also set the BD_WRAP_BACK flag, firmware will take care * assigning DID=SID for outgoing pkts. 
*/ cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | BD_WRAP_BACK); req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); vha->bidi_stats.transfer_bytes += req_data_len; vha->bidi_stats.io_count++; vha->qla_stats.output_bytes += req_data_len; vha->qla_stats.output_requests++; /* Only one dsd is available for bidirectional IOCB, remaining dsds * are bundled in continuation iocb */ avail_dsds = 1; cur_dsd = &cmd_pkt->fcp_dsd; index = 0; for_each_sg(bsg_job->request_payload.sg_list, sg, bsg_job->request_payload.sg_cnt, index) { cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets */ if (avail_dsds == 0) { /* Continuation type 1 IOCB can accomodate * 5 DSDS */ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = cont_pkt->dsd; avail_dsds = 5; entry_count++; } append_dsd64(&cur_dsd, sg); avail_dsds--; } /* For read request DSD will always goes to continuation IOCB * and follow the write DSD. If there is room on the current IOCB * then it is added to that IOCB else new continuation IOCB is * allocated. */ for_each_sg(bsg_job->reply_payload.sg_list, sg, bsg_job->reply_payload.sg_cnt, index) { cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets */ if (avail_dsds == 0) { /* Continuation type 1 IOCB can accomodate * 5 DSDS */ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); cur_dsd = cont_pkt->dsd; avail_dsds = 5; entry_count++; } append_dsd64(&cur_dsd, sg); avail_dsds--; } /* This value should be same as number of IOCB required for this cmd */ cmd_pkt->entry_count = entry_count; } int qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) { struct qla_hw_data *ha = vha->hw; unsigned long flags; uint32_t handle; uint16_t req_cnt; uint16_t cnt; uint32_t *clr_ptr; struct cmd_bidir *cmd_pkt = NULL; struct rsp_que *rsp; struct req_que *req; int rval = EXT_STATUS_OK; rval = QLA_SUCCESS; rsp = ha->rsp_q_map[0]; req = vha->req; /* Send marker if required */ if (vha->marker_needed != 0) { if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return EXT_STATUS_MAILBOX; vha->marker_needed = 0; } /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) { rval = EXT_STATUS_BUSY; goto queuing_error; } /* Calculate number of IOCB required */ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) goto queuing_error; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); } if (req->cnt < req_cnt + 2) { rval = EXT_STATUS_BUSY; goto queuing_error; } cmd_pkt = (struct cmd_bidir *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. 
*/ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); /* Set NPORT-ID (of vha)*/ cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); cmd_pkt->port_id[0] = vha->d_id.b.al_pa; cmd_pkt->port_id[1] = vha->d_id.b.area; cmd_pkt->port_id[2] = vha->d_id.b.domain; qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); cmd_pkt->entry_status = (uint8_t) rsp->id; /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; req->cnt -= req_cnt; /* Send the command to the firmware */ wmb(); qla2x00_start_iocbs(vha, req); queuing_error: spin_unlock_irqrestore(&ha->hardware_lock, flags); return rval; } /** * qla_start_scsi_type6() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. */ static int qla_start_scsi_type6(srb_t *sp) { int nseg; unsigned long flags; uint32_t *clr_ptr; uint32_t handle; struct cmd_type_6 *cmd_pkt; uint16_t cnt; uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; struct rsp_que *rsp; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; uint16_t more_dsd_lists = 0; struct dsd_dma *dsd_ptr; uint16_t i; __be32 *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); /* Setup qpair pointers */ req = qpair->req; rsp = qpair->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; } handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else { nseg = 0; } tot_dsds = nseg; /* even though the driver only needs 1 Type 6 IOCB, FW still converts DSDs to Continuation IOCBs */ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) { ql_dbg(ql_dbg_io, vha, 0x3028, "Num of DSD list %d is more than %d for cmd=%p.\n", more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd); goto queuing_error; } if (more_dsd_lists <= qpair->dsd_avail) goto sufficient_dsds; else more_dsd_lists -= qpair->dsd_avail; for (i = 0; i < more_dsd_lists; i++) { dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); if (!dsd_ptr) { ql_log(ql_log_fatal, vha, 0x3029, "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd); goto queuing_error; } INIT_LIST_HEAD(&dsd_ptr->list); dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &dsd_ptr->dsd_list_dma); if (!dsd_ptr->dsd_addr) { kfree(dsd_ptr); ql_log(ql_log_fatal, vha, 0x302a, "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd); goto queuing_error; } list_add_tail(&dsd_ptr->list, &qpair->dsd_list); qpair->dsd_avail++; } sufficient_dsds: req_cnt = 1; if (req->cnt < (req_cnt + 2)) { if 
(IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) goto queuing_error; } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } ctx = &sp->u.scmd.ct6_ctx; memset(ctx, 0, sizeof(struct ct6_dsd)); ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &ctx->fcp_cmnd_dma); if (!ctx->fcp_cmnd) { ql_log(ql_log_fatal, vha, 0x3031, "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error; } /* Initialize the DSD list and dma handle */ INIT_LIST_HEAD(&ctx->dsd_list); ctx->dsd_use_cnt = 0; if (cmd->cmd_len > 16) { additional_cdb_len = cmd->cmd_len - 16; if (cmd->cmd_len % 4 || cmd->cmd_len > QLA_CDB_BUF_SIZE) { /* * SCSI command bigger than 16 bytes must be * multiple of 4 or too big. */ ql_log(ql_log_warn, vha, 0x3033, "scsi cmd len %d not multiple of 4 for cmd=%p.\n", cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; } else { additional_cdb_len = 0; ctx->fcp_cmnd_len = 12 + 16 + 4; } /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* Set NPORT-ID and LUN number */ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->vha->vp_idx; /* Build IOCB segments */ qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds); int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* build FCP_CMND IU */ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; if (cmd->sc_data_direction == DMA_TO_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 1; else if (cmd->sc_data_direction == DMA_FROM_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 2; /* Populate the FCP_PRIO. */ if (ha->flags.fcp_prio_enabled) ctx->fcp_cmnd->task_attribute |= sp->fcport->fcp_prio << 3; memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + additional_cdb_len); *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address); sp->flags |= SRB_FCP_CMND_DMA_VALID; cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } sp->qpair->cmd_cnt++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ wrt_reg_dword(req->req_q_in, req->ring_index); /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_SUCCESS; queuing_error_fcp_cmnd: dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); qla_put_fw_resources(sp->qpair, &sp->iores); if (sp->u.scmd.crc_ctx) { mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); sp->u.scmd.crc_ctx = NULL; } spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; }
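/*
 * Illustrative sketch, not part of the driver: every request-building
 * path above follows the same DSD accounting -- the base command IOCB
 * carries a small fixed number of DSDs (for example one for the
 * bidirectional command, two for the CT pass-through IOCB) and each
 * additional group of up to five DSDs costs one Continuation Type 1
 * IOCB.  The hypothetical helper below only restates that arithmetic;
 * the driver itself relies on qla24xx_calc_iocbs() for this count, and
 * the names here are made up for the example.
 */
static inline uint16_t example_iocb_count(uint16_t tot_dsds, uint16_t base_dsds)
{
	uint16_t entries = 1;	/* the command IOCB itself */

	if (tot_dsds > base_dsds)
		/* each Continuation Type 1 IOCB holds five more DSDs */
		entries += (tot_dsds - base_dsds + 4) / 5;

	return entries;
}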
linux-master
drivers/scsi/qla2xxx/qla_iocb.c
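/*
 * Illustrative sketch, not part of the driver: the submission routines in
 * qla_iocb.c above recompute the free request-ring space from the
 * firmware's "out" index with the same wrap-around arithmetic each time
 * (req->cnt = cnt - req->ring_index, or req->length - (req->ring_index -
 * cnt) once the producer has wrapped).  The helper below is a stand-alone
 * restatement of that calculation; its names are hypothetical.
 */
static inline uint16_t example_req_slots_free(uint16_t ring_index,
					      uint16_t out_index,
					      uint16_t ring_length)
{
	/* producer index still behind the consumer: simple difference */
	if (ring_index < out_index)
		return out_index - ring_index;

	/* producer wrapped past the end: count what is left of the ring */
	return ring_length - (ring_index - out_index);
}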
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include <linux/delay.h> #include <linux/ktime.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #include <linux/utsname.h> /* QLAFX00 specific Mailbox implementation functions */ /* * qlafx00_mailbox_command * Issue mailbox command and waits for completion. * * Input: * ha = adapter block pointer. * mcp = driver internal mbx struct pointer. * * Output: * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. * * Returns: * 0 : QLA_SUCCESS = cmd performed success * 1 : QLA_FUNCTION_FAILED (error encountered) * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) * * Context: * Kernel context. */ static int qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) { int rval; unsigned long flags = 0; device_reg_t *reg; uint8_t abort_active; uint8_t io_lock_on; uint16_t command = 0; uint32_t *iptr; __le32 __iomem *optr; uint32_t cnt; uint32_t mboxes; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); if (ha->pdev->error_state == pci_channel_io_perm_failure) { ql_log(ql_log_warn, vha, 0x115c, "PCI channel failed permanently, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x115f, "Device in failed state, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } reg = ha->iobase; io_lock_on = base_vha->flags.init_done; rval = QLA_SUCCESS; abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (ha->flags.pci_channel_io_perm_failure) { ql_log(ql_log_warn, vha, 0x1175, "Perm failure on EEH timeout MBX, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ql_log(ql_log_warn, vha, 0x1176, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); rval = QLA_FUNCTION_FAILED; goto premature_exit; } /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during * non ISP abort time. */ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { /* Timeout occurred. Return error. */ ql_log(ql_log_warn, vha, 0x1177, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); return QLA_FUNCTION_TIMEOUT; } ha->flags.mbox_busy = 1; /* Save mailbox command for debug */ ha->mcp32 = mcp; ql_dbg(ql_dbg_mbx, vha, 0x1178, "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); spin_lock_irqsave(&ha->hardware_lock, flags); /* Load mailbox registers. */ optr = &reg->ispfx00.mailbox0; iptr = mcp->mb; command = mcp->mb[0]; mboxes = mcp->out_mb; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) wrt_reg_dword(optr, *iptr); mboxes >>= 1; optr++; iptr++; } /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172, (uint8_t *)mcp->mb, 16); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173, ((uint8_t *)mcp->mb + 0x10), 16); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174, ((uint8_t *)mcp->mb + 0x20), 8); /* Unlock mbx registers and wait for interrupt */ ql_dbg(ql_dbg_mbx, vha, 0x1179, "Going to unlock irq & waiting for interrupts. 
" "jiffies=%lx.\n", jiffies); /* Wait for mbx cmd completion until timeout */ if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); spin_unlock_irqrestore(&ha->hardware_lock, flags); WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ) != 0); } else { ql_dbg(ql_dbg_mbx, vha, 0x112c, "Cmd=%x Polling Mode.\n", command); QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ while (!ha->flags.mbox_int) { if (time_after(jiffies, wait_time)) break; /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); if (!ha->flags.mbox_int && !(IS_QLA2200(ha) && command == MBC_LOAD_RISC_RAM_EXTENDED)) usleep_range(10000, 11000); } /* while */ ql_dbg(ql_dbg_mbx, vha, 0x112d, "Waited %d sec.\n", (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); } /* Check whether we timed out */ if (ha->flags.mbox_int) { uint32_t *iptr2; ql_dbg(ql_dbg_mbx, vha, 0x112e, "Cmd=%x completed.\n", command); /* Got interrupt. Clear the flag. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE) rval = QLA_FUNCTION_FAILED; /* Load return mailbox registers. */ iptr2 = mcp->mb; iptr = (uint32_t *)&ha->mailbox_out32[0]; mboxes = mcp->in_mb; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) *iptr2 = *iptr; mboxes >>= 1; iptr2++; iptr++; } } else { rval = QLA_FUNCTION_TIMEOUT; } ha->flags.mbox_busy = 0; /* Clean up */ ha->mcp32 = NULL; if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x113a, "checking for additional resp interrupt.\n"); /* polling mode for non isp_abort commands. */ qla2x00_poll(ha->rsp_q_map[0]); } if (rval == QLA_FUNCTION_TIMEOUT && mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { if (!io_lock_on || (mcp->flags & IOCTL_CMD) || ha->flags.eeh_busy) { /* not in dpc. schedule it for dpc to take over. */ ql_dbg(ql_dbg_mbx, vha, 0x115d, "Timeout, schedule isp_abort_needed.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { ql_log(ql_log_info, base_vha, 0x115e, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " "abort.\n", command, mcp->mb[0], ha->flags.eeh_busy); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else if (!abort_active) { /* call abort directly since we are in the DPC thread */ ql_dbg(ql_dbg_mbx, vha, 0x1160, "Timeout, calling abort_isp.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { ql_log(ql_log_info, base_vha, 0x1161, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x. Scheduling ISP abort ", command, mcp->mb[0]); set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); if (ha->isp_ops->abort_isp(vha)) { /* Failed. retry later. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_mbx, vha, 0x1162, "Finished abort_isp.\n"); } } } premature_exit: /* Allow next mbx cmd to come in. 
*/ complete(&ha->mbx_cmd_comp); if (rval) { ql_log(ql_log_warn, base_vha, 0x1163, "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__); } return rval; } /* * qlafx00_driver_shutdown * Indicate a driver shutdown to firmware. * * Input: * ha = adapter block pointer. * * Returns: * local function return status code. * * Context: * Kernel context. */ int qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo) { int rval; struct mbx_cmd_32 mc; struct mbx_cmd_32 *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166, "Entered %s.\n", __func__); mcp->mb[0] = MBC_MR_DRV_SHUTDOWN; mcp->out_mb = MBX_0; mcp->in_mb = MBX_0; if (tmo) mcp->tov = tmo; else mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qlafx00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1167, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168, "Done %s.\n", __func__); } return rval; } /* * qlafx00_get_firmware_state * Get adapter firmware state. * * Input: * ha = adapter block pointer. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qla7xxx local function return status code. * * Context: * Kernel context. */ static int qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states) { int rval; struct mbx_cmd_32 mc; struct mbx_cmd_32 *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qlafx00_mailbox_command(vha, mcp); /* Return firmware states. */ states[0] = mcp->mb[1]; if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x116a, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b, "Done %s.\n", __func__); } return rval; } /* * qlafx00_init_firmware * Initialize adapter firmware. * * Input: * ha = adapter block pointer. * dptr = Initialization control block pointer. * size = size of initialization control block. * TARGET_QUEUE_LOCK must be released. * ADAPTER_STATE_LOCK must be released. * * Returns: * qlafx00 local function return status code. * * Context: * Kernel context. 
*/ int qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size) { int rval; struct mbx_cmd_32 mc; struct mbx_cmd_32 *mcp = &mc; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c, "Entered %s.\n", __func__); mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; mcp->mb[1] = 0; mcp->mb[2] = MSD(ha->init_cb_dma); mcp->mb[3] = LSD(ha->init_cb_dma); mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->buf_size = size; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS; rval = qlafx00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x116d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e, "Done %s.\n", __func__); } return rval; } /* * qlafx00_mbx_reg_test */ static int qlafx00_mbx_reg_test(scsi_qla_host_t *vha) { int rval; struct mbx_cmd_32 mc; struct mbx_cmd_32 *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f, "Entered %s.\n", __func__); mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; mcp->mb[1] = 0xAAAA; mcp->mb[2] = 0x5555; mcp->mb[3] = 0xAA55; mcp->mb[4] = 0x55AA; mcp->mb[5] = 0xA5A5; mcp->mb[6] = 0x5A5A; mcp->mb[7] = 0x2525; mcp->mb[8] = 0xBBBB; mcp->mb[9] = 0x6666; mcp->mb[10] = 0xBB66; mcp->mb[11] = 0x66BB; mcp->mb[12] = 0xB6B6; mcp->mb[13] = 0x6B6B; mcp->mb[14] = 0x3636; mcp->mb[15] = 0xCCCC; mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->buf_size = 0; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS; rval = qlafx00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 || mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA) rval = QLA_FUNCTION_FAILED; if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A || mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB) rval = QLA_FUNCTION_FAILED; if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 || mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6) rval = QLA_FUNCTION_FAILED; if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 || mcp->mb[31] != 0xCCCC) rval = QLA_FUNCTION_FAILED; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1170, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171, "Done %s.\n", __func__); } return rval; } /** * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers. * @vha: HA context * * Returns 0 on success. */ int qlafx00_pci_config(scsi_qla_host_t *vha) { uint16_t w; struct qla_hw_data *ha = vha->hw; pci_set_master(ha->pdev); pci_try_set_mwi(ha->pdev); pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); /* PCIe -- adjust Maximum Read Request Size (2048). */ if (pci_is_pcie(ha->pdev)) pcie_set_readrq(ha->pdev, 2048); ha->chip_revision = ha->pdev->revision; return QLA_SUCCESS; } /** * qlafx00_soc_cpu_reset() - Perform warm reset of iSA(CPUs being reset on SOC). 
* @vha: HA context * */ static inline void qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) { unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; int i, core; uint32_t cnt; uint32_t reg_val; spin_lock_irqsave(&ha->hardware_lock, flags); QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); /* stop the XOR DMA engines */ QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); /* stop the IDMA engines */ reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); reg_val &= ~(1<<12); QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); reg_val &= ~(1<<12); QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); reg_val &= ~(1<<12); QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); reg_val &= ~(1<<12); QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); for (i = 0; i < 100000; i++) { if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) break; udelay(100); } /* Set all 4 cores in reset */ for (i = 0; i < 4; i++) { QLAFX00_SET_HBA_SOC_REG(ha, (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); QLAFX00_SET_HBA_SOC_REG(ha, (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); } /* Reset all units in Fabric */ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); /* */ QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); /* Set all 4 core Memory Power Down Registers */ for (i = 0; i < 5; i++) { QLAFX00_SET_HBA_SOC_REG(ha, (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0)); } /* Reset all interrupt control registers */ for (i = 0; i < 115; i++) { QLAFX00_SET_HBA_SOC_REG(ha, (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0)); } /* Reset Timers control registers. per core */ for (core = 0; core < 4; core++) for (i = 0; i < 8; i++) QLAFX00_SET_HBA_SOC_REG(ha, (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0)); /* Reset per core IRQ ack register */ for (core = 0; core < 4; core++) QLAFX00_SET_HBA_SOC_REG(ha, (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF)); /* Set Fabric control and config to defaults */ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); /* Kick in Fabric units */ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); /* Kick in Core0 to start boot process */ QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait 10secs for soft-reset to complete. */ for (cnt = 10; cnt; cnt--) { msleep(1000); barrier(); } } /** * qlafx00_soft_reset() - Soft Reset ISPFx00. * @vha: HA context * * Returns 0 on success. */ int qlafx00_soft_reset(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval = QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(ha->pdev) && ha->flags.pci_channel_io_perm_failure)) return rval; ha->isp_ops->disable_intrs(ha); qlafx00_soc_cpu_reset(vha); return QLA_SUCCESS; } /** * qlafx00_chip_diag() - Test ISPFx00 for proper operation. * @vha: HA context * * Returns 0 on success. 
*/ int qlafx00_chip_diag(scsi_qla_host_t *vha) { int rval = 0; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; rval = qlafx00_mbx_reg_test(vha); if (rval) { ql_log(ql_log_warn, vha, 0x1165, "Failed mailbox send register test\n"); } else { /* Flag a successful rval */ rval = QLA_SUCCESS; } return rval; } void qlafx00_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; wrt_reg_dword(&reg->req_q_in, 0); wrt_reg_dword(&reg->req_q_out, 0); wrt_reg_dword(&reg->rsp_q_in, 0); wrt_reg_dword(&reg->rsp_q_out, 0); /* PCI posting */ rd_reg_dword(&reg->rsp_q_out); } char * qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) { struct qla_hw_data *ha = vha->hw; if (pci_is_pcie(ha->pdev)) strscpy(str, "PCIe iSA", str_len); return str; } char * qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) { struct qla_hw_data *ha = vha->hw; snprintf(str, size, "%s", ha->mr.fw_version); return str; } void qlafx00_enable_intrs(struct qla_hw_data *ha) { unsigned long flags = 0; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 1; QLAFX00_ENABLE_ICNTRL_REG(ha); spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qlafx00_disable_intrs(struct qla_hw_data *ha) { unsigned long flags = 0; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 0; QLAFX00_DISABLE_ICNTRL_REG(ha); spin_unlock_irqrestore(&ha->hardware_lock, flags); } int qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag) { return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); } int qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag) { return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); } int qlafx00_iospace_config(struct qla_hw_data *ha) { if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x014e, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } /* Use MMIO operations for all accesses. 
*/ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { ql_log_pci(ql_log_warn, ha->pdev, 0x014f, "Invalid pci I/O region size (%s).\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) { ql_log_pci(ql_log_warn, ha->pdev, 0x0127, "Invalid PCI mem BAR0 region size (%s), aborting\n", pci_name(ha->pdev)); goto iospace_error_exit; } ha->cregbase = ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); if (!ha->cregbase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) { ql_log_pci(ql_log_warn, ha->pdev, 0x0129, "region #2 not an MMIO resource (%s), aborting\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) { ql_log_pci(ql_log_warn, ha->pdev, 0x012a, "Invalid PCI mem BAR2 region size (%s), aborting\n", pci_name(ha->pdev)); goto iospace_error_exit; } ha->iobase = ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); goto iospace_error_exit; } /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ql_log_pci(ql_log_info, ha->pdev, 0x012c, "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n", ha->bars, ha->cregbase, ha->iobase); return 0; iospace_error_exit: return -ENOMEM; } static void qlafx00_save_queue_ptrs(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; req->length_fx00 = req->length; req->ring_fx00 = req->ring; req->dma_fx00 = req->dma; rsp->length_fx00 = rsp->length; rsp->ring_fx00 = rsp->ring; rsp->dma_fx00 = rsp->dma; ql_dbg(ql_dbg_init, vha, 0x012d, "req: %p, ring_fx00: %p, length_fx00: 0x%x," "req->dma_fx00: 0x%llx\n", req, req->ring_fx00, req->length_fx00, (u64)req->dma_fx00); ql_dbg(ql_dbg_init, vha, 0x012e, "rsp: %p, ring_fx00: %p, length_fx00: 0x%x," "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00, rsp->length_fx00, (u64)rsp->dma_fx00); } static int qlafx00_config_queues(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2); req->length = ha->req_que_len; req->ring = (void __force *)ha->iobase + ha->req_que_off; req->dma = bar2_hdl + ha->req_que_off; if ((!req->ring) || (req->length == 0)) { ql_log_pci(ql_log_info, ha->pdev, 0x012f, "Unable to allocate memory for req_ring\n"); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_init, vha, 0x0130, "req: %p req_ring pointer %p req len 0x%x " "req off 0x%x\n, req->dma: 0x%llx", req, req->ring, req->length, ha->req_que_off, (u64)req->dma); rsp->length = ha->rsp_que_len; rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off; rsp->dma = bar2_hdl + ha->rsp_que_off; if ((!rsp->ring) || (rsp->length == 0)) { ql_log_pci(ql_log_info, ha->pdev, 0x0131, "Unable to allocate memory for rsp_ring\n"); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_init, vha, 0x0132, "rsp: %p rsp_ring pointer %p rsp len 0x%x " "rsp off 0x%x, rsp->dma: 0x%llx\n", rsp, rsp->ring, rsp->length, ha->rsp_que_off, (u64)rsp->dma); return QLA_SUCCESS; } static int qlafx00_init_fw_ready(scsi_qla_host_t *vha) { int rval = 0; unsigned long wtime; uint16_t wait_time; /* Wait time */ struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 
uint32_t aenmbx, aenmbx7 = 0; uint32_t pseudo_aen; uint32_t state[5]; bool done = false; /* 30 seconds wait - Adjust if required */ wait_time = 30; pseudo_aen = rd_reg_dword(&reg->pseudoaen); if (pseudo_aen == 1) { aenmbx7 = rd_reg_dword(&reg->initval7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); rval = qlafx00_driver_shutdown(vha, 10); if (rval != QLA_SUCCESS) qlafx00_soft_reset(vha); } /* wait time before firmware ready */ wtime = jiffies + (wait_time * HZ); do { aenmbx = rd_reg_dword(&reg->aenmailbox0); barrier(); ql_dbg(ql_dbg_mbx, vha, 0x0133, "aenmbx: 0x%x\n", aenmbx); switch (aenmbx) { case MBA_FW_NOT_STARTED: case MBA_FW_STARTING: break; case MBA_SYSTEM_ERR: case MBA_REQ_TRANSFER_ERR: case MBA_RSP_TRANSFER_ERR: case MBA_FW_INIT_FAILURE: qlafx00_soft_reset(vha); break; case MBA_FW_RESTART_CMPLT: /* Set the mbx and rqstq intr code */ aenmbx7 = rd_reg_dword(&reg->aenmailbox7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); ha->req_que_off = rd_reg_dword(&reg->aenmailbox1); ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3); ha->req_que_len = rd_reg_dword(&reg->aenmailbox5); ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6); wrt_reg_dword(&reg->aenmailbox0, 0); rd_reg_dword_relaxed(&reg->aenmailbox0); ql_dbg(ql_dbg_init, vha, 0x0134, "f/w returned mbx_intr_code: 0x%x, " "rqstq_intr_code: 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code); QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); rval = QLA_SUCCESS; done = true; break; default: if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS) break; /* If fw is apparently not ready. In order to continue, * we might need to issue Mbox cmd, but the problem is * that the DoorBell vector values that come with the * 8060 AEN are most likely gone by now (and thus no * bell would be rung on the fw side when mbox cmd is * issued). We have to therefore grab the 8060 AEN * shadow regs (filled in by FW when the last 8060 * AEN was being posted). * Do the following to determine what is needed in * order to get the FW ready: * 1. reload the 8060 AEN values from the shadow regs * 2. clear int status to get rid of possible pending * interrupts * 3. issue Get FW State Mbox cmd to determine fw state * Set the mbx and rqstq intr code from Shadow Regs */ aenmbx7 = rd_reg_dword(&reg->initval7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); ha->req_que_off = rd_reg_dword(&reg->initval1); ha->rsp_que_off = rd_reg_dword(&reg->initval3); ha->req_que_len = rd_reg_dword(&reg->initval5); ha->rsp_que_len = rd_reg_dword(&reg->initval6); ql_dbg(ql_dbg_init, vha, 0x0135, "f/w returned mbx_intr_code: 0x%x, " "rqstq_intr_code: 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code); QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); /* Get the FW state */ rval = qlafx00_get_firmware_state(vha, state); if (rval != QLA_SUCCESS) { /* Retry if timer has not expired */ break; } if (state[0] == FSTATE_FX00_CONFIG_WAIT) { /* Firmware is waiting to be * initialized by driver */ rval = QLA_SUCCESS; done = true; break; } /* Issue driver shutdown and wait until f/w recovers. * Driver should continue to poll until 8060 AEN is * received indicating firmware recovery. 
*/ ql_dbg(ql_dbg_init, vha, 0x0136, "Sending Driver shutdown fw_state 0x%x\n", state[0]); rval = qlafx00_driver_shutdown(vha, 10); if (rval != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; break; } msleep(500); wtime = jiffies + (wait_time * HZ); break; } if (!done) { if (time_after_eq(jiffies, wtime)) { ql_dbg(ql_dbg_init, vha, 0x0137, "Init f/w failed: aen[7]: 0x%x\n", rd_reg_dword(&reg->aenmailbox7)); rval = QLA_FUNCTION_FAILED; done = true; break; } /* Delay for a while */ msleep(500); } } while (!done); if (rval) ql_dbg(ql_dbg_init, vha, 0x0138, "%s **** FAILED ****.\n", __func__); else ql_dbg(ql_dbg_init, vha, 0x0139, "%s **** SUCCESS ****.\n", __func__); return rval; } /* * qlafx00_fw_ready() - Waits for firmware ready. * @ha: HA context * * Returns 0 on success. */ int qlafx00_fw_ready(scsi_qla_host_t *vha) { int rval; unsigned long wtime; uint16_t wait_time; /* Wait time if loop is coming ready */ uint32_t state[5]; rval = QLA_SUCCESS; wait_time = 10; /* wait time before firmware ready */ wtime = jiffies + (wait_time * HZ); /* Wait for ISP to finish init */ if (!vha->flags.init_done) ql_dbg(ql_dbg_init, vha, 0x013a, "Waiting for init to complete...\n"); do { rval = qlafx00_get_firmware_state(vha, state); if (rval == QLA_SUCCESS) { if (state[0] == FSTATE_FX00_INITIALIZED) { ql_dbg(ql_dbg_init, vha, 0x013b, "fw_state=%x\n", state[0]); rval = QLA_SUCCESS; break; } } rval = QLA_FUNCTION_FAILED; if (time_after_eq(jiffies, wtime)) break; /* Delay for a while */ msleep(500); ql_dbg(ql_dbg_init, vha, 0x013c, "fw_state=%x curr time=%lx.\n", state[0], jiffies); } while (1); if (rval) ql_dbg(ql_dbg_init, vha, 0x013d, "Firmware ready **** FAILED ****.\n"); else ql_dbg(ql_dbg_init, vha, 0x013e, "Firmware ready **** SUCCESS ****.\n"); return rval; } static int qlafx00_find_all_targets(scsi_qla_host_t *vha, struct list_head *new_fcports) { int rval; uint16_t tgt_id; fc_port_t *fcport, *new_fcport; int found; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) return QLA_FUNCTION_FAILED; if ((atomic_read(&vha->loop_down_timer) || STATE_TRANSITION(vha))) { atomic_set(&vha->loop_down_timer, 0); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088, "Listing Target bit map...\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089, ha->gid_list, 32); /* Allocate temporary rmtport for any new rmtports discovered. */ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) return QLA_MEMORY_ALLOC_FAILED; for_each_set_bit(tgt_id, (void *)ha->gid_list, QLAFX00_TGT_NODE_LIST_SIZE) { /* Send get target node info */ new_fcport->tgt_id = tgt_id; rval = qlafx00_fx_disc(vha, new_fcport, FXDISC_GET_TGT_NODE_INFO); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x208a, "Target info scan failed -- assuming zero-entry " "result...\n"); continue; } /* Locate matching device in database. */ found = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(new_fcport->port_name, fcport->port_name, WWN_SIZE)) continue; found++; /* * If tgt_id is same and state FCS_ONLINE, nothing * changed. */ if (fcport->tgt_id == new_fcport->tgt_id && atomic_read(&fcport->state) == FCS_ONLINE) break; /* * Tgt ID changed or device was marked to be updated. 
*/ ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b, "TGT-ID Change(%s): Present tgt id: " "0x%x state: 0x%x " "wwnn = %llx wwpn = %llx.\n", __func__, fcport->tgt_id, atomic_read(&fcport->state), (unsigned long long)wwn_to_u64(fcport->node_name), (unsigned long long)wwn_to_u64(fcport->port_name)); ql_log(ql_log_info, vha, 0x208c, "TGT-ID Announce(%s): Discovered tgt " "id 0x%x wwnn = %llx " "wwpn = %llx.\n", __func__, new_fcport->tgt_id, (unsigned long long) wwn_to_u64(new_fcport->node_name), (unsigned long long) wwn_to_u64(new_fcport->port_name)); if (atomic_read(&fcport->state) != FCS_ONLINE) { fcport->old_tgt_id = fcport->tgt_id; fcport->tgt_id = new_fcport->tgt_id; ql_log(ql_log_info, vha, 0x208d, "TGT-ID: New fcport Added: %p\n", fcport); qla2x00_update_fcport(vha, fcport); } else { ql_log(ql_log_info, vha, 0x208e, " Existing TGT-ID %x did not get " " offline event from firmware.\n", fcport->old_tgt_id); qla2x00_mark_device_lost(vha, fcport, 0); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); qla2x00_free_fcport(new_fcport); return rval; } break; } if (found) continue; /* If device was not in our fcports list, then add it. */ list_add_tail(&new_fcport->list, new_fcports); /* Allocate a new replacement fcport. */ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) return QLA_MEMORY_ALLOC_FAILED; } qla2x00_free_fcport(new_fcport); return rval; } /* * qlafx00_configure_all_targets * Setup target devices with node ID's. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success. * BIT_0 = error */ static int qlafx00_configure_all_targets(scsi_qla_host_t *vha) { int rval; fc_port_t *fcport, *rmptemp; LIST_HEAD(new_fcports); rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport, FXDISC_GET_TGT_NODE_LIST); if (rval != QLA_SUCCESS) { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return rval; } rval = qlafx00_find_all_targets(vha, &new_fcports); if (rval != QLA_SUCCESS) { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return rval; } /* * Delete all previous devices marked lost. */ list_for_each_entry(fcport, &vha->vp_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { if (fcport->port_type != FCT_INITIATOR) qla2x00_mark_device_lost(vha, fcport, 0); } } /* * Add the new devices to our devices list. */ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; qla2x00_update_fcport(vha, fcport); list_move_tail(&fcport->list, &vha->vp_fcports); ql_log(ql_log_info, vha, 0x208f, "Attach new target id 0x%x wwnn = %llx " "wwpn = %llx.\n", fcport->tgt_id, (unsigned long long)wwn_to_u64(fcport->node_name), (unsigned long long)wwn_to_u64(fcport->port_name)); } /* Free all new device structures not processed. */ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { list_del(&fcport->list); qla2x00_free_fcport(fcport); } return rval; } /* * qlafx00_configure_devices * Updates Fibre Channel Device Database with what is actually on loop. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success. * 1 = error. * 2 = database was full and device was not configured. 
*/ int qlafx00_configure_devices(scsi_qla_host_t *vha) { int rval; unsigned long flags; rval = QLA_SUCCESS; flags = vha->dpc_flags; ql_dbg(ql_dbg_disc, vha, 0x2090, "Configure devices -- dpc flags =0x%lx\n", flags); rval = qlafx00_configure_all_targets(vha); if (rval == QLA_SUCCESS) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { rval = QLA_FUNCTION_FAILED; } else { atomic_set(&vha->loop_state, LOOP_READY); ql_log(ql_log_info, vha, 0x2091, "Device Ready\n"); } } if (rval) { ql_dbg(ql_dbg_disc, vha, 0x2092, "%s *** FAILED ***.\n", __func__); } else { ql_dbg(ql_dbg_disc, vha, 0x2093, "%s: exiting normally.\n", __func__); } return rval; } static void qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp) { struct qla_hw_data *ha = vha->hw; fc_port_t *fcport; vha->flags.online = 0; ha->mr.fw_hbt_en = 0; if (!critemp) { ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); vha->qla_stats.total_isp_aborts++; ql_log(ql_log_info, vha, 0x013f, "Performing ISP error recovery - ha = %p.\n", ha); ha->isp_ops->reset_chip(vha); } if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, QLAFX00_LOOP_DOWN_TIME); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, QLAFX00_LOOP_DOWN_TIME); } /* Clear all async request states across all VPs. */ list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->flags = 0; if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); } if (!ha->flags.eeh_busy) { if (critemp) { qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); } else { /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); } } qla2x00_free_irqs(vha); if (critemp) set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags); else set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); /* Clear the Interrupts */ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); ql_log(ql_log_info, vha, 0x0140, "%s Done done - ha=%p.\n", __func__, ha); } /** * qlafx00_init_response_q_entries() - Initializes response queue entries. * @rsp: response queue * * Beginning of request ring has initialization control block already built * by nvram config routine. * * Returns 0 on success. 
*/ void qlafx00_init_response_q_entries(struct rsp_que *rsp) { uint16_t cnt; response_t *pkt; rsp->ring_ptr = rsp->ring; rsp->ring_index = 0; rsp->status_srb = NULL; pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; wrt_reg_dword((void __force __iomem *)&pkt->signature, RESPONSE_PROCESSED); pkt++; } } int qlafx00_rescan_isp(scsi_qla_host_t *vha) { uint32_t status = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; uint32_t aenmbx7; qla2x00_request_irqs(ha, ha->rsp_q_map[0]); aenmbx7 = rd_reg_dword(&reg->aenmailbox7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); ha->req_que_off = rd_reg_dword(&reg->aenmailbox1); ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3); ha->req_que_len = rd_reg_dword(&reg->aenmailbox5); ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6); ql_dbg(ql_dbg_disc, vha, 0x2094, "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x " " Req que offset 0x%x Rsp que offset 0x%x\n", ha->mbx_intr_code, ha->rqstq_intr_code, ha->req_que_off, ha->rsp_que_len); /* Clear the Interrupts */ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); status = qla2x00_init_rings(vha); if (!status) { vha->flags.online = 1; /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; /* Register system information */ if (qlafx00_fx_disc(vha, &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO)) ql_dbg(ql_dbg_disc, vha, 0x2095, "failed to register host info\n"); } scsi_unblock_requests(vha->host); return status; } void qlafx00_timer_routine(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t fw_heart_beat; uint32_t aenmbx0; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; uint32_t tempc; /* Check firmware health */ if (ha->mr.fw_hbt_cnt) ha->mr.fw_hbt_cnt--; else { if ((!ha->flags.mr_reset_hdlr_active) && (!test_bit(UNLOADING, &vha->dpc_flags)) && (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && (ha->mr.fw_hbt_en)) { fw_heart_beat = rd_reg_dword(&reg->fwheartbeat); if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { ha->mr.old_fw_hbt_cnt = fw_heart_beat; ha->mr.fw_hbt_miss_cnt = 0; } else { ha->mr.fw_hbt_miss_cnt++; if (ha->mr.fw_hbt_miss_cnt == QLAFX00_HEARTBEAT_MISS_CNT) { set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); ha->mr.fw_hbt_miss_cnt = 0; } } } ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; } if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) { /* Reset recovery to be performed in timer routine */ aenmbx0 = rd_reg_dword(&reg->aenmailbox0); if (ha->mr.fw_reset_timer_exp) { set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); ha->mr.fw_reset_timer_exp = 0; } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) { /* Wake up DPC to rescan the targets */ set_bit(FX00_TARGET_SCAN, &vha->dpc_flags); clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); qla2xxx_wake_dpc(vha); ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; } else if ((aenmbx0 == MBA_FW_STARTING) && (!ha->mr.fw_hbt_en)) { ha->mr.fw_hbt_en = 1; } else if (!ha->mr.fw_reset_timer_tick) { if (aenmbx0 == ha->mr.old_aenmbx0_state) ha->mr.fw_reset_timer_exp = 1; ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; } else if (aenmbx0 == 0xFFFFFFFF) { uint32_t data0, data1; data0 = QLAFX00_RD_REG(ha, QLAFX00_BAR1_BASE_ADDR_REG); data1 = QLAFX00_RD_REG(ha, QLAFX00_PEX0_WIN0_BASE_ADDR_REG); data0 &= 0xffff0000; data1 &= 0x0000ffff; QLAFX00_WR_REG(ha, QLAFX00_PEX0_WIN0_BASE_ADDR_REG, (data0 | data1)); } else if ((aenmbx0 & 0xFF00) == 
MBA_FW_POLL_STATE) { ha->mr.fw_reset_timer_tick = QLAFX00_MAX_RESET_INTERVAL; } else if (aenmbx0 == MBA_FW_RESET_FCT) { ha->mr.fw_reset_timer_tick = QLAFX00_MAX_RESET_INTERVAL; } if (ha->mr.old_aenmbx0_state != aenmbx0) { ha->mr.old_aenmbx0_state = aenmbx0; ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; } ha->mr.fw_reset_timer_tick--; } if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) { /* * Critical temperature recovery to be * performed in timer routine */ if (ha->mr.fw_critemp_timer_tick == 0) { tempc = QLAFX00_GET_TEMPERATURE(ha); ql_dbg(ql_dbg_timer, vha, 0x6012, "ISPFx00(%s): Critical temp timer, " "current SOC temperature: %d\n", __func__, tempc); if (tempc < ha->mr.critical_temperature) { set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); clear_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; } else { ha->mr.fw_critemp_timer_tick--; } } if (ha->mr.host_info_resend) { /* * Incomplete host info might be sent to firmware * during system boot - info should be resent */ if (ha->mr.hinfo_resend_timer_tick == 0) { ha->mr.host_info_resend = false; set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags); ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; qla2xxx_wake_dpc(vha); } else { ha->mr.hinfo_resend_timer_tick--; } } } /* * qlafx00_reset_initialize * Re-initialize after an iSA device reset. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ int qlafx00_reset_initialize(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (vha->device_flags & DFLG_DEV_FAILED) { ql_dbg(ql_dbg_init, vha, 0x0142, "Device in failed state\n"); return QLA_SUCCESS; } ha->flags.mr_reset_hdlr_active = 1; if (vha->flags.online) { scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, false); } ql_log(ql_log_info, vha, 0x0143, "(%s): succeeded.\n", __func__); ha->flags.mr_reset_hdlr_active = 0; return QLA_SUCCESS; } /* * qlafx00_abort_isp * Resets ISP and aborts all outstanding commands. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ int qlafx00_abort_isp(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (vha->flags.online) { if (unlikely(pci_channel_offline(ha->pdev) && ha->flags.pci_channel_io_perm_failure)) { clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); return QLA_SUCCESS; } scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, false); } else { scsi_block_requests(vha->host); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); vha->qla_stats.total_isp_aborts++; ha->isp_ops->reset_chip(vha); set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); /* Clear the Interrupts */ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); } ql_log(ql_log_info, vha, 0x0145, "(%s): succeeded.\n", __func__); return QLA_SUCCESS; } static inline fc_port_t* qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id) { fc_port_t *fcport; /* Check for matching device in remote port list.
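 * The ISPFx00 firmware hands the driver a tgt_id handle for each remote
 * port, so the lookup below is a simple linear walk of vha->vp_fcports
 * matching fcport->tgt_id.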
*/ list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->tgt_id == tgt_id) { ql_dbg(ql_dbg_async, vha, 0x5072, "Matching fcport(%p) found with TGT-ID: 0x%x " "and Remote TGT_ID: 0x%x\n", fcport, fcport->tgt_id, tgt_id); return fcport; } } return NULL; } static void qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id) { fc_port_t *fcport; ql_log(ql_log_info, vha, 0x5073, "Detach TGT-ID: 0x%x\n", tgt_id); fcport = qlafx00_get_fcport(vha, tgt_id); if (!fcport) return; qla2x00_mark_device_lost(vha, fcport, 0); return; } void qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) { uint32_t aen_code, aen_data; aen_code = FCH_EVT_VENDOR_UNIQUE; aen_data = evt->u.aenfx.evtcode; switch (evt->u.aenfx.evtcode) { case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ if (evt->u.aenfx.mbx[1] == 0) { if (evt->u.aenfx.mbx[2] == 1) { if (!vha->flags.fw_tgt_reported) vha->flags.fw_tgt_reported = 1; atomic_set(&vha->loop_down_timer, 0); atomic_set(&vha->loop_state, LOOP_UP); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (evt->u.aenfx.mbx[2] == 2) { qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]); } } else if (evt->u.aenfx.mbx[1] == 0xffff) { if (evt->u.aenfx.mbx[2] == 1) { if (!vha->flags.fw_tgt_reported) vha->flags.fw_tgt_reported = 1; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } else if (evt->u.aenfx.mbx[2] == 2) { vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha); } } break; case QLAFX00_MBA_LINK_UP: aen_code = FCH_EVT_LINKUP; aen_data = 0; break; case QLAFX00_MBA_LINK_DOWN: aen_code = FCH_EVT_LINKDOWN; aen_data = 0; break; case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ ql_log(ql_log_info, vha, 0x5082, "Process critical temperature event " "aenmb[0]: %x\n", evt->u.aenfx.evtcode); scsi_block_requests(vha->host); qlafx00_abort_isp_cleanup(vha, true); scsi_unblock_requests(vha->host); break; } fc_host_post_event(vha->host, fc_get_event_number(), aen_code, aen_data); } static void qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo) { u64 port_name = 0, node_name = 0; port_name = (unsigned long long)wwn_to_u64(pinfo->port_name); node_name = (unsigned long long)wwn_to_u64(pinfo->node_name); fc_host_node_name(vha->host) = node_name; fc_host_port_name(vha->host) = port_name; if (!pinfo->port_type) vha->hw->current_topology = ISP_CFG_F; if (pinfo->link_status == QLAFX00_LINK_STATUS_UP) atomic_set(&vha->loop_state, LOOP_READY); else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN) atomic_set(&vha->loop_state, LOOP_DOWN); vha->hw->link_data_rate = (uint16_t)pinfo->link_config; } static void qla2x00_fxdisc_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); } static void qla2x00_fxdisc_sp_done(srb_t *sp, int res) { struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); } int qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) { srb_t *sp; struct srb_iocb *fdisc; int rval = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = vha->hw; struct host_system_info *phost_info; struct register_host_info *preg_hsi; struct new_utsname *p_sysid = NULL; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_FXIOCB_DCMD; sp->name = "fxdisc"; qla2x00_init_async_sp(sp, FXDISC_TIMEOUT, qla2x00_fxdisc_sp_done); sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout; fdisc = &sp->u.iocb_cmd; switch (fx_type) { case FXDISC_GET_CONFIG_INFO: 
fdisc->u.fxiocb.flags = SRB_FXDISC_RESP_DMA_VALID; fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data); break; case FXDISC_GET_PORT_INFO: fdisc->u.fxiocb.flags = SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO; fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id); break; case FXDISC_GET_TGT_NODE_INFO: fdisc->u.fxiocb.flags = SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO; fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id); break; case FXDISC_GET_TGT_NODE_LIST: fdisc->u.fxiocb.flags = SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE; break; case FXDISC_REG_HOST_INFO: fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID; fdisc->u.fxiocb.req_len = sizeof(struct register_host_info); p_sysid = utsname(); if (!p_sysid) { ql_log(ql_log_warn, vha, 0x303c, "Not able to get the system information\n"); goto done_free_sp; } break; case FXDISC_ABORT_IOCTL: default: break; } if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL); if (!fdisc->u.fxiocb.req_addr) goto done_free_sp; if (fx_type == FXDISC_REG_HOST_INFO) { preg_hsi = (struct register_host_info *) fdisc->u.fxiocb.req_addr; phost_info = &preg_hsi->hsi; memset(preg_hsi, 0, sizeof(struct register_host_info)); phost_info->os_type = OS_TYPE_LINUX; strscpy(phost_info->sysname, p_sysid->sysname, sizeof(phost_info->sysname)); strscpy(phost_info->nodename, p_sysid->nodename, sizeof(phost_info->nodename)); if (!strcmp(phost_info->nodename, "(none)")) ha->mr.host_info_resend = true; strscpy(phost_info->release, p_sysid->release, sizeof(phost_info->release)); strscpy(phost_info->version, p_sysid->version, sizeof(phost_info->version)); strscpy(phost_info->machine, p_sysid->machine, sizeof(phost_info->machine)); strscpy(phost_info->domainname, p_sysid->domainname, sizeof(phost_info->domainname)); strscpy(phost_info->hostdriver, QLA2XXX_VERSION, sizeof(phost_info->hostdriver)); preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); ql_dbg(ql_dbg_init, vha, 0x0149, "ISP%04X: Host registration with firmware\n", ha->pdev->device); ql_dbg(ql_dbg_init, vha, 0x014a, "os_type = '%d', sysname = '%s', nodname = '%s'\n", phost_info->os_type, phost_info->sysname, phost_info->nodename); ql_dbg(ql_dbg_init, vha, 0x014b, "release = '%s', version = '%s'\n", phost_info->release, phost_info->version); ql_dbg(ql_dbg_init, vha, 0x014c, "machine = '%s' " "domainname = '%s', hostdriver = '%s'\n", phost_info->machine, phost_info->domainname, phost_info->hostdriver); ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d, phost_info, sizeof(*phost_info)); } } if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len, &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL); if (!fdisc->u.fxiocb.rsp_addr) goto done_unmap_req; } fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_unmap_dma; wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp); if (fx_type == FXDISC_GET_CONFIG_INFO) { struct config_info_data *pinfo = (struct config_info_data *) fdisc->u.fxiocb.rsp_addr; strscpy(vha->hw->model_number, pinfo->model_num, ARRAY_SIZE(vha->hw->model_number)); strscpy(vha->hw->model_desc, pinfo->model_description, ARRAY_SIZE(vha->hw->model_desc)); 
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name, sizeof(vha->hw->mr.symbolic_name)); memcpy(&vha->hw->mr.serial_num, pinfo->serial_num, sizeof(vha->hw->mr.serial_num)); memcpy(&vha->hw->mr.hw_version, pinfo->hw_version, sizeof(vha->hw->mr.hw_version)); memcpy(&vha->hw->mr.fw_version, pinfo->fw_version, sizeof(vha->hw->mr.fw_version)); strim(vha->hw->mr.fw_version); memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version, sizeof(vha->hw->mr.uboot_version)); memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num, sizeof(vha->hw->mr.fru_serial_num)); vha->hw->mr.critical_temperature = (pinfo->nominal_temp_value) ? pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD; ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & QLAFX00_EXTENDED_IO_EN_MASK) != 0; } else if (fx_type == FXDISC_GET_PORT_INFO) { struct port_info_data *pinfo = (struct port_info_data *) fdisc->u.fxiocb.rsp_addr; memcpy(vha->node_name, pinfo->node_name, WWN_SIZE); memcpy(vha->port_name, pinfo->port_name, WWN_SIZE); vha->d_id.b.domain = pinfo->port_id[0]; vha->d_id.b.area = pinfo->port_id[1]; vha->d_id.b.al_pa = pinfo->port_id[2]; qlafx00_update_host_attr(vha, pinfo); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141, pinfo, 16); } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) { struct qlafx00_tgt_node_info *pinfo = (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE); memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE); fcport->port_type = FCT_TARGET; ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144, pinfo, 16); } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) { struct qlafx00_tgt_node_info *pinfo = (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146, pinfo, 16); memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE); } else if (fx_type == FXDISC_ABORT_IOCTL) fdisc->u.fxiocb.result = (fdisc->u.fxiocb.result == cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ? cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED); rval = le32_to_cpu(fdisc->u.fxiocb.result); done_unmap_dma: if (fdisc->u.fxiocb.rsp_addr) dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len, fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle); done_unmap_req: if (fdisc->u.fxiocb.req_addr) dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } /* * qlafx00_initialize_adapter * Initialize board. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ int qlafx00_initialize_adapter(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; uint32_t tempc; /* Clear adapter flags. 
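 * The remainder of the routine brings the adapter up in order: PCI
 * configuration (isp_ops->pci_config), wait for firmware ready
 * (qlafx00_init_fw_ready), queue pointer save/config, allocation of the
 * outstanding-command array sized from the firmware-reported resources,
 * and finally qla2x00_init_rings() followed by an initial SOC
 * temperature read.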
*/ vha->flags.online = 0; ha->flags.chip_reset_done = 0; vha->flags.reset_active = 0; ha->flags.pci_channel_io_perm_failure = 0; ha->flags.eeh_busy = 0; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; vha->dpc_flags = 0; vha->flags.management_server_logged_in = 0; ha->isp_abort_cnt = 0; ha->beacon_blink_led = 0; set_bit(0, ha->req_qid_map); set_bit(0, ha->rsp_qid_map); ql_dbg(ql_dbg_init, vha, 0x0147, "Configuring PCI space...\n"); rval = ha->isp_ops->pci_config(vha); if (rval) { ql_log(ql_log_warn, vha, 0x0148, "Unable to configure PCI space.\n"); return rval; } rval = qlafx00_init_fw_ready(vha); if (rval != QLA_SUCCESS) return rval; qlafx00_save_queue_ptrs(vha); rval = qlafx00_config_queues(vha); if (rval != QLA_SUCCESS) return rval; /* * Allocate the array of outstanding commands * now that we know the firmware resources. */ rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); if (rval != QLA_SUCCESS) return rval; rval = qla2x00_init_rings(vha); ha->flags.chip_reset_done = 1; tempc = QLAFX00_GET_TEMPERATURE(ha); ql_dbg(ql_dbg_init, vha, 0x0152, "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", __func__, tempc); return rval; } uint32_t qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int rval = QLA_FUNCTION_FAILED; uint32_t state[1]; if (qla2x00_reset_active(vha)) ql_log(ql_log_warn, vha, 0x70ce, "ISP reset active.\n"); else if (!vha->hw->flags.eeh_busy) { rval = qlafx00_get_firmware_state(vha, state); } if (rval != QLA_SUCCESS) memset(state, -1, sizeof(state)); return state[0]; } void qlafx00_get_host_speed(struct Scsi_Host *shost) { struct qla_hw_data *ha = ((struct scsi_qla_host *) (shost_priv(shost)))->hw; u32 speed = FC_PORTSPEED_UNKNOWN; switch (ha->link_data_rate) { case QLAFX00_PORT_SPEED_2G: speed = FC_PORTSPEED_2GBIT; break; case QLAFX00_PORT_SPEED_4G: speed = FC_PORTSPEED_4GBIT; break; case QLAFX00_PORT_SPEED_8G: speed = FC_PORTSPEED_8GBIT; break; case QLAFX00_PORT_SPEED_10G: speed = FC_PORTSPEED_10GBIT; break; } fc_host_speed(shost) = speed; } /** QLAFX00 specific ISR implementation functions */ static inline void qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, uint32_t sense_len, struct rsp_que *rsp, int res) { struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cp = GET_CMD_SP(sp); uint32_t track_sense_len; SET_FW_SENSE_LEN(sp, sense_len); if (sense_len >= SCSI_SENSE_BUFFERSIZE) sense_len = SCSI_SENSE_BUFFERSIZE; SET_CMD_SENSE_LEN(sp, sense_len); SET_CMD_SENSE_PTR(sp, cp->sense_buffer); track_sense_len = sense_len; if (sense_len > par_sense_len) sense_len = par_sense_len; memcpy(cp->sense_buffer, sense_data, sense_len); SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len); SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); track_sense_len -= sense_len; SET_CMD_SENSE_LEN(sp, track_sense_len); ql_dbg(ql_dbg_io, vha, 0x304d, "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n", sense_len, par_sense_len, track_sense_len); if (GET_FW_SENSE_LEN(sp) > 0) { rsp->status_srb = sp; cp->result = res; } if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", sp->vha->host_no, cp->device->id, cp->device->lun, cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, cp->sense_buffer, sense_len); } } static void qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct 
tsk_mgmt_entry_fx00 *pkt, srb_t *sp, __le16 sstatus, __le16 cpstatus) { struct srb_iocb *tmf; tmf = &sp->u.iocb_cmd; if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) || (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID))) cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE); tmf->u.tmf.comp_status = cpstatus; sp->done(sp, 0); } static void qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct abort_iocb_entry_fx00 *pkt) { const char func[] = "ABT_IOCB"; srb_t *sp; struct srb_iocb *abt; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->tgt_id_sts; sp->done(sp, 0); } static void qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, struct ioctl_iocb_entry_fx00 *pkt) { const char func[] = "IOSB_IOCB"; srb_t *sp; struct bsg_job *bsg_job; struct fc_bsg_reply *bsg_reply; struct srb_iocb *iocb_job; int res = 0; struct qla_mt_iocb_rsp_fx00 fstatus; uint8_t *fw_sts_ptr; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; if (sp->type == SRB_FXIOCB_DCMD) { iocb_job = &sp->u.iocb_cmd; iocb_job->u.fxiocb.seq_number = pkt->seq_no; iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags; iocb_job->u.fxiocb.result = pkt->status; if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID) iocb_job->u.fxiocb.req_data = pkt->dataword_r; } else { bsg_job = sp->u.bsg_job; bsg_reply = bsg_job->reply; memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00)); fstatus.reserved_1 = pkt->reserved_0; fstatus.func_type = pkt->comp_func_num; fstatus.ioctl_flags = pkt->fw_iotcl_flags; fstatus.ioctl_data = pkt->dataword_r; fstatus.adapid = pkt->adapid; fstatus.reserved_2 = pkt->dataword_r_extra; fstatus.res_count = pkt->residuallen; fstatus.status = pkt->status; fstatus.seq_number = pkt->seq_no; memcpy(fstatus.reserved_3, pkt->reserved_2, 20 * sizeof(uint8_t)); fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus)); bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, sp->vha, 0x5080, pkt, sizeof(*pkt)); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, sp->vha, 0x5074, fw_sts_ptr, sizeof(fstatus)); res = bsg_reply->result = DID_OK << 16; bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; } sp->done(sp, res); } /** * qlafx00_status_entry() - Process a Status IOCB entry. * @vha: SCSI driver HA context * @rsp: response queue * @pkt: Entry pointer */ static void qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) { srb_t *sp; fc_port_t *fcport; struct scsi_cmnd *cp; struct sts_entry_fx00 *sts; __le16 comp_status; __le16 scsi_status; __le16 lscsi_status; int32_t resid; uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, fw_resid_len; uint8_t *rsp_info = NULL, *sense_data = NULL; struct qla_hw_data *ha = vha->hw; uint32_t hindex, handle; uint16_t que; struct req_que *req; int logit = 1; int res = 0; sts = (struct sts_entry_fx00 *) pkt; comp_status = sts->comp_status; scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK); hindex = sts->handle; handle = LSW(hindex); que = MSW(hindex); req = ha->req_q_map[que]; /* Validate handle. 
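 * The 32-bit completion handle packs the request-queue number in its
 * upper word and the outstanding_cmds[] index in its lower word,
 * matching the MSW()/LSW() split above; an out-of-range index means the
 * firmware returned a handle the driver never issued and triggers an
 * ISP abort request.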
*/ if (handle < req->num_outstanding_cmds) sp = req->outstanding_cmds[handle]; else sp = NULL; if (sp == NULL) { ql_dbg(ql_dbg_io, vha, 0x3034, "Invalid status handle (0x%x).\n", handle); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return; } if (sp->type == SRB_TM_CMD) { req->outstanding_cmds[handle] = NULL; qlafx00_tm_iocb_entry(vha, req, pkt, sp, scsi_status, comp_status); return; } /* Fast path completion. */ if (comp_status == CS_COMPLETE && scsi_status == 0) { qla2x00_process_completed_request(vha, req, handle); return; } req->outstanding_cmds[handle] = NULL; cp = GET_CMD_SP(sp); if (cp == NULL) { ql_dbg(ql_dbg_io, vha, 0x3048, "Command already returned (0x%x/%p).\n", handle, sp); return; } lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK); fcport = sp->fcport; sense_len = par_sense_len = rsp_info_len = resid_len = fw_resid_len = 0; if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)) sense_len = sts->sense_len; if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER | (uint16_t)SS_RESIDUAL_OVER))) resid_len = le32_to_cpu(sts->residual_len); if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN)) fw_resid_len = le32_to_cpu(sts->residual_len); rsp_info = sense_data = sts->data; par_sense_len = sizeof(sts->data); /* Check for overrun. */ if (comp_status == CS_COMPLETE && scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER)) comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN); /* * Based on Host and scsi status generate status code for Linux */ switch (le16_to_cpu(comp_status)) { case CS_COMPLETE: case CS_QUEUE_FULL: if (scsi_status == 0) { res = DID_OK << 16; break; } if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER | (uint16_t)SS_RESIDUAL_OVER))) { resid = resid_len; scsi_set_resid(cp, resid); if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { ql_dbg(ql_dbg_io, fcport->vha, 0x3050, "Mid-layer underflow " "detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16; break; } } res = DID_OK << 16 | le16_to_cpu(lscsi_status); if (lscsi_status == cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { ql_dbg(ql_dbg_io, fcport->vha, 0x3051, "QUEUE FULL detected.\n"); break; } logit = 0; if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) break; memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) break; qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); break; case CS_DATA_UNDERRUN: /* Use F/W calculated residual length. */ if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) resid = fw_resid_len; else resid = resid_len; scsi_set_resid(cp, resid); if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) { if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) && fw_resid_len != resid_len) { ql_dbg(ql_dbg_io, fcport->vha, 0x3052, "Dropped frame(s) detected " "(0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16 | le16_to_cpu(lscsi_status); goto check_scsi_status; } if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { ql_dbg(ql_dbg_io, fcport->vha, 0x3053, "Mid-layer underflow " "detected (0x%x of 0x%x bytes, " "cp->underflow: 0x%x).\n", resid, scsi_bufflen(cp), cp->underflow); res = DID_ERROR << 16; break; } } else if (lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) && lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) { /* * scsi status of task set and busy are considered * to be task not completed. 
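 * (those two statuses fall through to the normal SCSI-status handling
 * at check_scsi_status below instead of being reported as dropped
 * frames)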
*/ ql_dbg(ql_dbg_io, fcport->vha, 0x3054, "Dropped frame(s) detected (0x%x " "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16 | le16_to_cpu(lscsi_status); goto check_scsi_status; } else { ql_dbg(ql_dbg_io, fcport->vha, 0x3055, "scsi_status: 0x%x, lscsi_status: 0x%x\n", scsi_status, lscsi_status); } res = DID_OK << 16 | le16_to_cpu(lscsi_status); logit = 0; check_scsi_status: /* * Check to see if SCSI Status is non zero. If so report SCSI * Status. */ if (lscsi_status != 0) { if (lscsi_status == cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { ql_dbg(ql_dbg_io, fcport->vha, 0x3056, "QUEUE FULL detected.\n"); logit = 1; break; } if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) break; memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) break; qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); } break; case CS_PORT_LOGGED_OUT: case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: case CS_TIMEOUT: case CS_RESET: /* * We are going to have the fc class block the rport * while we try to recover so instruct the mid layer * to requeue until the class decides how to handle this. */ res = DID_TRANSPORT_DISRUPTED << 16; ql_dbg(ql_dbg_io, fcport->vha, 0x3057, "Port down status: port-state=0x%x.\n", atomic_read(&fcport->state)); if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1); break; case CS_ABORTED: res = DID_RESET << 16; break; default: res = DID_ERROR << 16; break; } if (logit) ql_dbg(ql_dbg_io, fcport->vha, 0x3058, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, " "par_sense_len=0x%x, rsp_info_len=0x%x\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->tgt_id, lscsi_status, cp->cmnd, scsi_bufflen(cp), rsp_info, resid_len, fw_resid_len, sense_len, par_sense_len, rsp_info_len); if (rsp->status_srb == NULL) sp->done(sp, res); else WARN_ON_ONCE(true); } /** * qlafx00_status_cont_entry() - Process a Status Continuations entry. * @rsp: response queue * @pkt: Entry pointer * * Extended sense data. */ static void qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; struct qla_hw_data *ha = rsp->hw; struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); srb_t *sp = rsp->status_srb; struct scsi_cmnd *cp; uint32_t sense_len; uint8_t *sense_ptr; if (!sp) { ql_dbg(ql_dbg_io, vha, 0x3037, "no SP, sp = %p\n", sp); return; } if (!GET_FW_SENSE_LEN(sp)) { ql_dbg(ql_dbg_io, vha, 0x304b, "no fw sense data, sp = %p\n", sp); return; } cp = GET_CMD_SP(sp); if (cp == NULL) { ql_log(ql_log_warn, vha, 0x303b, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = NULL; return; } if (!GET_CMD_SENSE_LEN(sp)) { ql_dbg(ql_dbg_io, vha, 0x304c, "no sense data, sp = %p\n", sp); } else { sense_len = GET_CMD_SENSE_LEN(sp); sense_ptr = GET_CMD_SENSE_PTR(sp); ql_dbg(ql_dbg_io, vha, 0x304f, "sp=%p sense_len=0x%x sense_ptr=%p.\n", sp, sense_len, sense_ptr); if (sense_len > sizeof(pkt->data)) sense_sz = sizeof(pkt->data); else sense_sz = sense_len; /* Move sense data. 
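 * Sense data that does not fit in the original status entry arrives in
 * status continuation entries; GET_FW_SENSE_LEN() tracks how much the
 * firmware has yet to deliver, and the command is completed only once
 * that count reaches zero.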
*/ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e, pkt, sizeof(*pkt)); memcpy(sense_ptr, pkt->data, sense_sz); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a, sense_ptr, sense_sz); sense_len -= sense_sz; sense_ptr += sense_sz; SET_CMD_SENSE_PTR(sp, sense_ptr); SET_CMD_SENSE_LEN(sp, sense_len); } sense_len = GET_FW_SENSE_LEN(sp); sense_len = (sense_len > sizeof(pkt->data)) ? (sense_len - sizeof(pkt->data)) : 0; SET_FW_SENSE_LEN(sp, sense_len); /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; sp->done(sp, cp->result); } else { WARN_ON_ONCE(true); } } /** * qlafx00_multistatus_entry() - Process Multi response queue entries. * @vha: SCSI driver HA context * @rsp: response queue * @pkt: received packet */ static void qlafx00_multistatus_entry(struct scsi_qla_host *vha, struct rsp_que *rsp, void *pkt) { srb_t *sp; struct multi_sts_entry_fx00 *stsmfx; struct qla_hw_data *ha = vha->hw; uint32_t handle, hindex, handle_count, i; uint16_t que; struct req_que *req; __le32 *handle_ptr; stsmfx = (struct multi_sts_entry_fx00 *) pkt; handle_count = stsmfx->handle_count; if (handle_count > MAX_HANDLE_COUNT) { ql_dbg(ql_dbg_io, vha, 0x3035, "Invalid handle count (0x%x).\n", handle_count); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return; } handle_ptr = &stsmfx->handles[0]; for (i = 0; i < handle_count; i++) { hindex = le32_to_cpu(*handle_ptr); handle = LSW(hindex); que = MSW(hindex); req = ha->req_q_map[que]; /* Validate handle. */ if (handle < req->num_outstanding_cmds) sp = req->outstanding_cmds[handle]; else sp = NULL; if (sp == NULL) { ql_dbg(ql_dbg_io, vha, 0x3044, "Invalid status handle (0x%x).\n", handle); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return; } qla2x00_process_completed_request(vha, req, handle); handle_ptr++; } } /** * qlafx00_error_entry() - Process an error entry. * @vha: SCSI driver HA context * @rsp: response queue * @pkt: Entry pointer */ static void qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, struct sts_entry_fx00 *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; const char func[] = "ERROR-IOCB"; uint16_t que = 0; struct req_que *req = NULL; int res = DID_ERROR << 16; req = ha->req_q_map[que]; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(sp, res); return; } set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } /** * qlafx00_process_response_queue() - Process response queue entries. 
* @vha: SCSI driver HA context * @rsp: response queue */ static void qlafx00_process_response_queue(struct scsi_qla_host *vha, struct rsp_que *rsp) { struct sts_entry_fx00 *pkt; response_t *lptr; uint16_t lreq_q_in = 0; uint16_t lreq_q_out = 0; lreq_q_in = rd_reg_dword(rsp->rsp_q_in); lreq_q_out = rsp->ring_index; while (lreq_q_in != lreq_q_out) { lptr = rsp->ring_ptr; memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr, sizeof(rsp->rsp_pkt)); pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt; rsp->ring_index++; lreq_q_out++; if (rsp->ring_index == rsp->length) { lreq_q_out = 0; rsp->ring_index = 0; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr++; } if (pkt->entry_status != 0 && pkt->entry_type != IOCTL_IOSB_TYPE_FX00) { ql_dbg(ql_dbg_async, vha, 0x507f, "type of error status in response: 0x%x\n", pkt->entry_status); qlafx00_error_entry(vha, rsp, (struct sts_entry_fx00 *)pkt); continue; } switch (pkt->entry_type) { case STATUS_TYPE_FX00: qlafx00_status_entry(vha, rsp, pkt); break; case STATUS_CONT_TYPE_FX00: qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; case MULTI_STATUS_TYPE_FX00: qlafx00_multistatus_entry(vha, rsp, pkt); break; case ABORT_IOCB_TYPE_FX00: qlafx00_abort_iocb_entry(vha, rsp->req, (struct abort_iocb_entry_fx00 *)pkt); break; case IOCTL_IOSB_TYPE_FX00: qlafx00_ioctl_iosb_entry(vha, rsp->req, (struct ioctl_iocb_entry_fx00 *)pkt); break; default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5081, "Received unknown response pkt type %x " "entry status=%x.\n", pkt->entry_type, pkt->entry_status); break; } } /* Adjust ring index */ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); } /** * qlafx00_async_event() - Process asynchronous events. * @vha: SCSI driver HA context */ static void qlafx00_async_event(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg; int data_size = 1; reg = &ha->iobase->ispfx00; /* Setup to process RIO completion.
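 * The switch below decodes the AEN code latched in ha->aenmb[0], reads
 * any additional mailboxes it needs, and forwards the data to the aenfx
 * work handler via qlafx00_post_aenfx_work().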
*/ switch (ha->aenmb[0]) { case QLAFX00_MBA_SYSTEM_ERR: /* System Error */ ql_log(ql_log_warn, vha, 0x5079, "ISP System Error - mbx1=%x\n", ha->aenmb[0]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */ ql_dbg(ql_dbg_async, vha, 0x5076, "Asynchronous FW shutdown requested.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); break; case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1); ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2); ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3); ql_dbg(ql_dbg_async, vha, 0x5077, "Asynchronous port Update received " "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); data_size = 4; break; case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */ ql_log(ql_log_info, vha, 0x5085, "Asynchronous over temperature event received " "aenmb[0]: %x\n", ha->aenmb[0]); break; case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */ ql_log(ql_log_info, vha, 0x5086, "Asynchronous normal temperature event received " "aenmb[0]: %x\n", ha->aenmb[0]); break; case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ ql_log(ql_log_info, vha, 0x5083, "Asynchronous critical temperature event received " "aenmb[0]: %x\n", ha->aenmb[0]); break; default: ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1); ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2); ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3); ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4); ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5); ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6); ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7); ql_dbg(ql_dbg_async, vha, 0x5078, "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]); break; } qlafx00_post_aenfx_work(vha, ha->aenmb[0], (uint32_t *)ha->aenmb, data_size); } /** * qlafx00_mbx_completion() - Process mailbox command completions. * @vha: SCSI driver HA context * @mb0: value to be written into mailbox register 0 */ static void qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) { uint16_t cnt; __le32 __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; if (!ha->mcp32) ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n"); /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out32[0] = mb0; wptr = &reg->mailbox17; for (cnt = 1; cnt < ha->mbx_count; cnt++) { ha->mailbox_out32[cnt] = rd_reg_dword(wptr); wptr++; } } /** * qlafx00_intr_handler() - Process interrupts for the ISPFX00. * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
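 *
 * Three interrupt sources are serviced per pass: mailbox command
 * completion (QLAFX00_INTR_MB_CMPLT), asynchronous events
 * (QLAFX00_INTR_ASYNC_CMPLT) and response-queue completions
 * (QLAFX00_INTR_RSP_CMPLT); each handled source is acknowledged through
 * QLAFX00_CLR_INTR_REG().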
*/ irqreturn_t qlafx00_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_fx00 __iomem *reg; int status; unsigned long iter; uint32_t stat; uint32_t mb[8]; struct rsp_que *rsp; unsigned long flags; uint32_t clr_intr = 0; uint32_t intr_stat = 0; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x507d, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->ispfx00; status = 0; if (unlikely(pci_channel_offline(ha->pdev))) return IRQ_HANDLED; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; clr_intr = 0) { stat = QLAFX00_RD_INTR_REG(ha); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; intr_stat = stat & QLAFX00_HST_INT_STS_BITS; if (!intr_stat) break; if (stat & QLAFX00_INTR_MB_CMPLT) { mb[0] = rd_reg_dword(&reg->mailbox16); qlafx00_mbx_completion(vha, mb[0]); status |= MBX_INTERRUPT; clr_intr |= QLAFX00_INTR_MB_CMPLT; } if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) { ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0); qlafx00_async_event(vha); clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; } if (intr_stat & QLAFX00_INTR_RSP_CMPLT) { qlafx00_process_response_queue(vha, rsp); clr_intr |= QLAFX00_INTR_RSP_CMPLT; } QLAFX00_CLR_INTR_REG(ha, clr_intr); QLAFX00_RD_INTR_REG(ha); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } /** QLAFX00 specific IOCB implementation functions */ static inline cont_a64_entry_t * qlafx00_prep_cont_type1_iocb(struct req_que *req, cont_a64_entry_t *lcont_pkt) { cont_a64_entry_t *cont_pkt; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; /* Load packet defaults. */ lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00; return cont_pkt; } static inline void qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt) { uint16_t avail_dsds; struct dsd64 *cur_dsd; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; struct scatterlist *sg; int i, cont; struct req_que *req; cont_a64_entry_t lcont_pkt; cont_a64_entry_t *cont_pkt; vha = sp->vha; req = vha->req; cmd = GET_CMD_SP(sp); cont = 0; cont_pkt = NULL; /* Update entry type to indicate Command Type 3 IOCB */ lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7; /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { lcmd_pkt->byte_count = cpu_to_le32(0); return; } /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { lcmd_pkt->cntrl_flags = TMF_READ_DATA; vha->qla_stats.input_bytes += scsi_bufflen(cmd); } /* One DSD is available in the Command Type 3 IOCB */ avail_dsds = 1; cur_dsd = &lcmd_pkt->dsd; /* Load data segments */ scsi_for_each_sg(cmd, sg, tot_dsds, i) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Continuation * Type 1 IOCB. 
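 * The continuation IOCB is built in a local copy (lcont_pkt) and only
 * copied onto the I/O-mapped request ring with memcpy_toio() once its
 * five DSDs are filled or the scatter list ends.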
*/ memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb(req, &lcont_pkt); cur_dsd = lcont_pkt.dsd; avail_dsds = 5; cont = 1; } append_dsd64(&cur_dsd, sg); avail_dsds--; if (avail_dsds == 0 && cont == 1) { cont = 0; memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, sizeof(lcont_pkt)); } } if (avail_dsds != 0 && cont == 1) { memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, sizeof(lcont_pkt)); } } /** * qlafx00_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP * * Returns non-zero if a failure occurred, else zero. */ int qlafx00_start_scsi(srb_t *sp) { int nseg; unsigned long flags; uint32_t handle; uint16_t cnt; uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_7_fx00 *cmd_pkt; struct cmd_type_7_fx00 lcmd_pkt; struct scsi_lun llun; /* Setup device pointers. */ rsp = ha->rsp_q_map[0]; req = vha->req; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else nseg = 0; tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { cnt = rd_reg_dword_relaxed(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr; memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); lcmd_pkt.handle = make_handle(req->id, sp->handle); lcmd_pkt.reserved_0 = 0; lcmd_pkt.port_path_ctrl = 0; lcmd_pkt.reserved_1 = 0; lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds); lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); int_to_scsilun(cmd->device->lun, &llun); host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun, sizeof(lcmd_pkt.lun)); /* Load SCSI command packet. */ host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb)); lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Build IOCB segments */ qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt); /* Set total data segment count. */ lcmd_pkt.entry_count = (uint8_t)req_cnt; /* Specify response queue number where completion should happen */ lcmd_pkt.entry_status = (uint8_t) rsp->id; ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, cmd->cmnd, cmd->cmd_len); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032, &lcmd_pkt, sizeof(lcmd_pkt)); memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE); wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. 
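 * Writing req_q_in and then raising the host interrupt with
 * QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code) notifies the ISPFx00
 * firmware that new request entries are available.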
*/ wrt_reg_dword(req->req_q_in, req->ring_index); QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; } void qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct tsk_mgmt_entry_fx00 tm_iocb; struct scsi_lun llun; memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; tm_iocb.entry_count = 1; tm_iocb.handle = make_handle(req->id, sp->handle); tm_iocb.reserved_0 = 0; tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { int_to_scsilun(fxio->u.tmf.lun, &llun); host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun, sizeof(struct scsi_lun)); } memcpy(ptm_iocb, &tm_iocb, sizeof(struct tsk_mgmt_entry_fx00)); wmb(); } void qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; scsi_qla_host_t *vha = sp->vha; struct req_que *req = vha->req; struct abort_iocb_entry_fx00 abt_iocb; memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; abt_iocb.entry_count = 1; abt_iocb.handle = make_handle(req->id, sp->handle); abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl); abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); abt_iocb.req_que_no = cpu_to_le16(req->id); memcpy(pabt_iocb, &abt_iocb, sizeof(struct abort_iocb_entry_fx00)); wmb(); } void qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; struct bsg_job *bsg_job; struct fc_bsg_request *bsg_request; struct fxdisc_entry_fx00 fx_iocb; uint8_t entry_cnt = 1; memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); fx_iocb.entry_type = FX00_IOCB_TYPE; fx_iocb.handle = sp->handle; fx_iocb.entry_count = entry_cnt; if (sp->type == SRB_FXIOCB_DCMD) { fx_iocb.func_num = sp->u.iocb_cmd.u.fxiocb.req_func_type; fx_iocb.adapid = fxio->u.fxiocb.adapter_id; fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi; fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0; fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1; fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra; if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { fx_iocb.req_dsdcnt = cpu_to_le16(1); fx_iocb.req_xfrcnt = cpu_to_le16(fxio->u.fxiocb.req_len); put_unaligned_le64(fxio->u.fxiocb.req_dma_handle, &fx_iocb.dseg_rq[0].address); fx_iocb.dseg_rq[0].length = cpu_to_le32(fxio->u.fxiocb.req_len); } if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { fx_iocb.rsp_dsdcnt = cpu_to_le16(1); fx_iocb.rsp_xfrcnt = cpu_to_le16(fxio->u.fxiocb.rsp_len); put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle, &fx_iocb.dseg_rsp[0].address); fx_iocb.dseg_rsp[0].length = cpu_to_le32(fxio->u.fxiocb.rsp_len); } if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) { fx_iocb.dataword = fxio->u.fxiocb.req_data; } fx_iocb.flags = fxio->u.fxiocb.flags; } else { struct scatterlist *sg; bsg_job = sp->u.bsg_job; bsg_request = bsg_job->request; piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; fx_iocb.func_num = piocb_rqst->func_type; fx_iocb.adapid = piocb_rqst->adapid; fx_iocb.adapid_hi = 
piocb_rqst->adapid_hi; fx_iocb.reserved_0 = piocb_rqst->reserved_0; fx_iocb.reserved_1 = piocb_rqst->reserved_1; fx_iocb.dataword_extra = piocb_rqst->dataword_extra; fx_iocb.dataword = piocb_rqst->dataword; fx_iocb.req_xfrcnt = piocb_rqst->req_len; fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len; if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { int avail_dsds, tot_dsds; cont_a64_entry_t lcont_pkt; cont_a64_entry_t *cont_pkt = NULL; struct dsd64 *cur_dsd; int index = 0, cont = 0; fx_iocb.req_dsdcnt = cpu_to_le16(bsg_job->request_payload.sg_cnt); tot_dsds = bsg_job->request_payload.sg_cnt; cur_dsd = &fx_iocb.dseg_rq[0]; avail_dsds = 1; for_each_sg(bsg_job->request_payload.sg_list, sg, tot_dsds, index) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Cont. * Type 1 IOCB. */ memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( sp->vha->req, &lcont_pkt); cur_dsd = lcont_pkt.dsd; avail_dsds = 5; cont = 1; entry_cnt++; } append_dsd64(&cur_dsd, sg); avail_dsds--; if (avail_dsds == 0 && cont == 1) { cont = 0; memcpy_toio( (void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, sp->vha, 0x3042, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } if (avail_dsds != 0 && cont == 1) { memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, sp->vha, 0x3043, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { int avail_dsds, tot_dsds; cont_a64_entry_t lcont_pkt; cont_a64_entry_t *cont_pkt = NULL; struct dsd64 *cur_dsd; int index = 0, cont = 0; fx_iocb.rsp_dsdcnt = cpu_to_le16(bsg_job->reply_payload.sg_cnt); tot_dsds = bsg_job->reply_payload.sg_cnt; cur_dsd = &fx_iocb.dseg_rsp[0]; avail_dsds = 1; for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Cont. * Type 1 IOCB. */ memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); cont_pkt = qlafx00_prep_cont_type1_iocb( sp->vha->req, &lcont_pkt); cur_dsd = lcont_pkt.dsd; avail_dsds = 5; cont = 1; entry_cnt++; } append_dsd64(&cur_dsd, sg); avail_dsds--; if (avail_dsds == 0 && cont == 1) { cont = 0; memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer( ql_dbg_user + ql_dbg_verbose, sp->vha, 0x3045, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } if (avail_dsds != 0 && cont == 1) { memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, REQUEST_ENTRY_SIZE); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, sp->vha, 0x3046, (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); } } if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID) fx_iocb.dataword = piocb_rqst->dataword; fx_iocb.flags = piocb_rqst->flags; fx_iocb.entry_count = entry_cnt; } ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb)); memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb)); wmb(); }
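/*
 * Editor's sketch (not driver code): the request-entry accounting used by
 * qlafx00_start_scsi()/qlafx00_build_scsi_iocbs() above places one DSD in
 * the command IOCB and five in each Continuation Type 1 IOCB. A standalone
 * approximation of that count, with a hypothetical function name:
 *
 *	static unsigned int sketch_iocbs_needed(unsigned int nseg)
 *	{
 *		unsigned int iocbs = 1;			// command IOCB: 1 DSD
 *
 *		if (nseg > 1)
 *			iocbs += (nseg - 1 + 4) / 5;	// 5 DSDs per continuation
 *		return iocbs;
 *	}
 *
 * e.g. 1 segment -> 1 IOCB, 6 segments -> 2 IOCBs, 11 segments -> 3 IOCBs.
 */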
linux-master
drivers/scsi/qla2xxx/qla_mr.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_tmpl.h" #define ISPREG(vha) (&(vha)->hw->iobase->isp24) #define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr) #define IOBASE(vha) IOBAR(ISPREG(vha)) #define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL) static inline void qla27xx_insert16(uint16_t value, void *buf, ulong *len) { if (buf) { buf += *len; *(__le16 *)buf = cpu_to_le16(value); } *len += sizeof(value); } static inline void qla27xx_insert32(uint32_t value, void *buf, ulong *len) { if (buf) { buf += *len; *(__le32 *)buf = cpu_to_le32(value); } *len += sizeof(value); } static inline void qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) { if (buf && mem && size) { buf += *len; memcpy(buf, mem, size); } *len += size; } static inline void qla27xx_read8(void __iomem *window, void *buf, ulong *len) { uint8_t value = ~0; if (buf) { value = rd_reg_byte(window); } qla27xx_insert32(value, buf, len); } static inline void qla27xx_read16(void __iomem *window, void *buf, ulong *len) { uint16_t value = ~0; if (buf) { value = rd_reg_word(window); } qla27xx_insert32(value, buf, len); } static inline void qla27xx_read32(void __iomem *window, void *buf, ulong *len) { uint32_t value = ~0; if (buf) { value = rd_reg_dword(window); } qla27xx_insert32(value, buf, len); } static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *) { return (width == 1) ? qla27xx_read8 : (width == 2) ? qla27xx_read16 : qla27xx_read32; } static inline void qla27xx_read_reg(__iomem struct device_reg_24xx *reg, uint offset, void *buf, ulong *len) { void __iomem *window = (void __iomem *)reg + offset; qla27xx_read32(window, buf, len); } static inline void qla27xx_write_reg(__iomem struct device_reg_24xx *reg, uint offset, uint32_t data, void *buf) { if (buf) { void __iomem *window = (void __iomem *)reg + offset; wrt_reg_dword(window, data); } } static inline void qla27xx_read_window(__iomem struct device_reg_24xx *reg, uint32_t addr, uint offset, uint count, uint width, void *buf, ulong *len) { void __iomem *window = (void __iomem *)reg + offset; void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width); qla27xx_write_reg(reg, IOBAR(reg), addr, buf); while (count--) { qla27xx_insert32(addr, buf, len); readn(window, buf, len); window += width; addr++; } } static inline void qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) { if (buf) ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; } static inline struct qla27xx_fwdt_entry * qla27xx_next_entry(struct qla27xx_fwdt_entry *ent) { return (void *)ent + le32_to_cpu(ent->hdr.size); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd100, "%s: nop [%lx]\n", __func__, *len); qla27xx_skip_entry(ent, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd1ff, "%s: end [%lx]\n", __func__, *len); qla27xx_skip_entry(ent, buf); /* terminate */ return NULL; } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong addr = le32_to_cpu(ent->t256.base_addr); uint offset = ent->t256.pci_offset; ulong count = 
le16_to_cpu(ent->t256.reg_count); uint width = ent->t256.reg_width; ql_dbg(ql_dbg_misc, vha, 0xd200, "%s: rdio t1 [%lx]\n", __func__, *len); qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong addr = le32_to_cpu(ent->t257.base_addr); uint offset = ent->t257.pci_offset; ulong data = le32_to_cpu(ent->t257.write_data); ql_dbg(ql_dbg_misc, vha, 0xd201, "%s: wrio t1 [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf); qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { uint banksel = ent->t258.banksel_offset; ulong bank = le32_to_cpu(ent->t258.bank); ulong addr = le32_to_cpu(ent->t258.base_addr); uint offset = ent->t258.pci_offset; uint count = le16_to_cpu(ent->t258.reg_count); uint width = ent->t258.reg_width; ql_dbg(ql_dbg_misc, vha, 0xd202, "%s: rdio t2 [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), banksel, bank, buf); qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong addr = le32_to_cpu(ent->t259.base_addr); uint banksel = ent->t259.banksel_offset; ulong bank = le32_to_cpu(ent->t259.bank); uint offset = ent->t259.pci_offset; ulong data = le32_to_cpu(ent->t259.write_data); ql_dbg(ql_dbg_misc, vha, 0xd203, "%s: wrio t2 [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf); qla27xx_write_reg(ISPREG(vha), banksel, bank, buf); qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { uint offset = ent->t260.pci_offset; ql_dbg(ql_dbg_misc, vha, 0xd204, "%s: rdpci [%lx]\n", __func__, *len); qla27xx_insert32(offset, buf, len); qla27xx_read_reg(ISPREG(vha), offset, buf, len); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { uint offset = ent->t261.pci_offset; ulong data = le32_to_cpu(ent->t261.write_data); ql_dbg(ql_dbg_misc, vha, 0xd205, "%s: wrpci [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { uint area = ent->t262.ram_area; ulong start = le32_to_cpu(ent->t262.start_addr); ulong end = le32_to_cpu(ent->t262.end_addr); ulong dwords; int rc; ql_dbg(ql_dbg_misc, vha, 0xd206, "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); if (area == T262_RAM_AREA_CRITICAL_RAM) { ; } else if (area == T262_RAM_AREA_EXTERNAL_RAM) { end = vha->hw->fw_memory_size; if (buf) ent->t262.end_addr = cpu_to_le32(end); } else if (area == T262_RAM_AREA_SHARED_RAM) { start = vha->hw->fw_shared_ram_start; end = vha->hw->fw_shared_ram_end; if (buf) { ent->t262.start_addr = cpu_to_le32(start); ent->t262.end_addr = cpu_to_le32(end); } } else if (area == T262_RAM_AREA_DDR_RAM) { start = 
vha->hw->fw_ddr_ram_start; end = vha->hw->fw_ddr_ram_end; if (buf) { ent->t262.start_addr = cpu_to_le32(start); ent->t262.end_addr = cpu_to_le32(end); } } else if (area == T262_RAM_AREA_MISC) { if (buf) { ent->t262.start_addr = cpu_to_le32(start); ent->t262.end_addr = cpu_to_le32(end); } } else { ql_dbg(ql_dbg_misc, vha, 0xd022, "%s: unknown area %x\n", __func__, area); qla27xx_skip_entry(ent, buf); goto done; } if (end < start || start == 0 || end == 0) { ql_dbg(ql_dbg_misc, vha, 0xd023, "%s: unusable range (start=%lx end=%lx)\n", __func__, start, end); qla27xx_skip_entry(ent, buf); goto done; } dwords = end - start + 1; if (buf) { buf += *len; rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_async, vha, 0xffff, "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n", __func__, area, start, end); return INVALID_ENTRY; } } *len += dwords * sizeof(uint32_t); done: return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { uint type = ent->t263.queue_type; uint count = 0; uint i; uint length; ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207, "%s: getq(%x) [%lx]\n", __func__, type, *len); if (type == T263_QUEUE_TYPE_REQ) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; if (req || !buf) { length = req ? req->length : REQUEST_ENTRY_CNT_24XX; qla27xx_insert16(i, buf, len); qla27xx_insert16(length, buf, len); qla27xx_insertbuf(req ? req->ring : NULL, length * sizeof(*req->ring), buf, len); count++; } } } else if (type == T263_QUEUE_TYPE_RSP) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; if (rsp || !buf) { length = rsp ? rsp->length : RESPONSE_ENTRY_CNT_MQ; qla27xx_insert16(i, buf, len); qla27xx_insert16(length, buf, len); qla27xx_insertbuf(rsp ? 
rsp->ring : NULL, length * sizeof(*rsp->ring), buf, len); count++; } } } else if (QLA_TGT_MODE_ENABLED() && ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { struct qla_hw_data *ha = vha->hw; struct atio *atr = ha->tgt.atio_ring; if (atr || !buf) { length = ha->tgt.atio_q_length; qla27xx_insert16(0, buf, len); qla27xx_insert16(length, buf, len); qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len); count++; } } else { ql_dbg(ql_dbg_misc, vha, 0xd026, "%s: unknown queue %x\n", __func__, type); qla27xx_skip_entry(ent, buf); } if (buf) { if (count) ent->t263.num_queues = count; else qla27xx_skip_entry(ent, buf); } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd208, "%s: getfce [%lx]\n", __func__, *len); if (vha->hw->fce) { if (buf) { ent->t264.fce_trace_size = FCE_SIZE; ent->t264.write_pointer = vha->hw->fce_wr; ent->t264.base_pointer = vha->hw->fce_dma; ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0]; ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2]; ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3]; ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4]; ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5]; ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6]; } qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len); } else { ql_dbg(ql_dbg_misc, vha, 0xd027, "%s: missing fce\n", __func__); qla27xx_skip_entry(ent, buf); } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209, "%s: pause risc [%lx]\n", __func__, *len); if (buf) qla24xx_pause_risc(ISPREG(vha), vha->hw); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd20a, "%s: reset risc [%lx]\n", __func__, *len); if (buf) { if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) { ql_dbg(ql_dbg_async, vha, 0x5001, "%s: unable to soft reset\n", __func__); return INVALID_ENTRY; } } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { uint offset = ent->t267.pci_offset; ulong data = le32_to_cpu(ent->t267.data); ql_dbg(ql_dbg_misc, vha, 0xd20b, "%s: dis intr [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd20c, "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len); switch (ent->t268.buf_type) { case T268_BUF_TYPE_EXTD_TRACE: if (vha->hw->eft) { if (buf) { ent->t268.buf_size = EFT_SIZE; ent->t268.start_addr = vha->hw->eft_dma; } qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len); } else { ql_dbg(ql_dbg_misc, vha, 0xd028, "%s: missing eft\n", __func__); qla27xx_skip_entry(ent, buf); } break; case T268_BUF_TYPE_EXCH_BUFOFF: if (vha->hw->exchoffld_buf) { if (buf) { ent->t268.buf_size = vha->hw->exchoffld_size; ent->t268.start_addr = vha->hw->exchoffld_buf_dma; } qla27xx_insertbuf(vha->hw->exchoffld_buf, vha->hw->exchoffld_size, buf, len); } else { ql_dbg(ql_dbg_misc, vha, 0xd028, "%s: missing exch offld\n", __func__); qla27xx_skip_entry(ent, buf); } 
break; case T268_BUF_TYPE_EXTD_LOGIN: if (vha->hw->exlogin_buf) { if (buf) { ent->t268.buf_size = vha->hw->exlogin_size; ent->t268.start_addr = vha->hw->exlogin_buf_dma; } qla27xx_insertbuf(vha->hw->exlogin_buf, vha->hw->exlogin_size, buf, len); } else { ql_dbg(ql_dbg_misc, vha, 0xd028, "%s: missing ext login\n", __func__); qla27xx_skip_entry(ent, buf); } break; case T268_BUF_TYPE_REQ_MIRROR: case T268_BUF_TYPE_RSP_MIRROR: /* * Mirror pointers are not implemented in the * driver; instead, shadow pointers are used by * the driver. Skip these entries. */ qla27xx_skip_entry(ent, buf); break; default: ql_dbg(ql_dbg_async, vha, 0xd02b, "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); qla27xx_skip_entry(ent, buf); break; } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd20d, "%s: scratch [%lx]\n", __func__, *len); qla27xx_insert32(0xaaaaaaaa, buf, len); qla27xx_insert32(0xbbbbbbbb, buf, len); qla27xx_insert32(0xcccccccc, buf, len); qla27xx_insert32(0xdddddddd, buf, len); qla27xx_insert32(*len + sizeof(uint32_t), buf, len); if (buf) ent->t269.scratch_size = 5 * sizeof(uint32_t); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong addr = le32_to_cpu(ent->t270.addr); ulong dwords = le32_to_cpu(ent->t270.count); ql_dbg(ql_dbg_misc, vha, 0xd20e, "%s: rdremreg [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf); while (dwords--) { qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf); qla27xx_insert32(addr, buf, len); qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len); addr += sizeof(uint32_t); } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong addr = le32_to_cpu(ent->t271.addr); ulong data = le32_to_cpu(ent->t271.data); ql_dbg(ql_dbg_misc, vha, 0xd20f, "%s: wrremreg [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf); qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf); qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong dwords = le32_to_cpu(ent->t272.count); ulong start = le32_to_cpu(ent->t272.addr); ql_dbg(ql_dbg_misc, vha, 0xd210, "%s: rdremram [%lx]\n", __func__, *len); if (buf) { ql_dbg(ql_dbg_misc, vha, 0xd02c, "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); buf += *len; qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf); } *len += dwords * sizeof(uint32_t); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong dwords = le32_to_cpu(ent->t273.count); ulong addr = le32_to_cpu(ent->t273.addr); uint32_t value; ql_dbg(ql_dbg_misc, vha, 0xd211, "%s: pcicfg [%lx]\n", __func__, *len); while (dwords--) { value = ~0; if (pci_read_config_dword(vha->hw->pdev, addr, &value)) ql_dbg(ql_dbg_misc, vha, 0xd02d, "%s: failed pcicfg read at %lx\n", __func__, addr); qla27xx_insert32(addr, buf, len); qla27xx_insert32(value, buf, len); addr += sizeof(uint32_t); } return qla27xx_next_entry(ent); } static struct 
qla27xx_fwdt_entry * qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong type = ent->t274.queue_type; uint count = 0; uint i; ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212, "%s: getqsh(%lx) [%lx]\n", __func__, type, *len); if (type == T274_QUEUE_TYPE_REQ_SHAD) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; if (req || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); qla27xx_insert32(req && req->out_ptr ? *req->out_ptr : 0, buf, len); count++; } } } else if (type == T274_QUEUE_TYPE_RSP_SHAD) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; if (rsp || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); qla27xx_insert32(rsp && rsp->in_ptr ? *rsp->in_ptr : 0, buf, len); count++; } } } else if (QLA_TGT_MODE_ENABLED() && ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { struct qla_hw_data *ha = vha->hw; struct atio *atr = ha->tgt.atio_ring_ptr; if (atr || !buf) { qla27xx_insert16(0, buf, len); qla27xx_insert16(1, buf, len); qla27xx_insert32(ha->tgt.atio_q_in ? readl(ha->tgt.atio_q_in) : 0, buf, len); count++; } } else { ql_dbg(ql_dbg_misc, vha, 0xd02f, "%s: unknown queue %lx\n", __func__, type); qla27xx_skip_entry(ent, buf); } if (buf) { if (count) ent->t274.num_queues = count; else qla27xx_skip_entry(ent, buf); } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong offset = offsetof(typeof(*ent), t275.buffer); ulong length = le32_to_cpu(ent->t275.length); ulong size = le32_to_cpu(ent->hdr.size); void *buffer = ent->t275.buffer; ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213, "%s: buffer(%lx) [%lx]\n", __func__, length, *len); if (!length) { ql_dbg(ql_dbg_misc, vha, 0xd020, "%s: buffer zero length\n", __func__); qla27xx_skip_entry(ent, buf); goto done; } if (offset + length > size) { length = size - offset; ql_dbg(ql_dbg_misc, vha, 0xd030, "%s: buffer overflow, truncate [%lx]\n", __func__, length); ent->t275.length = cpu_to_le32(length); } qla27xx_insertbuf(buffer, length, buf, len); done: return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214, "%s: cond [%lx]\n", __func__, *len); if (buf) { ulong cond1 = le32_to_cpu(ent->t276.cond1); ulong cond2 = le32_to_cpu(ent->t276.cond2); uint type = vha->hw->pdev->device >> 4 & 0xf; uint func = vha->hw->port_no & 0x3; if (type != cond1 || func != cond2) { struct qla27xx_fwdt_template *tmp = buf; tmp->count--; ent = qla27xx_next_entry(ent); qla27xx_skip_entry(ent, buf); } } return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr); ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data); ulong data_addr = le32_to_cpu(ent->t277.data_addr); ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215, "%s: rdpep [%lx]\n", __func__, *len); qla27xx_insert32(wr_cmd_data, buf, len); qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf); qla27xx_read_reg(ISPREG(vha), data_addr, buf, len); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha, struct 
qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr); ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data); ulong data_addr = le32_to_cpu(ent->t278.data_addr); ulong wr_data = le32_to_cpu(ent->t278.wr_data); ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216, "%s: wrpep [%lx]\n", __func__, *len); qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf); qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf); return qla27xx_next_entry(ent); } static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong type = le32_to_cpu(ent->hdr.type); ql_dbg(ql_dbg_misc, vha, 0xd2ff, "%s: other %lx [%lx]\n", __func__, type, *len); qla27xx_skip_entry(ent, buf); return qla27xx_next_entry(ent); } static struct { uint type; typeof(qla27xx_fwdt_entry_other)(*call); } qla27xx_fwdt_entry_call[] = { { ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 }, { ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 }, { ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 }, { ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 }, { ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 }, { ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 }, { ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 }, { ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 }, { ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 }, { ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 }, { ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 }, { ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 }, { ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 }, { ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 }, { ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 }, { ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 }, { ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 }, { ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 }, { ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 }, { ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 }, { ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 }, { ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 }, { ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 }, { ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 }, { ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 }, { -1, qla27xx_fwdt_entry_other } }; static inline typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type)) { typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call; while (list->type < type) list++; if (list->type == type) return list->call; return qla27xx_fwdt_entry_other; } static void qla27xx_walk_template(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp, void *buf, ulong *len) { struct qla27xx_fwdt_entry *ent = (void *)tmp + le32_to_cpu(tmp->entry_offset); ulong type; tmp->count = le32_to_cpu(tmp->entry_count); ql_dbg(ql_dbg_misc, vha, 0xd01a, "%s: entry count %u\n", __func__, tmp->count); while (ent && tmp->count--) { type = le32_to_cpu(ent->hdr.type); ent = qla27xx_find_entry(type)(vha, ent, buf, len); if (!ent) break; if (ent == INVALID_ENTRY) { *len = 0; ql_dbg(ql_dbg_async, vha, 0xffff, "Unable to capture FW dump"); goto bailout; } } if (tmp->count) ql_dbg(ql_dbg_misc, vha, 0xd018, "%s: entry count residual=+%u\n", __func__, tmp->count); if (ent) ql_dbg(ql_dbg_misc, vha, 0xd019, "%s: missing end entry\n", __func__); bailout: cpu_to_le32s(&tmp->count); /* endianize residual count */ } static void qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp) { tmp->capture_timestamp = cpu_to_le32(jiffies); } static void qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) { uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; 
WARN_ON_ONCE(sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu", v + 0, v + 1, v + 2, v + 3) != 4); tmp->driver_info[0] = cpu_to_le32( v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]); tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]); tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678); } static void qla27xx_firmware_info(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp) { tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version); tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version); tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version); tmp->firmware_version[3] = cpu_to_le32( vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes); tmp->firmware_version[4] = cpu_to_le32( vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]); } static void ql27xx_edit_template(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp) { qla27xx_time_stamp(tmp); qla27xx_driver_info(tmp); qla27xx_firmware_info(vha, tmp); } static inline uint32_t qla27xx_template_checksum(void *p, ulong size) { __le32 *buf = p; uint64_t sum = 0; size /= sizeof(*buf); for ( ; size--; buf++) sum += le32_to_cpu(*buf); sum = (sum & 0xffffffff) + (sum >> 32); return ~sum; } static inline int qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) { return qla27xx_template_checksum(tmp, le32_to_cpu(tmp->template_size)) == 0; } static inline int qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp) { return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP; } static ulong qla27xx_execute_fwdt_template(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp, void *buf) { ulong len = 0; if (qla27xx_fwdt_template_valid(tmp)) { len = le32_to_cpu(tmp->template_size); tmp = memcpy(buf, tmp, len); ql27xx_edit_template(vha, tmp); qla27xx_walk_template(vha, tmp, buf, &len); } return len; } ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p) { struct qla27xx_fwdt_template *tmp = p; ulong len = 0; if (qla27xx_fwdt_template_valid(tmp)) { len = le32_to_cpu(tmp->template_size); qla27xx_walk_template(vha, tmp, NULL, &len); } return len; } ulong qla27xx_fwdt_template_size(void *p) { struct qla27xx_fwdt_template *tmp = p; return le32_to_cpu(tmp->template_size); } int qla27xx_fwdt_template_valid(void *p) { struct qla27xx_fwdt_template *tmp = p; if (!qla27xx_verify_template_header(tmp)) { ql_log(ql_log_warn, NULL, 0xd01c, "%s: template type %x\n", __func__, le32_to_cpu(tmp->template_type)); return false; } if (!qla27xx_verify_template_checksum(tmp)) { ql_log(ql_log_warn, NULL, 0xd01d, "%s: failed template checksum\n", __func__); return false; } return true; } void qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) { ulong flags = 0; if (!hardware_locked) spin_lock_irqsave(&vha->hw->hardware_lock, flags); if (!vha->hw->mpi_fw_dump) { ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n"); } else { struct fwdt *fwdt = &vha->hw->fwdt[1]; ulong len; void *buf = vha->hw->mpi_fw_dump; bool walk_template_only = false; if (vha->hw->mpi_fw_dumped) { /* Use the spare area for any further dumps. 
*/ buf += fwdt->dump_size; walk_template_only = true; ql_log(ql_log_warn, vha, 0x02f4, "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n", buf); } ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n"); if (!fwdt->template) { ql_log(ql_log_warn, vha, 0x02f6, "-> fwdt1 no template\n"); goto bailout; } len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); if (len == 0) { goto bailout; } else if (len != fwdt->dump_size) { ql_log(ql_log_warn, vha, 0x02f7, "-> fwdt1 fwdump residual=%+ld\n", fwdt->dump_size - len); } vha->hw->stat.num_mpi_reset++; if (walk_template_only) goto bailout; vha->hw->mpi_fw_dump_len = len; vha->hw->mpi_fw_dumped = 1; ql_log(ql_log_warn, vha, 0x02f8, "-> MPI firmware dump saved to buffer (%lu/%p)\n", vha->host_no, vha->hw->mpi_fw_dump); qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } bailout: if (!hardware_locked) spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); } void qla27xx_fwdump(scsi_qla_host_t *vha) { lockdep_assert_held(&vha->hw->hardware_lock); if (!vha->hw->fw_dump) { ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n"); } else if (vha->hw->fw_dumped) { ql_log(ql_log_warn, vha, 0xd01f, "-> Firmware already dumped (%p) -- ignoring request\n", vha->hw->fw_dump); } else { struct fwdt *fwdt = vha->hw->fwdt; ulong len; void *buf = vha->hw->fw_dump; ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n"); if (!fwdt->template) { ql_log(ql_log_warn, vha, 0xd012, "-> fwdt0 no template\n"); return; } len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); if (len == 0) { return; } else if (len != fwdt->dump_size) { ql_log(ql_log_warn, vha, 0xd013, "-> fwdt0 fwdump residual=%+ld\n", fwdt->dump_size - len); } vha->hw->fw_dump_len = len; vha->hw->fw_dumped = true; ql_log(ql_log_warn, vha, 0xd015, "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n", vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags); qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } }
linux-master
drivers/scsi/qla2xxx/qla_tmpl.c
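The qla_tmpl.c row above validates a firmware-dump template with qla27xx_template_checksum(): it sums the image as little-endian 32-bit words, folds the carry back into the low word, complements the result, and accepts the template when the checksum over the whole image is zero. The standalone user-space sketch below recomputes that check on a template file; it is not part of the driver, the file-name argument and main() harness are illustrative only, and it assumes the template length is a whole number of 32-bit words (any trailing bytes are ignored, just as the driver's size /= sizeof(*buf) does).

/*
 * Standalone sketch (not driver code): recompute the end-around-carry
 * checksum that qla27xx_template_checksum() applies to a firmware-dump
 * template.  A template verifies when the checksum over the image is 0.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t fwdt_checksum(const uint8_t *p, size_t size)
{
	uint64_t sum = 0;
	size_t i;

	/* Sum the image as little-endian 32-bit words. */
	for (i = 0; i + 4 <= size; i += 4)
		sum += (uint32_t)p[i] | (uint32_t)p[i + 1] << 8 |
		       (uint32_t)p[i + 2] << 16 | (uint32_t)p[i + 3] << 24;

	/* Fold the carries back into the low 32 bits, then complement. */
	sum = (sum & 0xffffffffULL) + (sum >> 32);
	return (uint32_t)~sum;
}

int main(int argc, char **argv)
{
	uint8_t *buf;
	uint32_t csum;
	long len;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <template.bin>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f || fseek(f, 0, SEEK_END) || (len = ftell(f)) < 0 ||
	    fseek(f, 0, SEEK_SET)) {
		perror(argv[1]);
		return 1;
	}
	buf = malloc(len);
	if (!buf || fread(buf, 1, len, f) != (size_t)len) {
		perror("read");
		return 1;
	}
	csum = fwdt_checksum(buf, len);
	printf("checksum 0x%08x (%s)\n", csum, csum == 0 ? "OK" : "BAD");
	free(buf);
	fclose(f);
	return 0;
}

Under those assumptions, running the sketch against an intact template image should report OK, mirroring what qla27xx_verify_template_checksum() checks before the driver walks the template.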
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/kobject.h> #include <linux/slab.h> #include <linux/blk-mq-pci.h> #include <linux/refcount.h> #include <linux/crash_dump.h> #include <linux/trace_events.h> #include <linux/trace.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> #include "qla_target.h" /* * Driver version */ char qla2x00_version_str[40]; static int apidev_major; /* * SRB allocation cache */ struct kmem_cache *srb_cachep; static struct trace_array *qla_trc_array; int ql2xfulldump_on_mpifail; module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql2xfulldump_on_mpifail, "Set this to take full dump on MPI hang."); int ql2xenforce_iocb_limit = 2; module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql2xenforce_iocb_limit, "Enforce IOCB throttling, to avoid FW congestion. (default: 2) " "1: track usage per queue, 2: track usage per adapter"); /* * CT6 CTX allocation cache */ static struct kmem_cache *ctx_cachep; /* * error level for logging */ uint ql_errlev = 0x8001; int ql2xsecenable; module_param(ql2xsecenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xsecenable, "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled."); static int ql2xenableclass2; module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); MODULE_PARM_DESC(ql2xenableclass2, "Specify if Class 2 operations are supported from the very " "beginning. Default is 0 - class 2 not supported."); int ql2xlogintimeout = 20; module_param(ql2xlogintimeout, int, S_IRUGO); MODULE_PARM_DESC(ql2xlogintimeout, "Login timeout value in seconds."); int qlport_down_retry; module_param(qlport_down_retry, int, S_IRUGO); MODULE_PARM_DESC(qlport_down_retry, "Maximum number of command retries to a port that returns " "a PORT-DOWN status."); int ql2xplogiabsentdevice; module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xplogiabsentdevice, "Option to enable PLOGI to devices that are not present after " "a Fabric scan. This is needed for several broken switches. " "Default is 0 - no PLOGI. 1 - perform PLOGI."); int ql2xloginretrycount; module_param(ql2xloginretrycount, int, S_IRUGO); MODULE_PARM_DESC(ql2xloginretrycount, "Specify an alternate value for the NVRAM login retry count."); int ql2xallocfwdump = 1; module_param(ql2xallocfwdump, int, S_IRUGO); MODULE_PARM_DESC(ql2xallocfwdump, "Option to enable allocation of memory for a firmware dump " "during HBA initialization. Memory allocation requirements " "vary by ISP type. Default is 1 - allocate memory."); int ql2xextended_error_logging; module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xextended_error_logging, "Option to enable extended error logging,\n" "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n" "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n" "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n" "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n" "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n" "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" "\t\t0x00080000 - P3P Specific. 
0x00040000 - Virtual Port.\n" "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n" "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n" "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" "\t\t0x1e400000 - Preferred value for capturing essential " "debug information (equivalent to old " "ql2xextended_error_logging=1).\n" "\t\tDo LOGICAL OR of the value to enable more than one level"); int ql2xextended_error_logging_ktrace = 1; module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xextended_error_logging_ktrace, "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n"); int ql2xshiftctondsd = 6; module_param(ql2xshiftctondsd, int, S_IRUGO); MODULE_PARM_DESC(ql2xshiftctondsd, "Set to control shifting of command type processing " "based on total number of SG elements."); int ql2xfdmienable = 1; module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR); module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI registrations. " "1 - provide FDMI registrations (default)."); #define MAX_Q_DEPTH 64 static int ql2xmaxqdepth = MAX_Q_DEPTH; module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to set for each LUN. " "Default is 64."); int ql2xenabledif = 2; module_param(ql2xenabledif, int, S_IRUGO); MODULE_PARM_DESC(ql2xenabledif, " Enable T10-CRC-DIF:\n" " Default is 2.\n" " 0 -- No DIF Support\n" " 1 -- Enable DIF for all types\n" " 2 -- Enable DIF for all types, except Type 0.\n"); #if (IS_ENABLED(CONFIG_NVME_FC)) int ql2xnvmeenable = 1; #else int ql2xnvmeenable; #endif module_param(ql2xnvmeenable, int, 0644); MODULE_PARM_DESC(ql2xnvmeenable, "Enables NVME support. " "0 - no NVMe. Default is Y"); int ql2xenablehba_err_chk = 2; module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenablehba_err_chk, " Enable T10-CRC-DIF Error isolation by HBA:\n" " Default is 2.\n" " 0 -- Error isolation disabled\n" " 1 -- Error isolation enabled only for DIX Type 0\n" " 2 -- Error isolation enabled for all Types\n"); int ql2xiidmaenable = 1; module_param(ql2xiidmaenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xiidmaenable, "Enables iIDMA settings " "Default is 1 - perform iIDMA. 0 - no iIDMA."); int ql2xmqsupport = 1; module_param(ql2xmqsupport, int, S_IRUGO); MODULE_PARM_DESC(ql2xmqsupport, "Enable on demand multiple queue pairs support " "Default is 1 for supported. " "Set it to 0 to turn off mq qpair support."); int ql2xfwloadbin; module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfwloadbin, "Option to specify location from which to load ISP firmware:.\n" " 2 -- load firmware via the request_firmware() (hotplug).\n" " interface.\n" " 1 -- load firmware from flash.\n" " 0 -- use default semantics.\n"); int ql2xetsenable; module_param(ql2xetsenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xetsenable, "Enables firmware ETS burst." "Default is 0 - skip ETS enablement."); int ql2xdbwr = 1; module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xdbwr, "Option to specify scheme for request queue posting.\n" " 0 -- Regular doorbell.\n" " 1 -- CAMRAM doorbell (faster).\n"); int ql2xgffidenable; module_param(ql2xgffidenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xgffidenable, "Enables GFF_ID checks of port type. 
" "Default is 0 - Do not use GFF_ID information."); int ql2xasynctmfenable = 1; module_param(ql2xasynctmfenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xasynctmfenable, "Enables issue of TM IOCBs asynchronously via IOCB mechanism" "Default is 1 - Issue TM IOCBs via mailbox mechanism."); int ql2xdontresethba; module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xdontresethba, "Option to specify reset behaviour.\n" " 0 (Default) -- Reset on failure.\n" " 1 -- Do not reset on failure.\n"); uint64_t ql2xmaxlun = MAX_LUNS; module_param(ql2xmaxlun, ullong, S_IRUGO); MODULE_PARM_DESC(ql2xmaxlun, "Defines the maximum LU number to register with the SCSI " "midlayer. Default is 65535."); int ql2xmdcapmask = 0x1F; module_param(ql2xmdcapmask, int, S_IRUGO); MODULE_PARM_DESC(ql2xmdcapmask, "Set the Minidump driver capture mask level. " "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); int ql2xmdenable = 1; module_param(ql2xmdenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xmdenable, "Enable/disable MiniDump. " "0 - MiniDump disabled. " "1 (Default) - MiniDump enabled."); int ql2xexlogins; module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xexlogins, "Number of extended Logins. " "0 (Default)- Disabled."); int ql2xexchoffld = 1024; module_param(ql2xexchoffld, uint, 0644); MODULE_PARM_DESC(ql2xexchoffld, "Number of target exchanges."); int ql2xiniexchg = 1024; module_param(ql2xiniexchg, uint, 0644); MODULE_PARM_DESC(ql2xiniexchg, "Number of initiator exchanges."); int ql2xfwholdabts; module_param(ql2xfwholdabts, int, S_IRUGO); MODULE_PARM_DESC(ql2xfwholdabts, "Allow FW to hold status IOCB until ABTS rsp received. " "0 (Default) Do not set fw option. " "1 - Set fw option to hold ABTS."); int ql2xmvasynctoatio = 1; module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmvasynctoatio, "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" "0 (Default). Do not move IOCBs" "1 - Move IOCBs."); int ql2xautodetectsfp = 1; module_param(ql2xautodetectsfp, int, 0444); MODULE_PARM_DESC(ql2xautodetectsfp, "Detect SFP range and set appropriate distance.\n" "1 (Default): Enable\n"); int ql2xenablemsix = 1; module_param(ql2xenablemsix, int, 0444); MODULE_PARM_DESC(ql2xenablemsix, "Set to enable MSI or MSI-X interrupt mechanism.\n" " Default is 1, enable MSI-X interrupt mechanism.\n" " 0 -- enable traditional pin-based mechanism.\n" " 1 -- enable MSI-X interrupt mechanism.\n" " 2 -- enable MSI interrupt mechanism.\n"); int qla2xuseresexchforels; module_param(qla2xuseresexchforels, int, 0444); MODULE_PARM_DESC(qla2xuseresexchforels, "Reserve 1/2 of emergency exchanges for ELS.\n" " 0 (default): disabled"); static int ql2xprotmask; module_param(ql2xprotmask, int, 0644); MODULE_PARM_DESC(ql2xprotmask, "Override DIF/DIX protection capabilities mask\n" "Default is 0 which sets protection mask based on " "capabilities reported by HBA firmware.\n"); static int ql2xprotguard; module_param(ql2xprotguard, int, 0644); MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" " 0 -- Let HBA firmware decide\n" " 1 -- Force T10 CRC\n" " 2 -- Force IP checksum\n"); int ql2xdifbundlinginternalbuffers; module_param(ql2xdifbundlinginternalbuffers, int, 0644); MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, "Force using internal buffers for DIF information\n" "0 (Default). 
Based on check.\n" "1 Force using internal buffers\n"); int ql2xsmartsan; module_param(ql2xsmartsan, int, 0444); module_param_named(smartsan, ql2xsmartsan, int, 0444); MODULE_PARM_DESC(ql2xsmartsan, "Send SmartSAN Management Attributes for FDMI Registration." " Default is 0 - No SmartSAN registration," " 1 - Register SmartSAN Management Attributes."); int ql2xrdpenable; module_param(ql2xrdpenable, int, 0444); module_param_named(rdpenable, ql2xrdpenable, int, 0444); MODULE_PARM_DESC(ql2xrdpenable, "Enables RDP responses. " "0 - no RDP responses (default). " "1 - provide RDP responses."); int ql2xabts_wait_nvme = 1; module_param(ql2xabts_wait_nvme, int, 0444); MODULE_PARM_DESC(ql2xabts_wait_nvme, "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)"); static u32 ql2xdelay_before_pci_error_handling = 5; module_param(ql2xdelay_before_pci_error_handling, uint, 0644); MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling, "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n"); static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); static void qla2xxx_map_queues(struct Scsi_Host *shost); static void qla2x00_destroy_deferred_work(struct qla_hw_data *); u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES; module_param(ql2xnvme_queues, uint, S_IRUGO); MODULE_PARM_DESC(ql2xnvme_queues, "Number of NVMe Queues that can be configured.\n" "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n" "1 - Minimum number of queues supported\n" "8 - Default value"); int ql2xfc2target = 1; module_param(ql2xfc2target, int, 0444); MODULE_PARM_DESC(qla2xfc2target, "Enables FC2 Target support. " "0 - FC2 Target support is disabled. " "1 - FC2 Target support is enabled (default)."); static struct scsi_transport_template *qla2xxx_transport_template = NULL; struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; /* TODO Convert to inlines * * Timer routines */ __inline__ void qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) { timer_setup(&vha->timer, qla2x00_timer, 0); vha->timer.expires = jiffies + interval * HZ; add_timer(&vha->timer); vha->timer_active = 1; } static inline void qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) { /* Currently used for 82XX only. */ if (vha->device_flags & DFLG_DEV_FAILED) { ql_dbg(ql_dbg_timer, vha, 0x600d, "Device in a failed state, returning.\n"); return; } mod_timer(&vha->timer, jiffies + interval * HZ); } static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *vha) { del_timer_sync(&vha->timer); vha->timer_active = 0; } static int qla2x00_do_dpc(void *data); static void qla2x00_rst_aen(scsi_qla_host_t *); static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, struct req_que **, struct rsp_que **); static void qla2x00_free_fw_dump(struct qla_hw_data *); static void qla2x00_mem_free(struct qla_hw_data *); int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, struct qla_qpair *qpair); /* -------------------------------------------------------------------------- */ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, struct rsp_que *rsp) { struct qla_hw_data *ha = vha->hw; rsp->qpair = ha->base_qpair; rsp->req = req; ha->base_qpair->hw = ha; ha->base_qpair->req = req; ha->base_qpair->rsp = rsp; ha->base_qpair->vha = vha; ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; ha->base_qpair->srb_mempool = ha->srb_mempool; INIT_LIST_HEAD(&ha->base_qpair->hints_list); INIT_LIST_HEAD(&ha->base_qpair->dsd_list); ha->base_qpair->enable_class_2 = ql2xenableclass2; /* init qpair to this cpu. Will adjust at run time. */ qla_cpu_update(rsp->qpair, raw_smp_processor_id()); ha->base_qpair->pdev = ha->pdev; if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; } static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, struct rsp_que *rsp) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *), GFP_KERNEL); if (!ha->req_q_map) { ql_log(ql_log_fatal, vha, 0x003b, "Unable to allocate memory for request queue ptrs.\n"); goto fail_req_map; } ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *), GFP_KERNEL); if (!ha->rsp_q_map) { ql_log(ql_log_fatal, vha, 0x003c, "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map; } ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); if (ha->base_qpair == NULL) { ql_log(ql_log_warn, vha, 0x00e0, "Failed to allocate base queue pair memory.\n"); goto fail_base_qpair; } qla_init_base_qpair(vha, req, rsp); if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) { ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), GFP_KERNEL); if (!ha->queue_pair_map) { ql_log(ql_log_fatal, vha, 0x0180, "Unable to allocate memory for queue pair ptrs.\n"); goto fail_qpair_map; } if (qla_mapq_alloc_qp_cpu_map(ha) != 0) { kfree(ha->queue_pair_map); ha->queue_pair_map = NULL; goto fail_qpair_map; } } /* * Make sure we record at least the request and response queue zero in * case we need to free them if part of the probe fails. 
*/ ha->rsp_q_map[0] = rsp; ha->req_q_map[0] = req; set_bit(0, ha->rsp_qid_map); set_bit(0, ha->req_qid_map); return 0; fail_qpair_map: kfree(ha->base_qpair); ha->base_qpair = NULL; fail_base_qpair: kfree(ha->rsp_q_map); ha->rsp_q_map = NULL; fail_rsp_map: kfree(ha->req_q_map); ha->req_q_map = NULL; fail_req_map: return -ENOMEM; } static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) { if (IS_QLAFX00(ha)) { if (req && req->ring_fx00) dma_free_coherent(&ha->pdev->dev, (req->length_fx00 + 1) * sizeof(request_t), req->ring_fx00, req->dma_fx00); } else if (req && req->ring) dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), req->ring, req->dma); if (req) kfree(req->outstanding_cmds); kfree(req); } static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) { if (IS_QLAFX00(ha)) { if (rsp && rsp->ring_fx00) dma_free_coherent(&ha->pdev->dev, (rsp->length_fx00 + 1) * sizeof(request_t), rsp->ring_fx00, rsp->dma_fx00); } else if (rsp && rsp->ring) { dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), rsp->ring, rsp->dma); } kfree(rsp); } static void qla2x00_free_queues(struct qla_hw_data *ha) { struct req_que *req; struct rsp_que *rsp; int cnt; unsigned long flags; if (ha->queue_pair_map) { kfree(ha->queue_pair_map); ha->queue_pair_map = NULL; } if (ha->base_qpair) { kfree(ha->base_qpair); ha->base_qpair = NULL; } qla_mapq_free_qp_cpu_map(ha); spin_lock_irqsave(&ha->hardware_lock, flags); for (cnt = 0; cnt < ha->max_req_queues; cnt++) { if (!test_bit(cnt, ha->req_qid_map)) continue; req = ha->req_q_map[cnt]; clear_bit(cnt, ha->req_qid_map); ha->req_q_map[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_req_que(ha, req); spin_lock_irqsave(&ha->hardware_lock, flags); } spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(ha->req_q_map); ha->req_q_map = NULL; spin_lock_irqsave(&ha->hardware_lock, flags); for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { if (!test_bit(cnt, ha->rsp_qid_map)) continue; rsp = ha->rsp_q_map[cnt]; clear_bit(cnt, ha->rsp_qid_map); ha->rsp_q_map[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_rsp_que(ha, rsp); spin_lock_irqsave(&ha->hardware_lock, flags); } spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(ha->rsp_q_map); ha->rsp_q_map = NULL; } static char * qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) { struct qla_hw_data *ha = vha->hw; static const char *const pci_bus_modes[] = { "33", "66", "100", "133", }; uint16_t pci_bus; pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; if (pci_bus) { snprintf(str, str_len, "PCI-X (%s MHz)", pci_bus_modes[pci_bus]); } else { pci_bus = (ha->pci_attr & BIT_8) >> 8; snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]); } return str; } static char * qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) { static const char *const pci_bus_modes[] = { "33", "66", "100", "133", }; struct qla_hw_data *ha = vha->hw; uint32_t pci_bus; if (pci_is_pcie(ha->pdev)) { uint32_t lstat, lspeed, lwidth; const char *speed_str; pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); lspeed = lstat & PCI_EXP_LNKCAP_SLS; lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; switch (lspeed) { case 1: speed_str = "2.5GT/s"; break; case 2: speed_str = "5.0GT/s"; break; case 3: speed_str = "8.0GT/s"; break; case 4: speed_str = "16.0GT/s"; break; default: speed_str = "<unknown>"; break; } snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth); return 
str; } pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; if (pci_bus == 0 || pci_bus == 8) snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus >> 3]); else snprintf(str, str_len, "PCI-X Mode %d (%s MHz)", pci_bus & 4 ? 2 : 1, pci_bus_modes[pci_bus & 3]); return str; } static char * qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) { char un_str[10]; struct qla_hw_data *ha = vha->hw; snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); if (ha->fw_attributes & BIT_9) { strcat(str, "FLX"); return (str); } switch (ha->fw_attributes & 0xFF) { case 0x7: strcat(str, "EF"); break; case 0x17: strcat(str, "TP"); break; case 0x37: strcat(str, "IP"); break; case 0x77: strcat(str, "VI"); break; default: sprintf(un_str, "(%x)", ha->fw_attributes); strcat(str, un_str); break; } if (ha->fw_attributes & 0x100) strcat(str, "X"); return (str); } static char * qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) { struct qla_hw_data *ha = vha->hw; snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); return str; } void qla2x00_sp_free_dma(srb_t *sp) { struct qla_hw_data *ha = sp->vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); if (sp->flags & SRB_DMA_VALID) { scsi_dma_unmap(cmd); sp->flags &= ~SRB_DMA_VALID; } if (sp->flags & SRB_CRC_PROT_DMA_VALID) { dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), scsi_prot_sg_count(cmd), cmd->sc_data_direction); sp->flags &= ~SRB_CRC_PROT_DMA_VALID; } if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */ qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); sp->flags &= ~SRB_CRC_CTX_DSD_VALID; } if (sp->flags & SRB_CRC_CTX_DMA_VALID) { struct crc_context *ctx0 = sp->u.scmd.crc_ctx; dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); sp->flags &= ~SRB_CRC_CTX_DMA_VALID; } if (sp->flags & SRB_FCP_CMND_DMA_VALID) { struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx; dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, ctx1->fcp_cmnd_dma); list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list); sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt; sp->qpair->dsd_avail += ctx1->dsd_use_cnt; } if (sp->flags & SRB_GOT_BUF) qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); } void qla2x00_sp_compl(srb_t *sp, int res) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; /* kref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); cmd->result = res; sp->type = 0; scsi_done(cmd); if (comp) complete(comp); } void qla2xxx_qpair_sp_free_dma(srb_t *sp) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct qla_hw_data *ha = sp->fcport->vha->hw; if (sp->flags & SRB_DMA_VALID) { scsi_dma_unmap(cmd); sp->flags &= ~SRB_DMA_VALID; } if (sp->flags & SRB_CRC_PROT_DMA_VALID) { dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), scsi_prot_sg_count(cmd), cmd->sc_data_direction); sp->flags &= ~SRB_CRC_PROT_DMA_VALID; } if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */ qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); sp->flags &= ~SRB_CRC_CTX_DSD_VALID; } if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { struct crc_context *difctx = sp->u.scmd.crc_ctx; struct dsd_dma *dif_dsd, *nxt_dsd; list_for_each_entry_safe(dif_dsd, nxt_dsd, &difctx->ldif_dma_hndl_list, list) { list_del(&dif_dsd->list); dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr, dif_dsd->dsd_list_dma); kfree(dif_dsd); difctx->no_dif_bundl--; } list_for_each_entry_safe(dif_dsd, nxt_dsd, 
&difctx->ldif_dsd_list, list) { list_del(&dif_dsd->list); dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr, dif_dsd->dsd_list_dma); kfree(dif_dsd); difctx->no_ldif_dsd--; } if (difctx->no_ldif_dsd) { ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, "%s: difctx->no_ldif_dsd=%x\n", __func__, difctx->no_ldif_dsd); } if (difctx->no_dif_bundl) { ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, "%s: difctx->no_dif_bundl=%x\n", __func__, difctx->no_dif_bundl); } sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID; } if (sp->flags & SRB_FCP_CMND_DMA_VALID) { struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx; dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, ctx1->fcp_cmnd_dma); list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list); sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt; sp->qpair->dsd_avail += ctx1->dsd_use_cnt; sp->flags &= ~SRB_FCP_CMND_DMA_VALID; } if (sp->flags & SRB_CRC_CTX_DMA_VALID) { struct crc_context *ctx0 = sp->u.scmd.crc_ctx; dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); sp->flags &= ~SRB_CRC_CTX_DMA_VALID; } if (sp->flags & SRB_GOT_BUF) qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); } void qla2xxx_qpair_sp_compl(srb_t *sp, int res) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); cmd->result = res; sp->type = 0; scsi_done(cmd); if (comp) complete(comp); } static int qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); srb_t *sp; int rval; if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) || WARN_ON_ONCE(!rport)) { cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } if (ha->mqenable) { uint32_t tag; uint16_t hwq; struct qla_qpair *qpair = NULL; tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); hwq = blk_mq_unique_tag_to_hwq(tag); qpair = ha->queue_pair_map[hwq]; if (qpair) return qla2xxx_mqueuecommand(host, cmd, qpair); } if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) { ql_dbg(ql_dbg_aer, vha, 0x9010, "PCI Channel IO permanent failure, exiting " "cmd=%p.\n", cmd); cmd->result = DID_NO_CONNECT << 16; } else { ql_dbg(ql_dbg_aer, vha, 0x9011, "EEH_Busy, Requeuing the cmd=%p.\n", cmd); cmd->result = DID_REQUEUE << 16; } goto qc24_fail_command; } rval = fc_remote_port_chkready(rport); if (rval) { cmd->result = rval; ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; } if (!vha->flags.difdix_supported && scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { ql_dbg(ql_dbg_io, vha, 0x3004, "DIF Cap not reg, fail DIF capable cmd's:%p.\n", cmd); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } if (!fcport || fcport->deleted) { cmd->result = DID_IMM_RETRY << 16; goto qc24_fail_command; } if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { ql_dbg(ql_dbg_io, vha, 0x3005, "Returning DNC, fcport_state=%d loop_state=%d.\n", atomic_read(&fcport->state), atomic_read(&base_vha->loop_state)); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } goto qc24_target_busy; } /* * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP. 
*/ if (fcport->retry_delay_timestamp == 0) { /* retry delay not set */ } else if (time_after(jiffies, fcport->retry_delay_timestamp)) fcport->retry_delay_timestamp = 0; else goto qc24_target_busy; sp = scsi_cmd_priv(cmd); /* ref: INIT */ qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; sp->free = qla2x00_sp_free_dma; sp->done = qla2x00_sp_compl; rval = ha->isp_ops->start_scsi(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); goto qc24_host_busy_free_sp; } return 0; qc24_host_busy_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; qc24_fail_command: scsi_done(cmd); return 0; } /* For MQ supported I/O */ int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, struct qla_qpair *qpair) { scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); srb_t *sp; int rval; rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16); if (rval) { cmd->result = rval; ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; } if (!qpair->online) { ql_dbg(ql_dbg_io, vha, 0x3077, "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } if (!fcport || fcport->deleted) { cmd->result = DID_IMM_RETRY << 16; goto qc24_fail_command; } if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { ql_dbg(ql_dbg_io, vha, 0x3077, "Returning DNC, fcport_state=%d loop_state=%d.\n", atomic_read(&fcport->state), atomic_read(&base_vha->loop_state)); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } goto qc24_target_busy; } /* * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP. */ if (fcport->retry_delay_timestamp == 0) { /* retry delay not set */ } else if (time_after(jiffies, fcport->retry_delay_timestamp)) fcport->retry_delay_timestamp = 0; else goto qc24_target_busy; sp = scsi_cmd_priv(cmd); /* ref: INIT */ qla2xxx_init_sp(sp, vha, qpair, fcport); sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; sp->free = qla2xxx_qpair_sp_free_dma; sp->done = qla2xxx_qpair_sp_compl; rval = ha->isp_ops->start_scsi_mq(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); goto qc24_host_busy_free_sp; } return 0; qc24_host_busy_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; qc24_fail_command: scsi_done(cmd); return 0; } /* * qla2x00_wait_for_hba_online * Wait till the HBA is online after going through * <= MAX_RETRIES_OF_ISP_ABORT or * finally HBA is disabled ie marked offline * * Input: * ha - pointer to host adapter structure * * Note: * Does context switching-Release SPIN_LOCK * (if any) before calling this routine. 
* * Return: * Success (Adapter is online) : 0 * Failed (Adapter is offline/disabled) : 1 */ int qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) { int return_status; unsigned long wait_online; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || ha->dpc_active) && time_before(jiffies, wait_online)) { msleep(1000); } if (base_vha->flags.online) return_status = QLA_SUCCESS; else return_status = QLA_FUNCTION_FAILED; return (return_status); } static inline int test_fcport_count(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; unsigned long flags; int res; /* Return 0 = sleep, x=wake */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_init, vha, 0x00ec, "tgt %p, fcport_count=%d\n", vha, vha->fcport_count); res = (vha->fcport_count == 0); if (res) { struct fc_port *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->deleted != QLA_SESS_DELETED) { /* session(s) may not be fully logged in * (ie fcport_count=0), but session * deletion thread(s) may be inflight. */ res = 0; break; } } } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; } /* * qla2x00_wait_for_sess_deletion can only be called from remove_one. * it has dependency on UNLOADING flag to stop device discovery */ void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { u8 i; qla2x00_mark_all_devices_lost(vha); for (i = 0; i < 10; i++) { if (wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), HZ) > 0) break; } flush_workqueue(vha->hw->wq); } /* * qla2x00_wait_for_hba_ready * Wait till the HBA is ready before doing driver unload * * Input: * ha - pointer to host adapter structure * * Note: * Does context switching-Release SPIN_LOCK * (if any) before calling this routine. * */ static void qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); while ((qla2x00_reset_active(vha) || ha->dpc_active || ha->flags.mbox_busy) || test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { if (test_bit(UNLOADING, &base_vha->dpc_flags)) break; msleep(1000); } } int qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) { int return_status; unsigned long wait_reset; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || ha->dpc_active) && time_before(jiffies, wait_reset)) { msleep(1000); if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) && ha->flags.chip_reset_done) break; } if (ha->flags.chip_reset_done) return_status = QLA_SUCCESS; else return_status = QLA_FUNCTION_FAILED; return return_status; } /************************************************************************** * qla2xxx_eh_abort * * Description: * The abort function will abort the specified command. * * Input: * cmd = Linux SCSI command packet to be aborted. * * Returns: * Either SUCCESS or FAILED. * * Note: * Only return FAILED if command not returned by firmware. 
**************************************************************************/ static int qla2xxx_eh_abort(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); DECLARE_COMPLETION_ONSTACK(comp); srb_t *sp; int ret; unsigned int id; uint64_t lun; int rval; struct qla_hw_data *ha = vha->hw; uint32_t ratov_j; struct qla_qpair *qpair; unsigned long flags; int fast_fail_status = SUCCESS; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8042, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return FAILED; } /* Save any FAST_IO_FAIL value to return later if abort succeeds */ ret = fc_block_scsi_eh(cmd); if (ret != 0) fast_fail_status = ret; sp = scsi_cmd_priv(cmd); qpair = sp->qpair; vha->cmd_timeout_cnt++; if ((sp->fcport && sp->fcport->deleted) || !qpair) return fast_fail_status != SUCCESS ? fast_fail_status : FAILED; spin_lock_irqsave(qpair->qp_lock_ptr, flags); sp->comp = &comp; spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); id = cmd->device->id; lun = cmd->device->lun; ql_dbg(ql_dbg_taskm, vha, 0x8002, "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", vha->host_no, id, lun, sp, cmd, sp->handle); /* * Abort will release the original Command/sp from FW. Let the * original command call scsi_done. In return, he will wakeup * this sleeping thread. */ rval = ha->isp_ops->abort_command(sp); ql_dbg(ql_dbg_taskm, vha, 0x8003, "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); /* Wait for the command completion. */ ratov_j = ha->r_a_tov/10 * 4 * 1000; ratov_j = msecs_to_jiffies(ratov_j); switch (rval) { case QLA_SUCCESS: if (!wait_for_completion_timeout(&comp, ratov_j)) { ql_dbg(ql_dbg_taskm, vha, 0xffff, "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", __func__, ha->r_a_tov/10); ret = FAILED; } else { ret = fast_fail_status; } break; default: ret = FAILED; break; } sp->comp = NULL; ql_log(ql_log_info, vha, 0x801c, "Abort command issued nexus=%ld:%d:%llu -- %x.\n", vha->host_no, id, lun, ret); return ret; } #define ABORT_POLLING_PERIOD 1000 #define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) /* * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED. */ static int __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t, uint64_t l, enum nexus_wait_type type) { int cnt, match, status; unsigned long flags; scsi_qla_host_t *vha = qpair->vha; struct req_que *req = qpair->req; srb_t *sp; struct scsi_cmnd *cmd; unsigned long wait_iter = ABORT_WAIT_ITER; bool found; struct qla_hw_data *ha = vha->hw; status = QLA_SUCCESS; while (wait_iter--) { found = false; spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (!sp) continue; if (sp->type != SRB_SCSI_CMD) continue; if (vha->vp_idx != sp->vha->vp_idx) continue; match = 0; cmd = GET_CMD_SP(sp); switch (type) { case WAIT_HOST: match = 1; break; case WAIT_TARGET: if (sp->fcport) match = sp->fcport->d_id.b24 == t; else match = 0; break; case WAIT_LUN: if (sp->fcport) match = (sp->fcport->d_id.b24 == t && cmd->device->lun == l); else match = 0; break; } if (!match) continue; spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { ql_dbg(ql_dbg_taskm, vha, 0x8005, "Return:eh_wait.\n"); return status; } /* * SRB_SCSI_CMD is still in the outstanding_cmds array. * it means scsi_done has not called. Wait for it to * clear from outstanding_cmds. 
*/ msleep(ABORT_POLLING_PERIOD); spin_lock_irqsave(qpair->qp_lock_ptr, flags); found = true; } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (!found) break; } if (wait_iter == -1) status = QLA_FUNCTION_FAILED; return status; } int qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, uint64_t l, enum nexus_wait_type type) { struct qla_qpair *qpair; struct qla_hw_data *ha = vha->hw; int i, status = QLA_SUCCESS; status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l, type); for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) { qpair = ha->queue_pair_map[i]; if (!qpair) continue; status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l, type); } return status; } static char *reset_errors[] = { "HBA not online", "HBA not ready", "Task management failed", "Waiting for command completions", }; static int qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; scsi_qla_host_t *vha = shost_priv(sdev->host); struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); fc_port_t *fcport = (struct fc_port *) sdev->hostdata; struct qla_hw_data *ha = vha->hw; int err; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803e, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return FAILED; } if (!fcport) { return FAILED; } err = fc_block_rport(rport); if (err != 0) return err; if (fcport->deleted) return FAILED; ql_log(ql_log_info, vha, 0x8009, "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no, sdev->id, sdev->lun, cmd); err = 0; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800a, "Wait for hba online failed for cmd=%p.\n", cmd); goto eh_reset_failed; } err = 2; if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800c, "do_reset failed for cmd=%p.\n", cmd); goto eh_reset_failed; } err = 3; if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, cmd->device->lun, WAIT_LUN) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800d, "wait for pending cmds failed for cmd=%p.\n", cmd); goto eh_reset_failed; } ql_log(ql_log_info, vha, 0x800e, "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", vha->host_no, sdev->id, sdev->lun, cmd); return SUCCESS; eh_reset_failed: ql_log(ql_log_info, vha, 0x800f, "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", reset_errors[err], vha->host_no, sdev->id, sdev->lun, cmd); vha->reset_cmd_err_cnt++; return FAILED; } static int qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport)); struct qla_hw_data *ha = vha->hw; fc_port_t *fcport = *(fc_port_t **)rport->dd_data; int err; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803f, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return FAILED; } if (!fcport) { return FAILED; } err = fc_block_rport(rport); if (err != 0) return err; if (fcport->deleted) return FAILED; ql_log(ql_log_info, vha, 0x8009, "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no, sdev->id, cmd); err = 0; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800a, "Wait for hba online failed for cmd=%p.\n", cmd); goto eh_reset_failed; } err = 2; if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800c, "target_reset failed for cmd=%p.\n", cmd); goto eh_reset_failed; } err = 3; if 
(qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0, WAIT_TARGET) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800d, "wait for pending cmds failed for cmd=%p.\n", cmd); goto eh_reset_failed; } ql_log(ql_log_info, vha, 0x800e, "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n", vha->host_no, sdev->id, cmd); return SUCCESS; eh_reset_failed: ql_log(ql_log_info, vha, 0x800f, "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, cmd); vha->reset_cmd_err_cnt++; return FAILED; } /************************************************************************** * qla2xxx_eh_bus_reset * * Description: * The bus reset function will reset the bus and abort any executing * commands. * * Input: * cmd = Linux SCSI command packet of the command that cause the * bus reset. * * Returns: * SUCCESS/FAILURE (defined as macro in scsi.h). * **************************************************************************/ static int qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); int ret = FAILED; unsigned int id; uint64_t lun; struct qla_hw_data *ha = vha->hw; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8040, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return FAILED; } id = cmd->device->id; lun = cmd->device->lun; if (qla2x00_chip_is_down(vha)) return ret; ql_log(ql_log_info, vha, 0x8012, "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x8013, "Wait for hba online failed board disabled.\n"); goto eh_bus_reset_done; } if (qla2x00_loop_reset(vha) == QLA_SUCCESS) ret = SUCCESS; if (ret == FAILED) goto eh_bus_reset_done; /* Flush outstanding commands. */ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8014, "Wait for pending commands failed.\n"); ret = FAILED; } eh_bus_reset_done: ql_log(ql_log_warn, vha, 0x802b, "BUS RESET %s nexus=%ld:%d:%llu.\n", (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); return ret; } /************************************************************************** * qla2xxx_eh_host_reset * * Description: * The reset function will reset the Adapter. * * Input: * cmd = Linux SCSI command packet of the command that cause the * adapter reset. * * Returns: * Either SUCCESS or FAILED. * * Note: **************************************************************************/ static int qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; int ret = FAILED; unsigned int id; uint64_t lun; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8041, "PCI/Register disconnect, exiting.\n"); qla_pci_set_eeh_busy(vha); return SUCCESS; } id = cmd->device->id; lun = cmd->device->lun; ql_log(ql_log_info, vha, 0x8018, "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); /* * No point in issuing another reset if one is active. Also do not * attempt a reset if we are updating flash. 
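	 * (ha->optrom_state != QLA_SWAITING in the check below means an
	 * option-ROM/flash access is still in flight.)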
*/ if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) goto eh_host_reset_lock; if (vha != base_vha) { if (qla2x00_vp_abort_isp(vha)) goto eh_host_reset_lock; } else { if (IS_P3P_TYPE(vha->hw)) { if (!qla82xx_fcoe_ctx_reset(vha)) { /* Ctx reset success */ ret = SUCCESS; goto eh_host_reset_lock; } /* fall thru if ctx reset failed */ } if (ha->wq) flush_workqueue(ha->wq); set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (ha->isp_ops->abort_isp(base_vha)) { clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); /* failed. schedule dpc to try */ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x802a, "wait for hba online failed.\n"); goto eh_host_reset_lock; } } clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); } /* Waiting for command to be returned to OS.*/ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == QLA_SUCCESS) ret = SUCCESS; eh_host_reset_lock: ql_log(ql_log_info, vha, 0x8017, "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); return ret; } /* * qla2x00_loop_reset * Issue loop reset. * * Input: * ha = adapter block pointer. * * Returns: * 0 = success */ int qla2x00_loop_reset(scsi_qla_host_t *vha) { int ret; struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(ha)) return QLA_SUCCESS; if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha); ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_taskm, vha, 0x802d, "full_login_lip=%d.\n", ret); } } if (ha->flags.enable_lip_reset) { ret = qla2x00_lip_reset(vha); if (ret != QLA_SUCCESS) ql_dbg(ql_dbg_taskm, vha, 0x802e, "lip_reset failed (%d).\n", ret); } /* Issue marker command only when we are going to start the I/O */ vha->marker_needed = 1; return QLA_SUCCESS; } /* * The caller must ensure that no completion interrupts will happen * while this function is in progress. */ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, unsigned long *flags) __releases(qp->qp_lock_ptr) __acquires(qp->qp_lock_ptr) { DECLARE_COMPLETION_ONSTACK(comp); scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); int rval; bool ret_cmd; uint32_t ratov_j; lockdep_assert_held(qp->qp_lock_ptr); if (qla2x00_chip_is_down(vha)) { sp->done(sp, res); return; } if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !qla2x00_isp_reg_stat(ha))) { if (sp->comp) { sp->done(sp, res); return; } sp->comp = &comp; spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); rval = ha->isp_ops->abort_command(sp); /* Wait for command completion. */ ret_cmd = false; ratov_j = ha->r_a_tov/10 * 4 * 1000; ratov_j = msecs_to_jiffies(ratov_j); switch (rval) { case QLA_SUCCESS: if (!wait_for_completion_timeout(&comp, ratov_j)) { ql_dbg(ql_dbg_taskm, vha, 0xffff, "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", __func__, ha->r_a_tov/10); ret_cmd = true; } /* else FW return SP to driver */ break; default: ret_cmd = true; break; } spin_lock_irqsave(qp->qp_lock_ptr, *flags); if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd))) sp->done(sp, res); } else { sp->done(sp, res); } } /* * The caller must ensure that no completion interrupts will happen * while this function is in progress.
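 * Otherwise the interrupt-driven completion path and this routine could
 * both call sp->done() and race over the outstanding_cmds[] slots that
 * are cleared below.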
*/ static void __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) { int cnt; unsigned long flags; srb_t *sp; scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_cmd *cmd; if (!ha->req_q_map) return; spin_lock_irqsave(qp->qp_lock_ptr, flags); req = qp->req; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { /* * perform lockless completion during driver unload */ if (qla2x00_chip_is_down(vha)) { req->outstanding_cmds[cnt] = NULL; spin_unlock_irqrestore(qp->qp_lock_ptr, flags); sp->done(sp, res); spin_lock_irqsave(qp->qp_lock_ptr, flags); continue; } switch (sp->cmd_type) { case TYPE_SRB: qla2x00_abort_srb(qp, sp, res, &flags); break; case TYPE_TGT_CMD: if (!vha->hw->tgt.tgt_ops || !tgt || qla_ini_mode_enabled(vha)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n", vha->dpc_flags); continue; } cmd = (struct qla_tgt_cmd *)sp; cmd->aborted = 1; break; case TYPE_TGT_TMCMD: /* Skip task management functions. */ break; default: break; } req->outstanding_cmds[cnt] = NULL; } } spin_unlock_irqrestore(qp->qp_lock_ptr, flags); } /* * The caller must ensure that no completion interrupts will happen * while this function is in progress. */ void qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) { int que; struct qla_hw_data *ha = vha->hw; /* Continue only if initialization complete. */ if (!ha->base_qpair) return; __qla2x00_abort_all_cmds(ha->base_qpair, res); if (!ha->queue_pair_map) return; for (que = 0; que < ha->max_qpairs; que++) { if (!ha->queue_pair_map[que]) continue; __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res); } } static int qla2xxx_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; sdev->hostdata = *(fc_port_t **)rport->dd_data; return 0; } static int qla2xxx_slave_configure(struct scsi_device *sdev) { scsi_qla_host_t *vha = shost_priv(sdev->host); struct req_que *req = vha->req; if (IS_T10_PI_CAPABLE(vha->hw)) blk_queue_update_dma_alignment(sdev->request_queue, 0x7); scsi_change_queue_depth(sdev, req->max_q_depth); return 0; } static void qla2xxx_slave_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; } /** * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. * @ha: HA context * * At exit, the @ha's flags.enable_64bit_addressing set to indicated * supported addressing method. */ static void qla2x00_config_dma_addressing(struct qla_hw_data *ha) { /* Assume a 32bit DMA mask. */ ha->flags.enable_64bit_addressing = 0; if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { /* Any upper-dword bits set? */ if (MSD(dma_get_required_mask(&ha->pdev->dev)) && !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { /* Ok, a 64bit DMA mask is applicable. 
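			 * Both the streaming and coherent masks accepted
			 * 64 bits, so the 64-bit IOCB builders are installed;
			 * any failure falls through to the 32-bit masks at
			 * the end of the function.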
*/ ha->flags.enable_64bit_addressing = 1; ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; return; } } dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); } static void qla2x00_enable_intrs(struct qla_hw_data *ha) { unsigned long flags = 0; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 1; /* enable risc and host interrupts */ wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC); rd_reg_word(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qla2x00_disable_intrs(struct qla_hw_data *ha) { unsigned long flags = 0; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 0; /* disable risc and host interrupts */ wrt_reg_word(&reg->ictrl, 0); rd_reg_word(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qla24xx_enable_intrs(struct qla_hw_data *ha) { unsigned long flags = 0; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 1; wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT); rd_reg_dword(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qla24xx_disable_intrs(struct qla_hw_data *ha) { unsigned long flags = 0; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (IS_NOPOLLING_TYPE(ha)) return; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 0; wrt_reg_dword(&reg->ictrl, 0); rd_reg_dword(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static int qla2x00_iospace_config(struct qla_hw_data *ha) { resource_size_t pio; uint16_t msix; if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (!(ha->bars & 1)) goto skip_pio; /* We only need PIO for Flash operations on ISP2312 v2 chips. */ pio = pci_resource_start(ha->pdev, 0); if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { ql_log_pci(ql_log_warn, ha->pdev, 0x0012, "Invalid pci I/O region size (%s).\n", pci_name(ha->pdev)); pio = 0; } } else { ql_log_pci(ql_log_warn, ha->pdev, 0x0013, "Region #0 no a PIO resource (%s).\n", pci_name(ha->pdev)); pio = 0; } ha->pio_address = pio; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, "PIO address=%llu.\n", (unsigned long long)ha->pio_address); skip_pio: /* Use MMIO operations for all accesses. 
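	 * The register window is BAR 1 on these parts; it must be a memory
	 * resource of at least MIN_IOBASE_LEN bytes or the probe is aborted.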
*/ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, "Region #1 not an MMIO resource (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, "Invalid PCI mem region size (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0017, "Cannot remap MMIO (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; /* Check if FW supports MQ or not */ if (!(ha->fw_attributes & BIT_6)) goto mqiobase_exit; if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), pci_resource_len(ha->pdev, 3)); if (ha->mqiobase) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, "MQIO Base=%p.\n", ha->mqiobase); /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); ha->msix_count = msix + 1; /* Max queues are bounded by available msix vectors */ /* MB interrupt uses 1 vector */ ha->max_req_queues = ha->msix_count - 1; ha->max_rsp_queues = ha->max_req_queues; /* Queue pairs is the max value minus the base queue pair */ ha->max_qpairs = ha->max_rsp_queues - 1; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188, "Max no of queues pairs: %d.\n", ha->max_qpairs); ql_log_pci(ql_log_info, ha->pdev, 0x001a, "MSI-X vector count: %d.\n", ha->msix_count); } else ql_log_pci(ql_log_info, ha->pdev, 0x001b, "BAR 3 not enabled.\n"); mqiobase_exit: ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c, "MSIX Count: %d.\n", ha->msix_count); return (0); iospace_error_exit: return (-ENOMEM); } static int qla83xx_iospace_config(struct qla_hw_data *ha) { uint16_t msix; if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0117, "Failed to reserve PIO/MMIO regions (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } /* Use MMIO operations for all accesses. 
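	 * On the 83xx-style parts handled here the register window is BAR 0;
	 * BAR 4 (mqiobase) and BAR 2 (msixbase) are mapped further down
	 * before the MSI-X vector count is read from config space.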
*/ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { ql_log_pci(ql_log_warn, ha->pdev, 0x0118, "Invalid pci I/O region size (%s).\n", pci_name(ha->pdev)); goto iospace_error_exit; } if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { ql_log_pci(ql_log_warn, ha->pdev, 0x0119, "Invalid PCI mem region size (%s), aborting\n", pci_name(ha->pdev)); goto iospace_error_exit; } ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x011a, "Cannot remap MMIO (%s), aborting.\n", pci_name(ha->pdev)); goto iospace_error_exit; } /* 64bit PCI BAR - BAR2 will correspoond to region 4 */ /* 83XX 26XX always use MQ type access for queues * - mbar 2, a.k.a region 4 */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4), pci_resource_len(ha->pdev, 4)); if (!ha->mqiobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, "BAR2/region4 not enabled\n"); goto mqiobase_exit; } ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2), pci_resource_len(ha->pdev, 2)); if (ha->msixbase) { /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_83XX_PCI_MSIX_CONTROL, &msix); ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; /* * By default, driver uses at least two msix vectors * (default & rspq) */ if (ql2xmqsupport || ql2xnvmeenable) { /* MB interrupt uses 1 vector */ ha->max_req_queues = ha->msix_count - 1; /* ATIOQ needs 1 vector. That's 1 less QPair */ if (QLA_TGT_MODE_ENABLED()) ha->max_req_queues--; ha->max_rsp_queues = ha->max_req_queues; /* Queue pairs is the max value minus * the base queue pair */ ha->max_qpairs = ha->max_req_queues - 1; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3, "Max no of queues pairs: %d.\n", ha->max_qpairs); } ql_log_pci(ql_log_info, ha->pdev, 0x011c, "MSI-X vector count: %d.\n", ha->msix_count); } else ql_log_pci(ql_log_info, ha->pdev, 0x011e, "BAR 1 not enabled.\n"); mqiobase_exit: ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f, "MSIX Count: %d.\n", ha->msix_count); return 0; iospace_error_exit: return -ENOMEM; } static struct isp_operations qla2100_isp_ops = { .pci_config = qla2100_pci_config, .reset_chip = qla2x00_reset_chip, .chip_diag = qla2x00_chip_diag, .config_rings = qla2x00_config_rings, .reset_adapter = qla2x00_reset_adapter, .nvram_config = qla2x00_nvram_config, .update_fw_options = qla2x00_update_fw_options, .load_risc = qla2x00_load_risc, .pci_info_str = qla2x00_pci_info_str, .fw_version_str = qla2x00_fw_version_str, .intr_handler = qla2100_intr_handler, .enable_intrs = qla2x00_enable_intrs, .disable_intrs = qla2x00_disable_intrs, .abort_command = qla2x00_abort_command, .target_reset = qla2x00_abort_target, .lun_reset = qla2x00_lun_reset, .fabric_login = qla2x00_login_fabric, .fabric_logout = qla2x00_fabric_logout, .calc_req_entries = qla2x00_calc_iocbs_32, .build_iocbs = qla2x00_build_scsi_iocbs_32, .prep_ms_iocb = qla2x00_prep_ms_iocb, .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, .read_nvram = qla2x00_read_nvram_data, .write_nvram = qla2x00_write_nvram_data, .fw_dump = qla2100_fw_dump, .beacon_on = NULL, .beacon_off = NULL, .beacon_blink = NULL, .read_optrom = qla2x00_read_optrom_data, .write_optrom = qla2x00_write_optrom_data, .get_flash_version = qla2x00_get_flash_version, .start_scsi = qla2x00_start_scsi, .start_scsi_mq = NULL, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla2300_isp_ops 
= { .pci_config = qla2300_pci_config, .reset_chip = qla2x00_reset_chip, .chip_diag = qla2x00_chip_diag, .config_rings = qla2x00_config_rings, .reset_adapter = qla2x00_reset_adapter, .nvram_config = qla2x00_nvram_config, .update_fw_options = qla2x00_update_fw_options, .load_risc = qla2x00_load_risc, .pci_info_str = qla2x00_pci_info_str, .fw_version_str = qla2x00_fw_version_str, .intr_handler = qla2300_intr_handler, .enable_intrs = qla2x00_enable_intrs, .disable_intrs = qla2x00_disable_intrs, .abort_command = qla2x00_abort_command, .target_reset = qla2x00_abort_target, .lun_reset = qla2x00_lun_reset, .fabric_login = qla2x00_login_fabric, .fabric_logout = qla2x00_fabric_logout, .calc_req_entries = qla2x00_calc_iocbs_32, .build_iocbs = qla2x00_build_scsi_iocbs_32, .prep_ms_iocb = qla2x00_prep_ms_iocb, .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb, .read_nvram = qla2x00_read_nvram_data, .write_nvram = qla2x00_write_nvram_data, .fw_dump = qla2300_fw_dump, .beacon_on = qla2x00_beacon_on, .beacon_off = qla2x00_beacon_off, .beacon_blink = qla2x00_beacon_blink, .read_optrom = qla2x00_read_optrom_data, .write_optrom = qla2x00_write_optrom_data, .get_flash_version = qla2x00_get_flash_version, .start_scsi = qla2x00_start_scsi, .start_scsi_mq = NULL, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla24xx_isp_ops = { .pci_config = qla24xx_pci_config, .reset_chip = qla24xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla24xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla24xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla24xx_intr_handler, .enable_intrs = qla24xx_enable_intrs, .disable_intrs = qla24xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = qla24xx_read_nvram_data, .write_nvram = qla24xx_write_nvram_data, .fw_dump = qla24xx_fw_dump, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = qla24xx_beacon_blink, .read_optrom = qla24xx_read_optrom_data, .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_start_scsi, .start_scsi_mq = NULL, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla25xx_isp_ops = { .pci_config = qla25xx_pci_config, .reset_chip = qla24xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla24xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla24xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla24xx_intr_handler, .enable_intrs = qla24xx_enable_intrs, .disable_intrs = qla24xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, 
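	/* As in every FWI2-capable op table in this file, calc_req_entries
	 * and build_iocbs stay NULL; only the 2100/2300 tables install the
	 * _32 builders. */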
.prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = qla25xx_read_nvram_data, .write_nvram = qla25xx_write_nvram_data, .fw_dump = qla25xx_fw_dump, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = qla24xx_beacon_blink, .read_optrom = qla25xx_read_optrom_data, .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla81xx_isp_ops = { .pci_config = qla25xx_pci_config, .reset_chip = qla24xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla81xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla24xx_intr_handler, .enable_intrs = qla24xx_enable_intrs, .disable_intrs = qla24xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = NULL, .write_nvram = NULL, .fw_dump = qla81xx_fw_dump, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = qla83xx_beacon_blink, .read_optrom = qla25xx_read_optrom_data, .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla82xx_isp_ops = { .pci_config = qla82xx_pci_config, .reset_chip = qla82xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = qla82xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla82xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla82xx_intr_handler, .enable_intrs = qla82xx_enable_intrs, .disable_intrs = qla82xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = qla24xx_read_nvram_data, .write_nvram = qla24xx_write_nvram_data, .fw_dump = qla82xx_fw_dump, .beacon_on = qla82xx_beacon_on, .beacon_off = qla82xx_beacon_off, .beacon_blink = NULL, .read_optrom = qla82xx_read_optrom_data, .write_optrom = qla82xx_write_optrom_data, .get_flash_version = qla82xx_get_flash_version, .start_scsi = qla82xx_start_scsi, .start_scsi_mq = NULL, .abort_isp = qla82xx_abort_isp, .iospace_config = qla82xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla8044_isp_ops = { .pci_config = qla82xx_pci_config, .reset_chip = qla82xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = 
qla82xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla82xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla8044_intr_handler, .enable_intrs = qla82xx_enable_intrs, .disable_intrs = qla82xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = NULL, .write_nvram = NULL, .fw_dump = qla8044_fw_dump, .beacon_on = qla82xx_beacon_on, .beacon_off = qla82xx_beacon_off, .beacon_blink = NULL, .read_optrom = qla8044_read_optrom_data, .write_optrom = qla8044_write_optrom_data, .get_flash_version = qla82xx_get_flash_version, .start_scsi = qla82xx_start_scsi, .start_scsi_mq = NULL, .abort_isp = qla8044_abort_isp, .iospace_config = qla82xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla83xx_isp_ops = { .pci_config = qla25xx_pci_config, .reset_chip = qla24xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla81xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla24xx_intr_handler, .enable_intrs = qla24xx_enable_intrs, .disable_intrs = qla24xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = NULL, .write_nvram = NULL, .fw_dump = qla83xx_fw_dump, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = qla83xx_beacon_blink, .read_optrom = qla25xx_read_optrom_data, .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla83xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qlafx00_isp_ops = { .pci_config = qlafx00_pci_config, .reset_chip = qlafx00_soft_reset, .chip_diag = qlafx00_chip_diag, .config_rings = qlafx00_config_rings, .reset_adapter = qlafx00_soft_reset, .nvram_config = NULL, .update_fw_options = NULL, .load_risc = NULL, .pci_info_str = qlafx00_pci_info_str, .fw_version_str = qlafx00_fw_version_str, .intr_handler = qlafx00_intr_handler, .enable_intrs = qlafx00_enable_intrs, .disable_intrs = qlafx00_disable_intrs, .abort_command = qla24xx_async_abort_command, .target_reset = qlafx00_abort_target, .lun_reset = qlafx00_lun_reset, .fabric_login = NULL, .fabric_logout = NULL, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = qla24xx_read_nvram_data, .write_nvram = qla24xx_write_nvram_data, .fw_dump = NULL, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = NULL, .read_optrom = qla24xx_read_optrom_data, 
.write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qlafx00_start_scsi, .start_scsi_mq = NULL, .abort_isp = qlafx00_abort_isp, .iospace_config = qlafx00_iospace_config, .initialize_adapter = qlafx00_initialize_adapter, }; static struct isp_operations qla27xx_isp_ops = { .pci_config = qla25xx_pci_config, .reset_chip = qla24xx_reset_chip, .chip_diag = qla24xx_chip_diag, .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, .update_fw_options = qla24xx_update_fw_options, .load_risc = qla81xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, .intr_handler = qla24xx_intr_handler, .enable_intrs = qla24xx_enable_intrs, .disable_intrs = qla24xx_disable_intrs, .abort_command = qla24xx_abort_command, .target_reset = qla24xx_abort_target, .lun_reset = qla24xx_lun_reset, .fabric_login = qla24xx_login_fabric, .fabric_logout = qla24xx_fabric_logout, .calc_req_entries = NULL, .build_iocbs = NULL, .prep_ms_iocb = qla24xx_prep_ms_iocb, .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = NULL, .write_nvram = NULL, .fw_dump = qla27xx_fwdump, .mpi_fw_dump = qla27xx_mpi_fwdump, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = qla83xx_beacon_blink, .read_optrom = qla25xx_read_optrom_data, .write_optrom = qla24xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .start_scsi_mq = qla2xxx_dif_start_scsi_mq, .abort_isp = qla2x00_abort_isp, .iospace_config = qla83xx_iospace_config, .initialize_adapter = qla2x00_initialize_adapter, }; static inline void qla2x00_set_isp_flags(struct qla_hw_data *ha) { ha->device_type = DT_EXTENDED_IDS; switch (ha->pdev->device) { case PCI_DEVICE_ID_QLOGIC_ISP2100: ha->isp_type |= DT_ISP2100; ha->device_type &= ~DT_EXTENDED_IDS; ha->fw_srisc_address = RISC_START_ADDRESS_2100; break; case PCI_DEVICE_ID_QLOGIC_ISP2200: ha->isp_type |= DT_ISP2200; ha->device_type &= ~DT_EXTENDED_IDS; ha->fw_srisc_address = RISC_START_ADDRESS_2100; break; case PCI_DEVICE_ID_QLOGIC_ISP2300: ha->isp_type |= DT_ISP2300; ha->device_type |= DT_ZIO_SUPPORTED; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2312: ha->isp_type |= DT_ISP2312; ha->device_type |= DT_ZIO_SUPPORTED; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2322: ha->isp_type |= DT_ISP2322; ha->device_type |= DT_ZIO_SUPPORTED; if (ha->pdev->subsystem_vendor == 0x1028 && ha->pdev->subsystem_device == 0x0170) ha->device_type |= DT_OEM_001; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP6312: ha->isp_type |= DT_ISP6312; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP6322: ha->isp_type |= DT_ISP6322; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2422: ha->isp_type |= DT_ISP2422; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2432: ha->isp_type |= DT_ISP2432; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8432: ha->isp_type |= DT_ISP8432; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = 
RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP5422: ha->isp_type |= DT_ISP5422; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP5432: ha->isp_type |= DT_ISP5432; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2532: ha->isp_type |= DT_ISP2532; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8001: ha->isp_type |= DT_ISP8001; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8021: ha->isp_type |= DT_ISP8021; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; /* Initialize 82XX ISP flags */ qla82xx_init_flags(ha); break; case PCI_DEVICE_ID_QLOGIC_ISP8044: ha->isp_type |= DT_ISP8044; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; /* Initialize 82XX ISP flags */ qla82xx_init_flags(ha); break; case PCI_DEVICE_ID_QLOGIC_ISP2031: ha->isp_type |= DT_ISP2031; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8031: ha->isp_type |= DT_ISP8031; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISPF001: ha->isp_type |= DT_ISPFX00; break; case PCI_DEVICE_ID_QLOGIC_ISP2071: ha->isp_type |= DT_ISP2071; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2271: ha->isp_type |= DT_ISP2271; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2261: ha->isp_type |= DT_ISP2261; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2081: case PCI_DEVICE_ID_QLOGIC_ISP2089: ha->isp_type |= DT_ISP2081; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2281: case PCI_DEVICE_ID_QLOGIC_ISP2289: ha->isp_type |= DT_ISP2281; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; } if (IS_QLA82XX(ha)) ha->port_no = ha->portnum & 1; else { /* Get adapter physical port no from interrupt pin register. 
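		 * The pin register is 1-based, so the newer ISPs below simply
		 * decrement it, while older parts take the inverted low bit
		 * of the pin value.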
*/ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->port_no--; else ha->port_no = !(ha->port_no & 1); } ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", ha->device_type, ha->port_no, ha->fw_srisc_address); } static void qla2xxx_scan_start(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); if (vha->hw->flags.running_gold_fw) return; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(RSCN_UPDATE, &vha->dpc_flags); set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags); } static int qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) { scsi_qla_host_t *vha = shost_priv(shost); if (test_bit(UNLOADING, &vha->dpc_flags)) return 1; if (!vha->host) return 1; if (time > vha->hw->loop_reset_delay * HZ) return 1; return atomic_read(&vha->loop_state) == LOOP_READY; } static void qla_heartbeat_work_fn(struct work_struct *work) { struct qla_hw_data *ha = container_of(work, struct qla_hw_data, heartbeat_work); struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); if (!ha->flags.mbox_busy && base_vha->flags.init_done) qla_no_op_mb(base_vha); } static void qla2x00_iocb_work_fn(struct work_struct *work) { struct scsi_qla_host *vha = container_of(work, struct scsi_qla_host, iocb_work); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int i = 2; unsigned long flags; if (test_bit(UNLOADING, &base_vha->dpc_flags)) return; while (!list_empty(&vha->work_list) && i > 0) { qla2x00_do_work(vha); i--; } spin_lock_irqsave(&vha->work_lock, flags); clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags); spin_unlock_irqrestore(&vha->work_lock, flags); } static void qla_trace_init(void) { qla_trc_array = trace_array_get_by_name("qla2xxx"); if (!qla_trc_array) { ql_log(ql_log_fatal, NULL, 0x0001, "Unable to create qla2xxx trace instance, instance logging will be disabled.\n"); return; } QLA_TRACE_ENABLE(qla_trc_array); } static void qla_trace_uninit(void) { if (!qla_trc_array) return; trace_array_put(qla_trc_array); } /* * PCI driver interface */ static int qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { int ret = -ENODEV; struct Scsi_Host *host; scsi_qla_host_t *base_vha = NULL; struct qla_hw_data *ha; char pci_info[30]; char fw_str[30], wq_name[30]; struct scsi_host_template *sht; int bars, mem_only = 0; uint16_t req_length = 0, rsp_length = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; int i; bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); sht = &qla2xxx_driver_template; if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 
|| pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, "Mem only adapter.\n"); } ql_dbg_pci(ql_dbg_init, pdev, 0x0008, "Bars=%d.\n", bars); if (mem_only) { if (pci_enable_device_mem(pdev)) return ret; } else { if (pci_enable_device(pdev)) return ret; } if (is_kdump_kernel()) { ql2xmqsupport = 0; ql2xallocfwdump = 0; } ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); if (!ha) { ql_log_pci(ql_log_fatal, pdev, 0x0009, "Unable to allocate memory for ha.\n"); goto disable_device; } ql_dbg_pci(ql_dbg_init, pdev, 0x000a, "Memory allocated for ha=%p.\n", ha); ha->pdev = pdev; INIT_LIST_HEAD(&ha->tgt.q_full_list); spin_lock_init(&ha->tgt.q_full_lock); spin_lock_init(&ha->tgt.sess_lock); spin_lock_init(&ha->tgt.atio_lock); spin_lock_init(&ha->sadb_lock); INIT_LIST_HEAD(&ha->sadb_tx_index_list); INIT_LIST_HEAD(&ha->sadb_rx_index_list); spin_lock_init(&ha->sadb_fp_lock); if (qla_edif_sadb_build_free_pool(ha)) { kfree(ha); goto disable_device; } atomic_set(&ha->nvme_active_aen_cnt, 0); /* Clear our data area */ ha->bars = bars; ha->mem_only = mem_only; spin_lock_init(&ha->hardware_lock); spin_lock_init(&ha->vport_slock); mutex_init(&ha->selflogin_lock); mutex_init(&ha->optrom_mutex); /* Set ISP-type information. */ qla2x00_set_isp_flags(ha); /* Set EEH reset type to fundamental if required by hba */ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) pdev->needs_freset = 1; ha->prev_topology = 0; ha->init_cb_size = sizeof(init_cb_t); ha->link_data_rate = PORT_SPEED_UNKNOWN; ha->optrom_size = OPTROM_SIZE_2300; ha->max_exchg = FW_MAX_EXCHANGES_CNT; atomic_set(&ha->num_pend_mbx_stage1, 0); atomic_set(&ha->num_pend_mbx_stage2, 0); atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; INIT_LIST_HEAD(&ha->tmf_pending); INIT_LIST_HEAD(&ha->tmf_active); /* Assign ISP specific operations. 
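	 * Each branch below also fixes up the ring lengths, mailbox counts
	 * and flash/NVRAM offsets that the later memory allocation and
	 * flash routines depend on.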
*/ if (IS_QLA2100(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; req_length = REQUEST_ENTRY_CNT_2100; rsp_length = RESPONSE_ENTRY_CNT_2100; ha->max_loop_id = SNS_LAST_LOOP_ID_2100; ha->gid_list_info_size = 4; ha->flash_conf_off = ~0; ha->flash_data_off = ~0; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; ha->isp_ops = &qla2100_isp_ops; } else if (IS_QLA2200(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; req_length = REQUEST_ENTRY_CNT_2200; rsp_length = RESPONSE_ENTRY_CNT_2100; ha->max_loop_id = SNS_LAST_LOOP_ID_2100; ha->gid_list_info_size = 4; ha->flash_conf_off = ~0; ha->flash_data_off = ~0; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; ha->isp_ops = &qla2100_isp_ops; } else if (IS_QLA23XX(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_2200; rsp_length = RESPONSE_ENTRY_CNT_2300; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->gid_list_info_size = 6; if (IS_QLA2322(ha) || IS_QLA6322(ha)) ha->optrom_size = OPTROM_SIZE_2322; ha->flash_conf_off = ~0; ha->flash_data_off = ~0; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; ha->isp_ops = &qla2300_isp_ops; } else if (IS_QLA24XX_TYPE(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_24xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_24XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; ha->isp_ops = &qla24xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } else if (IS_QLA25XX(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_24xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_25XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla25xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } else if (IS_QLA81XX(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_81XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla81xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; } else if (IS_QLA82XX(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_82XX; rsp_length = RESPONSE_ENTRY_CNT_82XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_82XX; 
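		/* ISP82xx reuses the 25xx NPIV vport limit set just below. */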
ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla82xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } else if (IS_QLA8044(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_82XX; rsp_length = RESPONSE_ENTRY_CNT_82XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_83XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla8044_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } else if (IS_QLA83XX(ha)) { ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_83XX; rsp_length = RESPONSE_ENTRY_CNT_83XX; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_83XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla83xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; } else if (IS_QLAFX00(ha)) { ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; req_length = REQUEST_ENTRY_CNT_FX00; rsp_length = RESPONSE_ENTRY_CNT_FX00; ha->isp_ops = &qlafx00_isp_ops; ha->port_down_retry_count = 30; /* default value */ ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; ha->mr.fw_hbt_en = 1; ha->mr.host_info_resend = false; ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; } else if (IS_QLA27XX(ha)) { ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_83XX; rsp_length = RESPONSE_ENTRY_CNT_83XX; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_83XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla27xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; } else if (IS_QLA28XX(ha)) { ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_83XX; rsp_length = RESPONSE_ENTRY_CNT_83XX; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_28XX; ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla27xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; } ql_dbg_pci(ql_dbg_init, pdev, 0x001e, "mbx_count=%d, req_length=%d, " "rsp_length=%d, 
max_loop_id=%d, init_cb_size=%d, " "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, " "max_fibre_devices=%d.\n", ha->mbx_count, req_length, rsp_length, ha->max_loop_id, ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, ha->nvram_npiv_size, ha->max_fibre_devices); ql_dbg_pci(ql_dbg_init, pdev, 0x001f, "isp_ops=%p, flash_conf_off=%d, " "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, ha->nvram_conf_off, ha->nvram_data_off); /* Configure PCI I/O space */ ret = ha->isp_ops->iospace_config(ha); if (ret) goto iospace_config_failed; ql_log_pci(ql_log_info, pdev, 0x001d, "Found an ISP%04X irq %d iobase 0x%p.\n", pdev->device, pdev->irq, ha->iobase); mutex_init(&ha->vport_lock); mutex_init(&ha->mq_lock); init_completion(&ha->mbx_cmd_comp); complete(&ha->mbx_cmd_comp); init_completion(&ha->mbx_intr_comp); init_completion(&ha->dcbx_comp); init_completion(&ha->lb_portup_comp); set_bit(0, (unsigned long *) ha->vp_idx_map); qla2x00_config_dma_addressing(ha); ql_dbg_pci(ql_dbg_init, pdev, 0x0020, "64 Bit addressing is %s.\n", ha->flags.enable_64bit_addressing ? "enable" : "disable"); ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); if (ret) { ql_log_pci(ql_log_fatal, pdev, 0x0031, "Failed to allocate memory for adapter, aborting.\n"); goto probe_hw_failed; } req->max_q_depth = MAX_Q_DEPTH; if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) req->max_q_depth = ql2xmaxqdepth; base_vha = qla2x00_create_host(sht, ha); if (!base_vha) { ret = -ENOMEM; goto probe_hw_failed; } pci_set_drvdata(pdev, base_vha); set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); host = base_vha->host; base_vha->req = req; if (IS_QLA2XXX_MIDTYPE(ha)) base_vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(base_vha); else base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + base_vha->vp_idx; /* Setup fcport template structure. 
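	 * ha->mr.fcport is the placeholder fcport handed to the
	 * qlafx00_fx_disc() calls further down on ISPFX00 adapters.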
*/ ha->mr.fcport.vha = base_vha; ha->mr.fcport.port_type = FCT_UNKNOWN; ha->mr.fcport.loop_id = FC_NO_LOOP_ID; qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; ha->mr.fcport.scan_state = 1; qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN | QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT | QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN); /* Set the SG table size based on ISP type */ if (!IS_FWI2_CAPABLE(ha)) { if (IS_QLA2100(ha)) host->sg_tablesize = 32; } else { if (!IS_QLA82XX(ha)) host->sg_tablesize = QLA_SG_ALL; } host->max_id = ha->max_fibre_devices; host->cmd_per_lun = 3; host->unique_id = host->host_no; if (ql2xenabledif && ql2xenabledif != 2) { ql_log(ql_log_warn, base_vha, 0x302d, "Invalid value for ql2xenabledif, resetting it to default (2)\n"); ql2xenabledif = 2; } if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; host->max_channel = MAX_BUSES - 1; /* Older HBAs support only 16-bit LUNs */ if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && ql2xmaxlun > 0xffff) host->max_lun = 0xffff; else host->max_lun = ql2xmaxlun; host->transportt = qla2xxx_transport_template; sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); ql_dbg(ql_dbg_init, base_vha, 0x0033, "max_id=%d this_id=%d " "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, host->this_id, host->cmd_per_lun, host->unique_id, host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); if (ret) goto probe_failed; /* Alloc arrays of request and response ring ptrs */ ret = qla2x00_alloc_queues(ha, req, rsp); if (ret) { ql_log(ql_log_fatal, base_vha, 0x003d, "Failed to allocate memory for queue pointers..." "aborting.\n"); ret = -ENODEV; goto probe_failed; } if (ha->mqenable) { /* number of hardware queues supported by blk/scsi-mq*/ host->nr_hw_queues = ha->max_qpairs; ql_dbg(ql_dbg_init, base_vha, 0x0192, "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); } else { if (ql2xnvmeenable) { host->nr_hw_queues = ha->max_qpairs; ql_dbg(ql_dbg_init, base_vha, 0x0194, "FC-NVMe support is enabled, HW queues=%d\n", host->nr_hw_queues); } else { ql_dbg(ql_dbg_init, base_vha, 0x0193, "blk/scsi-mq disabled.\n"); } } qlt_probe_one_stage1(base_vha, ha); pci_save_state(pdev); /* Assign back pointers */ rsp->req = req; req->rsp = rsp; if (IS_QLAFX00(ha)) { ha->rsp_q_map[0] = rsp; ha->req_q_map[0] = req; set_bit(0, ha->req_qid_map); set_bit(0, ha->rsp_qid_map); } /* FWI2-capable only. 
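	 * The isp24 in/out pointer defaults set here are overridden just
	 * below for MQ-capable, ISPFX00 and P3P register layouts.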
*/ req->req_q_in = &ha->iobase->isp24.req_q_in; req->req_q_out = &ha->iobase->isp24.req_q_out; rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; } if (IS_QLAFX00(ha)) { req->req_q_in = &ha->iobase->ispfx00.req_q_in; req->req_q_out = &ha->iobase->ispfx00.req_q_out; rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; } if (IS_P3P_TYPE(ha)) { req->req_q_out = &ha->iobase->isp82.req_q_out[0]; rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; } ql_dbg(ql_dbg_multiq, base_vha, 0xc009, "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, "req->req_q_in=%p req->req_q_out=%p " "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x003e, "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(ql_dbg_init, base_vha, 0x003f, "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); if (unlikely(!ha->wq)) { ret = -ENOMEM; goto probe_failed; } if (ha->isp_ops->initialize_adapter(base_vha)) { ql_log(ql_log_fatal, base_vha, 0x00d6, "Failed to initialize adapter - Adapter flags %x.\n", base_vha->device_flags); if (IS_QLA82XX(ha)) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); qla82xx_idc_unlock(ha); ql_log(ql_log_fatal, base_vha, 0x00d7, "HW State: FAILED.\n"); } else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); qla8044_wr_direct(base_vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_FAILED); qla8044_idc_unlock(ha); ql_log(ql_log_fatal, base_vha, 0x0150, "HW State: FAILED.\n"); } ret = -ENODEV; goto probe_failed; } if (IS_QLAFX00(ha)) host->can_queue = QLAFX00_MAX_CANQUEUE; else host->can_queue = req->num_outstanding_cmds - 10; ql_dbg(ql_dbg_init, base_vha, 0x0032, "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); /* Check if FW supports MQ or not for ISP25xx */ if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) ha->mqenable = 0; if (ha->mqenable) { bool startit = false; if (QLA_TGT_MODE_ENABLED()) startit = false; if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) startit = true; /* Create start of day qpairs for Block MQ */ for (i = 0; i < ha->max_qpairs; i++) qla2xxx_create_qpair(base_vha, 5, 0, startit); } qla_init_iocb_limit(base_vha); if (ha->flags.running_gold_fw) goto skip_dpc; /* * Startup the kernel thread for this host adapter */ ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, "%s_dpc", base_vha->host_str); if (IS_ERR(ha->dpc_thread)) { ql_log(ql_log_fatal, base_vha, 0x00ed, "Failed to start DPC thread.\n"); ret = PTR_ERR(ha->dpc_thread); ha->dpc_thread = NULL; goto probe_failed; } ql_dbg(ql_dbg_init, base_vha, 0x00ee, "DPC thread started successfully.\n"); /* * If we're not coming up in initiator mode, we might sit for * a while without waking up the dpc thread, which leads to a * stuck process 
warning. So just kick the dpc once here and * let the kthread start (and go back to sleep in qla2x00_do_dpc). */ qla2xxx_wake_dpc(base_vha); INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); INIT_WORK(&ha->idc_state_handler, qla83xx_idc_state_handler_work); INIT_WORK(&ha->nic_core_unrecoverable, qla83xx_nic_core_unrecoverable_work); } skip_dpc: list_add_tail(&base_vha->list, &ha->vp_list); base_vha->host->irq = ha->pdev->irq; /* Initialized the timer */ qla2x00_start_timer(base_vha, WATCH_INTERVAL); ql_dbg(ql_dbg_init, base_vha, 0x00ef, "Started qla2x00_timer with " "interval=%d.\n", WATCH_INTERVAL); ql_dbg(ql_dbg_init, base_vha, 0x00f0, "Detected hba at address=%p.\n", ha); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { int prot = 0, guard; base_vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_init, base_vha, 0x00f1, "Registering for DIF/DIX type 1 and 3 protection.\n"); if (ql2xprotmask) scsi_host_set_prot(host, ql2xprotmask); else scsi_host_set_prot(host, prot | SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION); guard = SHOST_DIX_GUARD_CRC; if (IS_PI_IPGUARD_CAPABLE(ha) && (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) guard |= SHOST_DIX_GUARD_IP; if (ql2xprotguard) scsi_host_set_guard(host, ql2xprotguard); else scsi_host_set_guard(host, guard); } else base_vha->flags.difdix_supported = 0; } ha->isp_ops->enable_intrs(ha); if (IS_QLAFX00(ha)) { ret = qlafx00_fx_disc(base_vha, &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); host->sg_tablesize = (ha->mr.extended_io_enabled) ? QLA_SG_ALL : 128; } ret = scsi_add_host(host, &pdev->dev); if (ret) goto probe_failed; base_vha->flags.init_done = 1; base_vha->flags.online = 1; ha->prev_minidump_failed = 0; ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); if (qla_ini_mode_enabled(base_vha) || qla_dual_mode_enabled(base_vha)) scsi_scan_host(host); else ql_log(ql_log_info, base_vha, 0x0122, "skipping scsi_scan_host() for non-initiator port\n"); qla2x00_alloc_sysfs_attr(base_vha); if (IS_QLAFX00(ha)) { ret = qlafx00_fx_disc(base_vha, &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); /* Register system information */ ret = qlafx00_fx_disc(base_vha, &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); } qla2x00_init_host_attr(base_vha); qla2x00_dfs_setup(base_vha); ql_log(ql_log_info, base_vha, 0x00fb, "QLogic %s - %s.\n", ha->model_number, ha->model_desc); ql_log(ql_log_info, base_vha, 0x00fc, "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, sizeof(pci_info)), pci_name(pdev), ha->flags.enable_64bit_addressing ? 
'+' : '-', base_vha->host_no, ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); qlt_add_target(ha, base_vha); clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); if (test_bit(UNLOADING, &base_vha->dpc_flags)) return -ENODEV; return 0; probe_failed: qla_enode_stop(base_vha); qla_edb_stop(base_vha); vfree(base_vha->scan.l); if (base_vha->gnl.l) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); base_vha->gnl.l = NULL; } if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; if (ha->dpc_thread) { struct task_struct *t = ha->dpc_thread; ha->dpc_thread = NULL; kthread_stop(t); } qla2x00_free_device(base_vha); scsi_host_put(base_vha->host); /* * Need to NULL out local req/rsp after * qla2x00_free_device => qla2x00_free_queues frees * what these are pointing to. Or else we'll * fall over below in qla2x00_free_req/rsp_que. */ req = NULL; rsp = NULL; probe_hw_failed: qla2x00_mem_free(ha); qla2x00_free_req_que(ha, req); qla2x00_free_rsp_que(ha, rsp); qla2x00_clear_drv_active(ha); iospace_config_failed: if (IS_P3P_TYPE(ha)) { if (!ha->nx_pcibase) iounmap((device_reg_t *)ha->nx_pcibase); if (!ql2xdbwr) iounmap((device_reg_t *)ha->nxdb_wr_ptr); } else { if (ha->iobase) iounmap(ha->iobase); if (ha->cregbase) iounmap(ha->cregbase); } pci_release_selected_regions(ha->pdev, ha->bars); kfree(ha); disable_device: pci_disable_device(pdev); return ret; } static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) { scsi_qla_host_t *vp; unsigned long flags; struct qla_hw_data *ha; if (!base_vha) return; ha = base_vha->hw; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); /* * Indicate device removal to prevent future board_disable * and wait until any pending board_disable has completed. */ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); spin_unlock_irqrestore(&ha->vport_slock, flags); } static void qla2x00_shutdown(struct pci_dev *pdev) { scsi_qla_host_t *vha; struct qla_hw_data *ha; vha = pci_get_drvdata(pdev); ha = vha->hw; ql_log(ql_log_info, vha, 0xfffa, "Adapter shutdown\n"); /* * Prevent future board_disable and wait * until any pending board_disable has completed. */ __qla_set_remove_flag(vha); cancel_work_sync(&ha->board_disable); if (!atomic_read(&pdev->enable_cnt)) return; /* Notify ISPFX00 firmware */ if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(vha, 20); /* Turn-off FCE trace */ if (ha->flags.fce_enabled) { qla2x00_disable_fce_trace(vha, NULL, NULL); ha->flags.fce_enabled = 0; } /* Turn-off EFT trace */ if (ha->eft) qla2x00_disable_eft_trace(vha); if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->flags.fw_started) qla2x00_abort_isp_cleanup(vha); } else { /* Stop currently executing firmware. 
*/ qla2x00_try_to_stop_firmware(vha); } /* Disable timer */ if (vha->timer_active) qla2x00_stop_timer(vha); /* Turn adapter off line */ vha->flags.online = 0; /* turn-off interrupts on the card */ if (ha->interrupts_on) { vha->flags.init_done = 0; ha->isp_ops->disable_intrs(ha); } qla2x00_free_irqs(vha); qla2x00_free_fw_dump(ha); pci_disable_device(pdev); ql_log(ql_log_info, vha, 0xfffe, "Adapter shutdown successfully.\n"); } /* Deletes all the virtual ports for a given ha */ static void qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) { scsi_qla_host_t *vha; unsigned long flags; mutex_lock(&ha->vport_lock); while (ha->cur_vport_count) { spin_lock_irqsave(&ha->vport_slock, flags); BUG_ON(base_vha->list.next == &ha->vp_list); /* This assumes first entry in ha->vp_list is always base vha */ vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); scsi_host_get(vha->host); spin_unlock_irqrestore(&ha->vport_slock, flags); mutex_unlock(&ha->vport_lock); qla_nvme_delete(vha); fc_vport_terminate(vha->fc_vport); scsi_host_put(vha->host); mutex_lock(&ha->vport_lock); } mutex_unlock(&ha->vport_lock); } /* Stops all deferred work threads */ static void qla2x00_destroy_deferred_work(struct qla_hw_data *ha) { /* Cancel all work and destroy DPC workqueues */ if (ha->dpc_lp_wq) { cancel_work_sync(&ha->idc_aen); destroy_workqueue(ha->dpc_lp_wq); ha->dpc_lp_wq = NULL; } if (ha->dpc_hp_wq) { cancel_work_sync(&ha->nic_core_reset); cancel_work_sync(&ha->idc_state_handler); cancel_work_sync(&ha->nic_core_unrecoverable); destroy_workqueue(ha->dpc_hp_wq); ha->dpc_hp_wq = NULL; } /* Kill the kernel thread for this host */ if (ha->dpc_thread) { struct task_struct *t = ha->dpc_thread; /* * qla2xxx_wake_dpc checks for ->dpc_thread * so we need to zero it out. */ ha->dpc_thread = NULL; kthread_stop(t); } } static void qla2x00_unmap_iobases(struct qla_hw_data *ha) { if (IS_QLA82XX(ha)) { iounmap((device_reg_t *)ha->nx_pcibase); if (!ql2xdbwr) iounmap((device_reg_t *)ha->nxdb_wr_ptr); } else { if (ha->iobase) iounmap(ha->iobase); if (ha->cregbase) iounmap(ha->cregbase); if (ha->mqiobase) iounmap(ha->mqiobase); if (ha->msixbase) iounmap(ha->msixbase); } } static void qla2x00_clear_drv_active(struct qla_hw_data *ha) { if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); qla8044_clear_drv_active(ha); qla8044_idc_unlock(ha); } else if (IS_QLA82XX(ha)) { qla82xx_idc_lock(ha); qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } } static void qla2x00_remove_one(struct pci_dev *pdev) { scsi_qla_host_t *base_vha; struct qla_hw_data *ha; base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; ql_log(ql_log_info, base_vha, 0xb079, "Removing driver\n"); __qla_set_remove_flag(base_vha); cancel_work_sync(&ha->board_disable); /* * If the PCI device is disabled then there was a PCI-disconnect and * qla2x00_disable_board_on_pci_error has taken care of most of the * resources. */ if (!atomic_read(&pdev->enable_cnt)) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); base_vha->gnl.l = NULL; scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); return; } qla2x00_wait_for_hba_ready(base_vha); /* * if UNLOADING flag is already set, then continue unload, * where it was set first. 
*/ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) return; if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->flags.fw_started) qla2x00_abort_isp_cleanup(base_vha); } else if (!IS_QLAFX00(ha)) { if (IS_QLA8031(ha)) { ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, "Clearing fcoe driver presence.\n"); if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) ql_dbg(ql_dbg_p3p, base_vha, 0xb079, "Error while clearing DRV-Presence.\n"); } qla2x00_try_to_stop_firmware(base_vha); } qla2x00_wait_for_sess_deletion(base_vha); qla_nvme_delete(base_vha); dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); base_vha->gnl.l = NULL; qla_enode_stop(base_vha); qla_edb_stop(base_vha); vfree(base_vha->scan.l); if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(base_vha, 20); qla2x00_delete_all_vps(ha, base_vha); qla2x00_dfs_remove(base_vha); qla84xx_put_chip(base_vha); /* Disable timer */ if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; /* free DMA memory */ if (ha->exlogin_buf) qla2x00_free_exlogin_buffer(ha); /* free DMA memory */ if (ha->exchoffld_buf) qla2x00_free_exchoffld_buffer(ha); qla2x00_destroy_deferred_work(ha); qlt_remove_target(ha, base_vha); qla2x00_free_sysfs_attr(base_vha, true); fc_remove_host(base_vha->host); scsi_remove_host(base_vha->host); qla2x00_free_device(base_vha); qla2x00_clear_drv_active(ha); scsi_host_put(base_vha->host); qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); kfree(ha); pci_disable_device(pdev); } static inline void qla24xx_free_purex_list(struct purex_list *list) { struct purex_item *item, *next; ulong flags; spin_lock_irqsave(&list->lock, flags); list_for_each_entry_safe(item, next, &list->head, list) { list_del(&item->list); if (item == &item->vha->default_item) continue; kfree(item); } spin_unlock_irqrestore(&list->lock, flags); } static void qla2x00_free_device(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); /* Disable timer */ if (vha->timer_active) qla2x00_stop_timer(vha); qla25xx_delete_queues(vha); vha->flags.online = 0; /* turn-off interrupts on the card */ if (ha->interrupts_on) { vha->flags.init_done = 0; ha->isp_ops->disable_intrs(ha); } qla2x00_free_fcports(vha); qla2x00_free_irqs(vha); /* Flush the work queue and remove it */ if (ha->wq) { destroy_workqueue(ha->wq); ha->wq = NULL; } qla24xx_free_purex_list(&vha->purex_list); qla2x00_mem_free(ha); qla82xx_md_free(vha); qla_edif_sadb_release_free_pool(ha); qla_edif_sadb_release(ha); qla2x00_free_queues(ha); } void qla2x00_free_fcports(struct scsi_qla_host *vha) { fc_port_t *fcport, *tfcport; list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) qla2x00_free_fcport(fcport); } static inline void qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) { int now; if (!fcport->rport) return; if (fcport->rport) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, "%s %8phN. rport %p roles %x\n", __func__, fcport->port_name, fcport->rport, fcport->rport->roles); fc_remote_port_delete(fcport->rport); } qlt_do_generation_tick(vha, &now); } /* * qla2x00_mark_device_lost Updates fcport state when device goes offline. * * Input: ha = adapter block pointer. fcport = port structure pointer. * * Return: None. 
* * Context: */ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, int do_login) { if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); qla2x00_schedule_rport_del(vha, fcport); return; } if (atomic_read(&fcport->state) == FCS_ONLINE && vha->vp_idx == fcport->vha->vp_idx) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); qla2x00_schedule_rport_del(vha, fcport); } /* * We may need to retry the login, so don't change the state of the * port but do the retries. */ if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); if (!do_login) return; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } void qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) { fc_port_t *fcport; ql_dbg(ql_dbg_disc, vha, 0x20f1, "Mark all dev lost\n"); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (ql2xfc2target && fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) && fcport->port_type == FCT_TARGET && !qla2x00_reset_active(vha)) { ql_dbg(ql_dbg_disc, vha, 0x211a, "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC", fcport->flags, fcport->port_type, fcport->d_id.b24, fcport->port_name); continue; } fcport->scan_state = 0; qlt_schedule_sess_for_deletion(fcport); } } static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) { int i; if (IS_FWI2_CAPABLE(ha)) return; for (i = 0; i < SNS_FIRST_LOOP_ID; i++) set_bit(i, ha->loop_id_map); set_bit(MANAGEMENT_SERVER, ha->loop_id_map); set_bit(BROADCAST, ha->loop_id_map); } /* * qla2x00_mem_alloc * Allocates adapter memory. * * Returns: * 0 = success. * !0 = failure. */ static int qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, struct req_que **req, struct rsp_que **rsp) { char name[16]; int rc; if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) { ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL); if (!ha->vp_map) goto fail; } ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL); if (!ha->init_cb) goto fail_free_vp_map; rc = btree_init32(&ha->host_map); if (rc) goto fail_free_init_cb; if (qlt_mem_alloc(ha) < 0) goto fail_free_btree; ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); if (!ha->gid_list) goto fail_free_tgt_mem; ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); if (!ha->srb_mempool) goto fail_free_gid_list; if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { /* Allocate cache for CT6 Ctx. 
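	 * (ctx_cachep is not local to this function: it is created only while
	 * it is still NULL, so the slab cache is set up once and reused by
	 * adapters probed later, while each adapter builds its own
	 * ctx_mempool on top of it.)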
*/ if (!ctx_cachep) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", sizeof(struct ct6_dsd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ctx_cachep) goto fail_free_srb_mempool; } ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, ctx_cachep); if (!ha->ctx_mempool) goto fail_free_srb_mempool; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, "ctx_cachep=%p ctx_mempool=%p.\n", ctx_cachep, ha->ctx_mempool); } /* Get memory for cached NVRAM */ ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); if (!ha->nvram) goto fail_free_ctx_mempool; snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME, ha->pdev->device); ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, DMA_POOL_SIZE, 8, 0); if (!ha->s_dma_pool) goto fail_free_nvram; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, DSD_LIST_DMA_POOL_SIZE, 8, 0); if (!ha->dl_dma_pool) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0023, "Failed to allocate memory for dl_dma_pool.\n"); goto fail_s_dma_pool; } ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, FCP_CMND_DMA_POOL_SIZE, 8, 0); if (!ha->fcp_cmnd_dma_pool) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0024, "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); goto fail_dl_dma_pool; } if (ql2xenabledif) { u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; struct dsd_dma *dsd, *nxt; uint i; /* Creata a DMA pool of buffers for DIF bundling */ ha->dif_bundl_pool = dma_pool_create(name, &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); if (!ha->dif_bundl_pool) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, "%s: failed create dif_bundl_pool\n", __func__); goto fail_dif_bundl_dma_pool; } INIT_LIST_HEAD(&ha->pool.good.head); INIT_LIST_HEAD(&ha->pool.unusable.head); ha->pool.good.count = 0; ha->pool.unusable.count = 0; for (i = 0; i < 128; i++) { dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); if (!dsd) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0xe0ee, "%s: failed alloc dsd\n", __func__); return -ENOMEM; } ha->dif_bundle_kallocs++; dsd->dsd_addr = dma_pool_alloc( ha->dif_bundl_pool, GFP_ATOMIC, &dsd->dsd_list_dma); if (!dsd->dsd_addr) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0xe0ee, "%s: failed alloc ->dsd_addr\n", __func__); kfree(dsd); ha->dif_bundle_kallocs--; continue; } ha->dif_bundle_dma_allocs++; /* * if DMA buffer crosses 4G boundary, * put it on bad list */ if (MSD(dsd->dsd_list_dma) ^ MSD(dsd->dsd_list_dma + bufsize)) { list_add_tail(&dsd->list, &ha->pool.unusable.head); ha->pool.unusable.count++; } else { list_add_tail(&dsd->list, &ha->pool.good.head); ha->pool.good.count++; } } /* return the good ones back to the pool */ list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { list_del(&dsd->list); dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, dsd->dsd_list_dma); ha->dif_bundle_dma_allocs--; kfree(dsd); ha->dif_bundle_kallocs--; } ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, "%s: dif dma pool (good=%u unusable=%u)\n", __func__, ha->pool.good.count, ha->pool.unusable.count); } ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, ha->dif_bundl_pool); } /* Allocate memory for SNS commands */ if (IS_QLA2100(ha) || IS_QLA2200(ha)) { /* Get consistent memory allocated for SNS commands */ ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); if 
(!ha->sns_cmd) goto fail_dma_pool; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, "sns_cmd: %p.\n", ha->sns_cmd); } else { /* Get consistent memory allocated for MS IOCB */ ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->ms_iocb_dma); if (!ha->ms_iocb) goto fail_dma_pool; /* Get consistent memory allocated for CT SNS commands */ ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); if (!ha->ct_sns) goto fail_free_ms_iocb; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, "ms_iocb=%p ct_sns=%p.\n", ha->ms_iocb, ha->ct_sns); } /* Allocate memory for request ring */ *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (!*req) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0028, "Failed to allocate memory for req.\n"); goto fail_req; } (*req)->length = req_len; (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, ((*req)->length + 1) * sizeof(request_t), &(*req)->dma, GFP_KERNEL); if (!(*req)->ring) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0029, "Failed to allocate memory for req_ring.\n"); goto fail_req_ring; } /* Allocate memory for response ring */ *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (!*rsp) { ql_log_pci(ql_log_fatal, ha->pdev, 0x002a, "Failed to allocate memory for rsp.\n"); goto fail_rsp; } (*rsp)->hw = ha; (*rsp)->length = rsp_len; (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * sizeof(response_t), &(*rsp)->dma, GFP_KERNEL); if (!(*rsp)->ring) { ql_log_pci(ql_log_fatal, ha->pdev, 0x002b, "Failed to allocate memory for rsp_ring.\n"); goto fail_rsp_ring; } (*req)->rsp = *rsp; (*rsp)->req = *req; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, "req=%p req->length=%d req->ring=%p rsp=%p " "rsp->length=%d rsp->ring=%p.\n", *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, (*rsp)->ring); /* Allocate memory for NVRAM data for vports */ if (ha->nvram_npiv_size) { ha->npiv_info = kcalloc(ha->nvram_npiv_size, sizeof(struct qla_npiv_entry), GFP_KERNEL); if (!ha->npiv_info) { ql_log_pci(ql_log_fatal, ha->pdev, 0x002d, "Failed to allocate memory for npiv_info.\n"); goto fail_npiv_info; } } else ha->npiv_info = NULL; /* Get consistent memory allocated for EX-INIT-CB. */ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->ex_init_cb_dma); if (!ha->ex_init_cb) goto fail_ex_init_cb; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, "ex_init_cb=%p.\n", ha->ex_init_cb); } /* Get consistent memory allocated for Special Features-CB. */ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &ha->sf_init_cb_dma); if (!ha->sf_init_cb) goto fail_sf_init_cb; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, "sf_init_cb=%p.\n", ha->sf_init_cb); } /* Get consistent memory allocated for Async Port-Database. 
*/ if (!IS_FWI2_CAPABLE(ha)) { ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->async_pd_dma); if (!ha->async_pd) goto fail_async_pd; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, "async_pd=%p.\n", ha->async_pd); } INIT_LIST_HEAD(&ha->vp_list); /* Allocate memory for our loop_id bitmap */ ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE), sizeof(long), GFP_KERNEL); if (!ha->loop_id_map) goto fail_loop_id_map; else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, "loop_id_map=%p.\n", ha->loop_id_map); } ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL); if (!ha->sfp_data) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, "Unable to allocate memory for SFP read-data.\n"); goto fail_sfp_data; } ha->flt = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, GFP_KERNEL); if (!ha->flt) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, "Unable to allocate memory for FLT.\n"); goto fail_flt_buffer; } /* allocate the purex dma pool */ ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, ELS_MAX_PAYLOAD, 8, 0); if (!ha->purex_dma_pool) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, "Unable to allocate purex_dma_pool.\n"); goto fail_flt; } ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL); if (!ha->elsrej.c) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, "Alloc failed for els reject cmd.\n"); goto fail_elsrej; } ha->elsrej.c->er_cmd = ELS_LS_RJT; ha->elsrej.c->er_reason = ELS_RJT_LOGIC; ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt); ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size, &ha->lsrjt.cdma, GFP_KERNEL); if (!ha->lsrjt.c) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, "Alloc failed for nvme fc reject cmd.\n"); goto fail_lsrjt; } return 0; fail_lsrjt: dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, ha->elsrej.c, ha->elsrej.cdma); fail_elsrej: dma_pool_destroy(ha->purex_dma_pool); fail_flt: dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->flt, ha->flt_dma); fail_flt_buffer: dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, ha->sfp_data_dma); fail_sfp_data: kfree(ha->loop_id_map); fail_loop_id_map: dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); fail_async_pd: dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); fail_sf_init_cb: dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: kfree(ha->npiv_info); fail_npiv_info: dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * sizeof(response_t), (*rsp)->ring, (*rsp)->dma); (*rsp)->ring = NULL; (*rsp)->dma = 0; fail_rsp_ring: kfree(*rsp); *rsp = NULL; fail_rsp: dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * sizeof(request_t), (*req)->ring, (*req)->dma); (*req)->ring = NULL; (*req)->dma = 0; fail_req_ring: kfree(*req); *req = NULL; fail_req: dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), ha->ct_sns, ha->ct_sns_dma); ha->ct_sns = NULL; ha->ct_sns_dma = 0; fail_free_ms_iocb: dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = NULL; ha->ms_iocb_dma = 0; if (ha->sns_cmd) dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ha->sns_cmd, ha->sns_cmd_dma); fail_dma_pool: if (ql2xenabledif) { struct dsd_dma *dsd, *nxt; list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, list) { list_del(&dsd->list); dma_pool_free(ha->dif_bundl_pool, 
dsd->dsd_addr, dsd->dsd_list_dma);
			ha->dif_bundle_dma_allocs--;
			kfree(dsd);
			ha->dif_bundle_kallocs--;
			ha->pool.unusable.count--;
		}
		dma_pool_destroy(ha->dif_bundl_pool);
		ha->dif_bundl_pool = NULL;
	}

fail_dif_bundl_dma_pool:
	if (IS_QLA82XX(ha) || ql2xenabledif) {
		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
		ha->fcp_cmnd_dma_pool = NULL;
	}
fail_dl_dma_pool:
	if (IS_QLA82XX(ha) || ql2xenabledif) {
		dma_pool_destroy(ha->dl_dma_pool);
		ha->dl_dma_pool = NULL;
	}
fail_s_dma_pool:
	dma_pool_destroy(ha->s_dma_pool);
	ha->s_dma_pool = NULL;
fail_free_nvram:
	kfree(ha->nvram);
	ha->nvram = NULL;
fail_free_ctx_mempool:
	mempool_destroy(ha->ctx_mempool);
	ha->ctx_mempool = NULL;
fail_free_srb_mempool:
	mempool_destroy(ha->srb_mempool);
	ha->srb_mempool = NULL;
fail_free_gid_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    ha->gid_list, ha->gid_list_dma);
	ha->gid_list = NULL;
	ha->gid_list_dma = 0;
fail_free_tgt_mem:
	qlt_mem_free(ha);
fail_free_btree:
	btree_destroy32(&ha->host_map);
fail_free_init_cb:
	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
	    ha->init_cb_dma);
	ha->init_cb = NULL;
	ha->init_cb_dma = 0;
fail_free_vp_map:
	kfree(ha->vp_map);
fail:
	ql_log(ql_log_fatal, NULL, 0x0030,
	    "Memory allocation failure.\n");
	return -ENOMEM;
}

int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t size, max_cnt;
	uint32_t temp;
	struct qla_hw_data *ha = vha->hw;

	/* Return if we don't need to allocate any extended logins */
	if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
		return QLA_SUCCESS;

	if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
		return QLA_SUCCESS;

	ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
	max_cnt = 0;
	rval = qla_get_exlogin_status(vha, &size, &max_cnt);
	if (rval != QLA_SUCCESS) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
		    "Failed to get exlogin status.\n");
		return rval;
	}

	temp = (ql2xexlogins > max_cnt) ?
max_cnt : ql2xexlogins; temp *= size; if (temp != ha->exlogin_size) { qla2x00_free_exlogin_buffer(ha); ha->exlogin_size = temp; ql_log(ql_log_info, vha, 0xd024, "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", max_cnt, size, temp); ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n", ha->exlogin_size); /* Get consistent memory for extended logins */ ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); if (!ha->exlogin_buf) { ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, "Failed to allocate memory for exlogin_buf_dma.\n"); return -ENOMEM; } } /* Now configure the dma buffer */ rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); if (rval) { ql_log(ql_log_fatal, vha, 0xd033, "Setup extended login buffer ****FAILED****.\n"); qla2x00_free_exlogin_buffer(ha); } return rval; } /* * qla2x00_free_exlogin_buffer * * Input: * ha = adapter block pointer */ void qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) { if (ha->exlogin_buf) { dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, ha->exlogin_buf, ha->exlogin_buf_dma); ha->exlogin_buf = NULL; ha->exlogin_size = 0; } } static void qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) { u32 temp; struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; *ret_cnt = FW_DEF_EXCHANGES_CNT; if (max_cnt > vha->hw->max_exchg) max_cnt = vha->hw->max_exchg; if (qla_ini_mode_enabled(vha)) { if (vha->ql2xiniexchg > max_cnt) vha->ql2xiniexchg = max_cnt; if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT) *ret_cnt = vha->ql2xiniexchg; } else if (qla_tgt_mode_enabled(vha)) { if (vha->ql2xexchoffld > max_cnt) { vha->ql2xexchoffld = max_cnt; icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); } if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT) *ret_cnt = vha->ql2xexchoffld; } else if (qla_dual_mode_enabled(vha)) { temp = vha->ql2xiniexchg + vha->ql2xexchoffld; if (temp > max_cnt) { vha->ql2xiniexchg -= (temp - max_cnt)/2; vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1); temp = max_cnt; icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); } if (temp > FW_DEF_EXCHANGES_CNT) *ret_cnt = temp; } } int qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) { int rval; u16 size, max_cnt; u32 actual_cnt, totsz; struct qla_hw_data *ha = vha->hw; if (!ha->flags.exchoffld_enabled) return QLA_SUCCESS; if (!IS_EXCHG_OFFLD_CAPABLE(ha)) return QLA_SUCCESS; max_cnt = 0; rval = qla_get_exchoffld_status(vha, &size, &max_cnt); if (rval != QLA_SUCCESS) { ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, "Failed to get exlogin status.\n"); return rval; } qla2x00_number_of_exch(vha, &actual_cnt, max_cnt); ql_log(ql_log_info, vha, 0xd014, "Actual exchange offload count: %d.\n", actual_cnt); totsz = actual_cnt * size; if (totsz != ha->exchoffld_size) { qla2x00_free_exchoffld_buffer(ha); if (actual_cnt <= FW_DEF_EXCHANGES_CNT) { ha->exchoffld_size = 0; ha->flags.exchoffld_enabled = 0; return QLA_SUCCESS; } ha->exchoffld_size = totsz; ql_log(ql_log_info, vha, 0xd016, "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n", max_cnt, actual_cnt, size, totsz); ql_log(ql_log_info, vha, 0xd017, "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size); /* Get consistent memory for extended logins */ ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); if (!ha->exchoffld_buf) { ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, "Failed to allocate memory for Exchange Offload.\n"); if (ha->max_exchg > (FW_DEF_EXCHANGES_CNT + 
REDUCE_EXCHANGES_CNT)) { ha->max_exchg -= REDUCE_EXCHANGES_CNT; } else if (ha->max_exchg > (FW_DEF_EXCHANGES_CNT + 512)) { ha->max_exchg -= 512; } else { ha->flags.exchoffld_enabled = 0; ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, "Disabling Exchange offload due to lack of memory\n"); } ha->exchoffld_size = 0; return -ENOMEM; } } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) { /* pathological case */ qla2x00_free_exchoffld_buffer(ha); ha->exchoffld_size = 0; ha->flags.exchoffld_enabled = 0; ql_log(ql_log_info, vha, 0xd016, "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n", ha->exchoffld_size, actual_cnt, size, totsz); return 0; } /* Now configure the dma buffer */ rval = qla_set_exchoffld_mem_cfg(vha); if (rval) { ql_log(ql_log_fatal, vha, 0xd02e, "Setup exchange offload buffer ****FAILED****.\n"); qla2x00_free_exchoffld_buffer(ha); } else { /* re-adjust number of target exchange */ struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb; if (qla_ini_mode_enabled(vha)) icb->exchange_count = 0; else icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld); } return rval; } /* * qla2x00_free_exchoffld_buffer * * Input: * ha = adapter block pointer */ void qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) { if (ha->exchoffld_buf) { dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, ha->exchoffld_buf, ha->exchoffld_buf_dma); ha->exchoffld_buf = NULL; ha->exchoffld_size = 0; } } /* * qla2x00_free_fw_dump * Frees fw dump stuff. * * Input: * ha = adapter block pointer */ static void qla2x00_free_fw_dump(struct qla_hw_data *ha) { struct fwdt *fwdt = ha->fwdt; uint j; if (ha->fce) dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma); if (ha->eft) dma_free_coherent(&ha->pdev->dev, EFT_SIZE, ha->eft, ha->eft_dma); vfree(ha->fw_dump); ha->fce = NULL; ha->fce_dma = 0; ha->flags.fce_enabled = 0; ha->eft = NULL; ha->eft_dma = 0; ha->fw_dumped = false; ha->fw_dump_cap_flags = 0; ha->fw_dump_reading = 0; ha->fw_dump = NULL; ha->fw_dump_len = 0; for (j = 0; j < 2; j++, fwdt++) { vfree(fwdt->template); fwdt->template = NULL; fwdt->length = 0; } } /* * qla2x00_mem_free * Frees all adapter allocated memory. * * Input: * ha = adapter block pointer. 
*/ static void qla2x00_mem_free(struct qla_hw_data *ha) { qla2x00_free_fw_dump(ha); if (ha->mctp_dump) dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, ha->mctp_dump_dma); ha->mctp_dump = NULL; mempool_destroy(ha->srb_mempool); ha->srb_mempool = NULL; if (ha->dcbx_tlv) dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, ha->dcbx_tlv, ha->dcbx_tlv_dma); ha->dcbx_tlv = NULL; if (ha->xgmac_data) dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, ha->xgmac_data, ha->xgmac_data_dma); ha->xgmac_data = NULL; if (ha->sns_cmd) dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ha->sns_cmd, ha->sns_cmd_dma); ha->sns_cmd = NULL; ha->sns_cmd_dma = 0; if (ha->ct_sns) dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), ha->ct_sns, ha->ct_sns_dma); ha->ct_sns = NULL; ha->ct_sns_dma = 0; if (ha->sfp_data) dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, ha->sfp_data_dma); ha->sfp_data = NULL; if (ha->flt) dma_free_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, ha->flt, ha->flt_dma); ha->flt = NULL; ha->flt_dma = 0; if (ha->ms_iocb) dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = NULL; ha->ms_iocb_dma = 0; if (ha->sf_init_cb) dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); if (ha->ex_init_cb) dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); ha->ex_init_cb = NULL; ha->ex_init_cb_dma = 0; if (ha->async_pd) dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); ha->async_pd = NULL; ha->async_pd_dma = 0; dma_pool_destroy(ha->s_dma_pool); ha->s_dma_pool = NULL; if (ha->gid_list) dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), ha->gid_list, ha->gid_list_dma); ha->gid_list = NULL; ha->gid_list_dma = 0; if (!list_empty(&ha->base_qpair->dsd_list)) { struct dsd_dma *dsd_ptr, *tdsd_ptr; /* clean up allocated prev pool */ list_for_each_entry_safe(dsd_ptr, tdsd_ptr, &ha->base_qpair->dsd_list, list) { dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma); list_del(&dsd_ptr->list); kfree(dsd_ptr); } } dma_pool_destroy(ha->dl_dma_pool); ha->dl_dma_pool = NULL; dma_pool_destroy(ha->fcp_cmnd_dma_pool); ha->fcp_cmnd_dma_pool = NULL; mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = NULL; if (ql2xenabledif && ha->dif_bundl_pool) { struct dsd_dma *dsd, *nxt; list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, list) { list_del(&dsd->list); dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, dsd->dsd_list_dma); ha->dif_bundle_dma_allocs--; kfree(dsd); ha->dif_bundle_kallocs--; ha->pool.unusable.count--; } list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { list_del(&dsd->list); dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, dsd->dsd_list_dma); ha->dif_bundle_dma_allocs--; kfree(dsd); ha->dif_bundle_kallocs--; } } dma_pool_destroy(ha->dif_bundl_pool); ha->dif_bundl_pool = NULL; qlt_mem_free(ha); qla_remove_hostmap(ha); if (ha->init_cb) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); dma_pool_destroy(ha->purex_dma_pool); ha->purex_dma_pool = NULL; if (ha->elsrej.c) { dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, ha->elsrej.c, ha->elsrej.cdma); ha->elsrej.c = NULL; } if (ha->lsrjt.c) { dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c, ha->lsrjt.cdma); ha->lsrjt.c = NULL; } ha->init_cb = NULL; ha->init_cb_dma = 0; vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; kfree(ha->nvram); ha->nvram = NULL; kfree(ha->npiv_info); ha->npiv_info = NULL; kfree(ha->swl); ha->swl = NULL; 
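	/*
	 * The last of the allocations made in qla2x00_mem_alloc() (the
	 * loop_id bitmap and the vp_map array) are released below; as with
	 * the buffers above, each pointer is cleared after being freed so a
	 * repeated cleanup pass cannot double-free it.
	 */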
kfree(ha->loop_id_map); ha->sf_init_cb = NULL; ha->sf_init_cb_dma = 0; ha->loop_id_map = NULL; kfree(ha->vp_map); ha->vp_map = NULL; } struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *sht, struct qla_hw_data *ha) { struct Scsi_Host *host; struct scsi_qla_host *vha = NULL; host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); if (!host) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0107, "Failed to allocate host from the scsi layer, aborting.\n"); return NULL; } /* Clear our data area */ vha = shost_priv(host); memset(vha, 0, sizeof(scsi_qla_host_t)); vha->host = host; vha->host_no = host->host_no; vha->hw = ha; vha->qlini_mode = ql2x_ini_mode; vha->ql2xexchoffld = ql2xexchoffld; vha->ql2xiniexchg = ql2xiniexchg; INIT_LIST_HEAD(&vha->vp_fcports); INIT_LIST_HEAD(&vha->work_list); INIT_LIST_HEAD(&vha->list); INIT_LIST_HEAD(&vha->qla_cmd_list); INIT_LIST_HEAD(&vha->logo_list); INIT_LIST_HEAD(&vha->plogi_ack_list); INIT_LIST_HEAD(&vha->qp_list); INIT_LIST_HEAD(&vha->gnl.fcports); INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); INIT_LIST_HEAD(&vha->purex_list.head); spin_lock_init(&vha->purex_list.lock); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); init_waitqueue_head(&vha->fcport_waitQ); init_waitqueue_head(&vha->vref_waitq); qla_enode_init(vha); qla_edb_init(vha); vha->gnl.size = sizeof(struct get_name_list_extended) * (ha->max_loop_id + 1); vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL); if (!vha->gnl.l) { ql_log(ql_log_fatal, vha, 0xd04a, "Alloc failed for name list.\n"); scsi_host_put(vha->host); return NULL; } /* todo: what about ext login? */ vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp); vha->scan.l = vmalloc(vha->scan.size); if (!vha->scan.l) { ql_log(ql_log_fatal, vha, 0xd04a, "Alloc failed for scan database.\n"); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); vha->gnl.l = NULL; scsi_host_put(vha->host); return NULL; } INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, "Allocated the host=%p hw=%p vha=%p dev_name=%s", vha->host, vha->hw, vha, dev_name(&(ha->pdev->dev))); return vha; } struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; if (test_bit(UNLOADING, &vha->dpc_flags)) return NULL; if (qla_vha_mark_busy(vha)) return NULL; e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); if (!e) { QLA_VHA_MARK_NOT_BUSY(vha); return NULL; } INIT_LIST_HEAD(&e->list); e->type = type; e->flags = QLA_EVT_FLAG_FREE; return e; } int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { unsigned long flags; bool q = false; spin_lock_irqsave(&vha->work_lock, flags); list_add_tail(&e->list, &vha->work_list); if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) q = true; spin_unlock_irqrestore(&vha->work_lock, flags); if (q) queue_work(vha->hw->wq, &vha->iocb_work); return QLA_SUCCESS; } int qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, u32 data) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_AEN); if (!e) return QLA_FUNCTION_FAILED; e->u.aen.code = code; e->u.aen.data = data; return qla2x00_post_work(vha, e); } int qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); if (!e) return QLA_FUNCTION_FAILED; 
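	/*
	 * qla2x00_alloc_work() above marked the vha busy and set e->type to
	 * QLA_EVT_IDC_ACK; copy the mailbox registers into the type-specific
	 * payload and hand the event to qla2x00_post_work(), which queues it
	 * on vha->work_list and kicks vha->iocb_work so that
	 * qla2x00_do_work() dispatches it to qla81xx_idc_ack().
	 */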
memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); return qla2x00_post_work(vha, e); } #define qla2x00_post_async_work(name, type) \ int qla2x00_post_async_##name##_work( \ struct scsi_qla_host *vha, \ fc_port_t *fcport, uint16_t *data) \ { \ struct qla_work_evt *e; \ \ e = qla2x00_alloc_work(vha, type); \ if (!e) \ return QLA_FUNCTION_FAILED; \ \ e->u.logio.fcport = fcport; \ if (data) { \ e->u.logio.data[0] = data[0]; \ e->u.logio.data[1] = data[1]; \ } \ fcport->flags |= FCF_ASYNC_ACTIVE; \ return qla2x00_post_work(vha, e); \ } qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); int qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT); if (!e) return QLA_FUNCTION_FAILED; e->u.uevent.code = code; return qla2x00_post_work(vha, e); } static void qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) { char event_string[40]; char *envp[] = { event_string, NULL }; switch (code) { case QLA_UEVENT_CODE_FW_DUMP: snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", vha->host_no); break; default: /* do nothing */ break; } kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); } int qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, uint32_t *data, int cnt) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); if (!e) return QLA_FUNCTION_FAILED; e->u.aenfx.evtcode = evtcode; e->u.aenfx.count = cnt; memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); return qla2x00_post_work(vha, e); } void qla24xx_sched_upd_fcport(fc_port_t *fcport) { unsigned long flags; if (IS_SW_RESV_ADDR(fcport->d_id)) return; spin_lock_irqsave(&fcport->vha->work_lock, flags); if (fcport->disc_state == DSC_UPD_FCPORT) { spin_unlock_irqrestore(&fcport->vha->work_lock, flags); return; } fcport->jiffies_at_registration = jiffies; fcport->sec_since_registration = 0; fcport->next_disc_state = DSC_DELETED; qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); spin_unlock_irqrestore(&fcport->vha->work_lock, flags); queue_work(system_unbound_wq, &fcport->reg_work); } static void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) { unsigned long flags; fc_port_t *fcport = NULL, *tfcp; struct qlt_plogi_ack_t *pla = (struct qlt_plogi_ack_t *)e->u.new_sess.pla; uint8_t free_fcport = 0; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC enter\n", __func__, __LINE__, e->u.new_sess.port_name); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); if (fcport) { fcport->d_id = e->u.new_sess.id; if (pla) { fcport->fw_login_state = DSC_LS_PLOGI_PEND; memcpy(fcport->node_name, pla->iocb.u.isp24.u.plogi.node_name, WWN_SIZE); qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); /* we took an extra ref_count to prevent PLOGI ACK when * fcport/sess has not been created. 
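			 * Now that qlt_plogi_ack_link() has tied this fcport
			 * to the PLOGI ack, that extra reference is no longer
			 * needed and is dropped just below (pla->ref_count--).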
*/ pla->ref_count--; } } else { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (fcport) { fcport->d_id = e->u.new_sess.id; fcport->flags |= FCF_FABRIC_DEVICE; fcport->fw_login_state = DSC_LS_PLOGI_PEND; fcport->tgt_short_link_down_cnt = 0; memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); fcport->fc4_type = e->u.new_sess.fc4_type; if (NVME_PRIORITY(vha->hw, fcport)) fcport->do_prli_nvme = 1; else fcport->do_prli_nvme = 0; if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { fcport->dm_login_expire = jiffies + QLA_N2N_WAIT_TIME * HZ; fcport->fc4_type = FS_FC4TYPE_FCP; fcport->n2n_flag = 1; if (vha->flags.nvme_enabled) fcport->fc4_type |= FS_FC4TYPE_NVME; } } else { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC mem alloc fail.\n", __func__, e->u.new_sess.port_name); if (pla) { list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); } return; } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* search again to make sure no one else got ahead */ tfcp = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); if (tfcp) { /* should rarily happen */ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC found existing fcport b4 add. DS %d LS %d\n", __func__, tfcp->port_name, tfcp->disc_state, tfcp->fw_login_state); free_fcport = 1; } else { list_add_tail(&fcport->list, &vha->vp_fcports); } if (pla) { qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); pla->ref_count--; } } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); if (fcport) { fcport->id_changed = 1; fcport->scan_state = QLA_FCPORT_FOUND; fcport->chip_reset = vha->hw->base_qpair->chip_reset; memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); if (pla) { if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) { u16 wd3_lo; fcport->fw_login_state = DSC_LS_PRLI_PEND; fcport->local = 0; fcport->loop_id = le16_to_cpu( pla->iocb.u.isp24.nport_handle); fcport->fw_login_state = DSC_LS_PRLI_PEND; wd3_lo = le16_to_cpu( pla->iocb.u.isp24.u.prli.wd3_lo); if (wd3_lo & BIT_7) fcport->conf_compl_supported = 1; if ((wd3_lo & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; } qlt_plogi_ack_unref(vha, pla); } else { fc_port_t *dfcp = NULL; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); tfcp = qla2x00_find_fcport_by_nportid(vha, &e->u.new_sess.id, 1); if (tfcp && (tfcp != fcport)) { /* * We have a conflict fcport with same NportID. */ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC found conflict b4 add. 
DS %d LS %d\n", __func__, tfcp->port_name, tfcp->disc_state, tfcp->fw_login_state); switch (tfcp->disc_state) { case DSC_DELETED: break; case DSC_DELETE_PEND: fcport->login_pause = 1; tfcp->conflict = fcport; break; default: fcport->login_pause = 1; tfcp->conflict = fcport; dfcp = tfcp; break; } } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); if (dfcp) qlt_schedule_sess_for_deletion(tfcp); if (N2N_TOPO(vha->hw)) { fcport->flags &= ~FCF_FABRIC_DEVICE; fcport->keep_nport_handle = 1; if (vha->flags.nvme_enabled) { fcport->fc4_type = (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); fcport->n2n_flag = 1; } fcport->fw_login_state = 0; schedule_delayed_work(&vha->scan.scan_work, 5); } else { qla24xx_fcport_handle_login(vha, fcport); } } } if (free_fcport) { qla2x00_free_fcport(fcport); if (pla) { list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); } } } static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e) { struct srb *sp = e->u.iosb.sp; int rval; rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2043, "%s: %s: Re-issue IOCB failed (%d).\n", __func__, sp->name, rval); qla24xx_sp_unmap(vha, sp); } } void qla2x00_do_work(struct scsi_qla_host *vha) { struct qla_work_evt *e, *tmp; unsigned long flags; LIST_HEAD(work); int rc; spin_lock_irqsave(&vha->work_lock, flags); list_splice_init(&vha->work_list, &work); spin_unlock_irqrestore(&vha->work_lock, flags); list_for_each_entry_safe(e, tmp, &work, list) { rc = QLA_SUCCESS; switch (e->type) { case QLA_EVT_AEN: fc_host_post_event(vha->host, fc_get_event_number(), e->u.aen.code, e->u.aen.data); break; case QLA_EVT_IDC_ACK: qla81xx_idc_ack(vha, e->u.idc_ack.mb); break; case QLA_EVT_ASYNC_LOGIN: qla2x00_async_login(vha, e->u.logio.fcport, e->u.logio.data); break; case QLA_EVT_ASYNC_LOGOUT: rc = qla2x00_async_logout(vha, e->u.logio.fcport); break; case QLA_EVT_ASYNC_ADISC: qla2x00_async_adisc(vha, e->u.logio.fcport, e->u.logio.data); break; case QLA_EVT_UEVENT: qla2x00_uevent_emit(vha, e->u.uevent.code); break; case QLA_EVT_AENFX: qlafx00_process_aen(vha, e); break; case QLA_EVT_UNMAP: qla24xx_sp_unmap(vha, e->u.iosb.sp); break; case QLA_EVT_RELOGIN: qla2x00_relogin(vha); break; case QLA_EVT_NEW_SESS: qla24xx_create_new_sess(vha, e); break; case QLA_EVT_GPDB: qla24xx_async_gpdb(vha, e->u.fcport.fcport, e->u.fcport.opt); break; case QLA_EVT_PRLI: qla24xx_async_prli(vha, e->u.fcport.fcport); break; case QLA_EVT_GPSC: qla24xx_async_gpsc(vha, e->u.fcport.fcport); break; case QLA_EVT_GNL: qla24xx_async_gnl(vha, e->u.fcport.fcport); break; case QLA_EVT_NACK: qla24xx_do_nack_work(vha, e); break; case QLA_EVT_ASYNC_PRLO: rc = qla2x00_async_prlo(vha, e->u.logio.fcport); break; case QLA_EVT_ASYNC_PRLO_DONE: qla2x00_async_prlo_done(vha, e->u.logio.fcport, e->u.logio.data); break; case QLA_EVT_GPNFT: qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, e->u.gpnft.sp); break; case QLA_EVT_GPNFT_DONE: qla24xx_async_gpnft_done(vha, e->u.iosb.sp); break; case QLA_EVT_GNNFT_DONE: qla24xx_async_gnnft_done(vha, e->u.iosb.sp); break; case QLA_EVT_GFPNID: qla24xx_async_gfpnid(vha, e->u.fcport.fcport); break; case QLA_EVT_SP_RETRY: qla_sp_retry(vha, e); break; case QLA_EVT_IIDMA: qla_do_iidma_work(vha, e->u.fcport.fcport); break; case QLA_EVT_ELS_PLOGI: qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, e->u.fcport.fcport, false); break; case QLA_EVT_SA_REPLACE: rc = qla24xx_issue_sa_replace_iocb(vha, e); break; } if (rc == EAGAIN) { /* put 'work' at head of 'vha->work_list' */ spin_lock_irqsave(&vha->work_lock, flags); 
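			/*
			 * rc == EAGAIN means the current event was not
			 * consumed (it was never list_del'd from the local
			 * 'work' list), so everything still on 'work' is
			 * spliced back to the head of vha->work_list and the
			 * loop stops; the events are retried on a later
			 * qla2x00_do_work() pass.
			 */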
list_splice(&work, &vha->work_list); spin_unlock_irqrestore(&vha->work_lock, flags); break; } list_del_init(&e->list); if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); /* For each work completed decrement vha ref count */ QLA_VHA_MARK_NOT_BUSY(vha); } } int qla24xx_post_relogin_work(struct scsi_qla_host *vha) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN); if (!e) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return QLA_FUNCTION_FAILED; } return qla2x00_post_work(vha, e); } /* Relogins all the fcports of a vport * Context: dpc thread */ void qla2x00_relogin(struct scsi_qla_host *vha) { fc_port_t *fcport; int status, relogin_needed = 0; struct event_arg ea; list_for_each_entry(fcport, &vha->vp_fcports, list) { /* * If the port is not ONLINE then try to login * to it if we haven't run out of retries. */ if (atomic_read(&fcport->state) != FCS_ONLINE && fcport->login_retry) { if (fcport->scan_state != QLA_FCPORT_FOUND || fcport->disc_state == DSC_LOGIN_AUTH_PEND || fcport->disc_state == DSC_LOGIN_COMPLETE) continue; if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || fcport->disc_state == DSC_DELETE_PEND) { relogin_needed = 1; } else { if (vha->hw->current_topology != ISP_CFG_NL) { memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; qla24xx_handle_relogin_event(vha, &ea); } else if (vha->hw->current_topology == ISP_CFG_NL && IS_QLA2XXX_MIDTYPE(vha->hw)) { (void)qla24xx_fcport_handle_login(vha, fcport); } else if (vha->hw->current_topology == ISP_CFG_NL) { fcport->login_retry--; status = qla2x00_local_device_login(vha, fcport); if (status == QLA_SUCCESS) { fcport->old_loop_id = fcport->loop_id; ql_dbg(ql_dbg_disc, vha, 0x2003, "Port login OK: logged in ID 0x%x.\n", fcport->loop_id); qla2x00_update_fcport (vha, fcport); } else if (status == 1) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); /* retry the login again */ ql_dbg(ql_dbg_disc, vha, 0x2007, "Retrying %d login again loop_id 0x%x.\n", fcport->login_retry, fcport->loop_id); } else { fcport->login_retry = 0; } if (fcport->login_retry == 0 && status != QLA_SUCCESS) qla2x00_clear_loop_id(fcport); } } } if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; } if (relogin_needed) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_disc, vha, 0x400e, "Relogin end.\n"); } /* Schedule work on any of the dpc-workqueues */ void qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code) { struct qla_hw_data *ha = base_vha->hw; switch (work_code) { case MBA_IDC_AEN: /* 0x8200 */ if (ha->dpc_lp_wq) queue_work(ha->dpc_lp_wq, &ha->idc_aen); break; case QLA83XX_NIC_CORE_RESET: /* 0x1 */ if (!ha->flags.nic_core_reset_hdlr_active) { if (ha->dpc_hp_wq) queue_work(ha->dpc_hp_wq, &ha->nic_core_reset); } else ql_dbg(ql_dbg_p3p, base_vha, 0xb05e, "NIC Core reset is already active. 
Skip " "scheduling it again.\n"); break; case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */ if (ha->dpc_hp_wq) queue_work(ha->dpc_hp_wq, &ha->idc_state_handler); break; case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */ if (ha->dpc_hp_wq) queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable); break; default: ql_log(ql_log_warn, base_vha, 0xb05f, "Unknown work-code=0x%x.\n", work_code); } return; } /* Work: Perform NIC Core Unrecoverable state handling */ void qla83xx_nic_core_unrecoverable_work(struct work_struct *work) { struct qla_hw_data *ha = container_of(work, struct qla_hw_data, nic_core_unrecoverable); scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); uint32_t dev_state = 0; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); qla83xx_reset_ownership(base_vha); if (ha->flags.nic_core_reset_owner) { ha->flags.nic_core_reset_owner = 0; qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n"); qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); } qla83xx_idc_unlock(base_vha, 0); } /* Work: Execute IDC state handler */ void qla83xx_idc_state_handler_work(struct work_struct *work) { struct qla_hw_data *ha = container_of(work, struct qla_hw_data, idc_state_handler); scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); uint32_t dev_state = 0; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); if (dev_state == QLA8XXX_DEV_FAILED || dev_state == QLA8XXX_DEV_NEED_QUIESCENT) qla83xx_idc_state_handler(base_vha); qla83xx_idc_unlock(base_vha, 0); } static int qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha) { int rval = QLA_SUCCESS; unsigned long heart_beat_wait = jiffies + (1 * HZ); uint32_t heart_beat_counter1, heart_beat_counter2; do { if (time_after(jiffies, heart_beat_wait)) { ql_dbg(ql_dbg_p3p, base_vha, 0xb07c, "Nic Core f/w is not alive.\n"); rval = QLA_FUNCTION_FAILED; break; } qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, &heart_beat_counter1); qla83xx_idc_unlock(base_vha, 0); msleep(100); qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT, &heart_beat_counter2); qla83xx_idc_unlock(base_vha, 0); } while (heart_beat_counter1 == heart_beat_counter2); return rval; } /* Work: Perform NIC Core Reset handling */ void qla83xx_nic_core_reset_work(struct work_struct *work) { struct qla_hw_data *ha = container_of(work, struct qla_hw_data, nic_core_reset); scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); uint32_t dev_state = 0; if (IS_QLA2031(ha)) { if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS) ql_log(ql_log_warn, base_vha, 0xb081, "Failed to dump mctp\n"); return; } if (!ha->flags.nic_core_reset_hdlr_active) { if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); qla83xx_idc_unlock(base_vha, 0); if (dev_state != QLA8XXX_DEV_NEED_RESET) { ql_dbg(ql_dbg_p3p, base_vha, 0xb07a, "Nic Core f/w is alive.\n"); return; } } ha->flags.nic_core_reset_hdlr_active = 1; if (qla83xx_nic_core_reset(base_vha)) { /* NIC Core reset failed. 
*/ ql_dbg(ql_dbg_p3p, base_vha, 0xb061, "NIC Core reset failed.\n"); } ha->flags.nic_core_reset_hdlr_active = 0; } } /* Work: Handle 8200 IDC aens */ void qla83xx_service_idc_aen(struct work_struct *work) { struct qla_hw_data *ha = container_of(work, struct qla_hw_data, idc_aen); scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); uint32_t dev_state, idc_control; qla83xx_idc_lock(base_vha, 0); qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control); qla83xx_idc_unlock(base_vha, 0); if (dev_state == QLA8XXX_DEV_NEED_RESET) { if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) { ql_dbg(ql_dbg_p3p, base_vha, 0xb062, "Application requested NIC Core Reset.\n"); qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); } else if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, base_vha, 0xb07b, "Other protocol driver requested NIC Core Reset.\n"); qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET); } } else if (dev_state == QLA8XXX_DEV_FAILED || dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER); } } /* * Control the frequency of IDC lock retries */ #define QLA83XX_WAIT_LOGIC_MS 100 static int qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha) { int rval; uint32_t data; uint32_t idc_lck_rcvry_stage_mask = 0x3; uint32_t idc_lck_rcvry_owner_mask = 0x3c; struct qla_hw_data *ha = base_vha->hw; ql_dbg(ql_dbg_p3p, base_vha, 0xb086, "Trying force recovery of the IDC lock.\n"); rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); if (rval) return rval; if ((data & idc_lck_rcvry_stage_mask) > 0) { return QLA_SUCCESS; } else { data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2); rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, data); if (rval) return rval; msleep(200); rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data); if (rval) return rval; if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) { data &= (IDC_LOCK_RECOVERY_STAGE2 | ~(idc_lck_rcvry_stage_mask)); rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, data); if (rval) return rval; /* Forcefully perform IDC UnLock */ rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); if (rval) return rval; /* Clear lock-id by setting 0xff */ rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); if (rval) return rval; /* Clear lock-recovery by setting 0x0 */ rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, 0x0); if (rval) return rval; } else return QLA_SUCCESS; } return rval; } static int qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha) { int rval = QLA_SUCCESS; uint32_t o_drv_lockid, n_drv_lockid; unsigned long lock_recovery_timeout; lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT; retry_lockid: rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid); if (rval) goto exit; /* MAX wait time before forcing IDC Lock recovery = 2 secs */ if (time_after_eq(jiffies, lock_recovery_timeout)) { if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS) return QLA_SUCCESS; else return QLA_FUNCTION_FAILED; } rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid); if (rval) goto exit; if (o_drv_lockid == n_drv_lockid) { msleep(QLA83XX_WAIT_LOGIC_MS); goto retry_lockid; } else return QLA_SUCCESS; exit: return rval; } /* * Context: task, can sleep */ void qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) { uint32_t data; uint32_t lock_owner; struct qla_hw_data *ha = 
base_vha->hw; might_sleep(); /* IDC-lock implementation using driver-lock/lock-id remote registers */ retry_lock: if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data) == QLA_SUCCESS) { if (data) { /* Setting lock-id to our function-number */ qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, ha->portnum); } else { qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &lock_owner); ql_dbg(ql_dbg_p3p, base_vha, 0xb063, "Failed to acquire IDC lock, acquired by %d, " "retrying...\n", lock_owner); /* Retry/Perform IDC-Lock recovery */ if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) { msleep(QLA83XX_WAIT_LOGIC_MS); goto retry_lock; } else ql_log(ql_log_warn, base_vha, 0xb075, "IDC Lock recovery FAILED.\n"); } } return; } static bool qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, struct purex_entry_24xx *purex) { char fwstr[16]; u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; struct port_database_24xx *pdb; /* Domain Controller is always logged-out. */ /* if RDP request is not from Domain Controller: */ if (sid != 0xfffc01) return false; ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid); pdb = kzalloc(sizeof(*pdb), GFP_KERNEL); if (!pdb) { ql_dbg(ql_dbg_init, vha, 0x0181, "%s: Failed allocate pdb\n", __func__); } else if (qla24xx_get_port_database(vha, le16_to_cpu(purex->nport_handle), pdb)) { ql_dbg(ql_dbg_init, vha, 0x0181, "%s: Failed get pdb sid=%x\n", __func__, sid); } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && pdb->current_login_state != PDS_PRLI_COMPLETE) { ql_dbg(ql_dbg_init, vha, 0x0181, "%s: Port not logged in sid=%#x\n", __func__, sid); } else { /* RDP request is from logged in port */ kfree(pdb); return false; } kfree(pdb); vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); fwstr[strcspn(fwstr, " ")] = 0; /* if FW version allows RDP response length upto 2048 bytes: */ if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0) return false; ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr); /* RDP response length is to be reduced to maximum 256 bytes */ return true; } /* * Function Name: qla24xx_process_purex_iocb * * Description: * Prepare a RDP response and send to Fabric switch * * PARAMETERS: * vha: SCSI qla host * purex: RDP request received by HBA */ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, struct purex_item *item) { struct qla_hw_data *ha = vha->hw; struct purex_entry_24xx *purex = (struct purex_entry_24xx *)&item->iocb; dma_addr_t rsp_els_dma; dma_addr_t rsp_payload_dma; dma_addr_t stat_dma; dma_addr_t sfp_dma; struct els_entry_24xx *rsp_els = NULL; struct rdp_rsp_payload *rsp_payload = NULL; struct link_statistics *stat = NULL; uint8_t *sfp = NULL; uint16_t sfp_flags = 0; uint rsp_payload_length = sizeof(*rsp_payload); int rval; ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, "%s: Enter\n", __func__); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, "-------- ELS REQ -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, purex, sizeof(*purex)); if (qla25xx_rdp_rsp_reduce_size(vha, purex)) { rsp_payload_length = offsetof(typeof(*rsp_payload), optical_elmt_desc); ql_dbg(ql_dbg_init, vha, 0x0181, "Reducing RSP payload length to %u bytes...\n", rsp_payload_length); } rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &rsp_els_dma, GFP_KERNEL); if (!rsp_els) { ql_log(ql_log_warn, vha, 0x0183, "Failed allocate dma buffer ELS RSP.\n"); goto dealloc; } rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload), &rsp_payload_dma, 
GFP_KERNEL); if (!rsp_payload) { ql_log(ql_log_warn, vha, 0x0184, "Failed allocate dma buffer ELS RSP payload.\n"); goto dealloc; } sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN, &sfp_dma, GFP_KERNEL); stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat), &stat_dma, GFP_KERNEL); /* Prepare Response IOCB */ rsp_els->entry_type = ELS_IOCB_TYPE; rsp_els->entry_count = 1; rsp_els->sys_define = 0; rsp_els->entry_status = 0; rsp_els->handle = 0; rsp_els->nport_handle = purex->nport_handle; rsp_els->tx_dsd_count = cpu_to_le16(1); rsp_els->vp_index = purex->vp_idx; rsp_els->sof_type = EST_SOFI3; rsp_els->rx_xchg_address = purex->rx_xchg_addr; rsp_els->rx_dsd_count = 0; rsp_els->opcode = purex->els_frame_payload[0]; rsp_els->d_id[0] = purex->s_id[0]; rsp_els->d_id[1] = purex->s_id[1]; rsp_els->d_id[2] = purex->s_id[2]; rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); rsp_els->rx_byte_count = 0; rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address); rsp_els->tx_len = rsp_els->tx_byte_count; rsp_els->rx_address = 0; rsp_els->rx_len = 0; /* Prepare Response Payload */ rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - sizeof(rsp_payload->hdr)); /* Link service Request Info Descriptor */ rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); rsp_payload->ls_req_info_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); rsp_payload->ls_req_info_desc.req_payload_word_0 = cpu_to_be32p((uint32_t *)purex->els_frame_payload); /* Link service Request Info Descriptor 2 */ rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); rsp_payload->ls_req_info_desc2.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); rsp_payload->ls_req_info_desc2.req_payload_word_0 = cpu_to_be32p((uint32_t *)purex->els_frame_payload); rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); rsp_payload->sfp_diag_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); if (sfp) { /* SFP Flags */ memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); if (!rval) { /* SFP Flags bits 3-0: Port Tx Laser Type */ if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) sfp_flags |= BIT_0; /* short wave */ else if (sfp[0] & BIT_1) sfp_flags |= BIT_1; /* long wave 1310nm */ else if (sfp[1] & BIT_4) sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ } /* SFP Type */ memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); if (!rval) { sfp_flags |= BIT_4; /* optical */ if (sfp[0] == 0x3) sfp_flags |= BIT_6; /* sfp+ */ } rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); /* SFP Diagnostics */ memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); if (!rval) { __be16 *trx = (__force __be16 *)sfp; /* already be16 */ rsp_payload->sfp_diag_desc.temperature = trx[0]; rsp_payload->sfp_diag_desc.vcc = trx[1]; rsp_payload->sfp_diag_desc.tx_bias = trx[2]; rsp_payload->sfp_diag_desc.tx_power = trx[3]; rsp_payload->sfp_diag_desc.rx_power = trx[4]; } } /* Port Speed Descriptor */ rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); rsp_payload->port_speed_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( qla25xx_fdmi_port_speed_capability(ha)); rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( qla25xx_fdmi_port_speed_currently(ha)); /* Link 
Error Status Descriptor */ rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002); rsp_payload->ls_err_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc)); if (stat) { rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); if (!rval) { rsp_payload->ls_err_desc.link_fail_cnt = cpu_to_be32(le32_to_cpu(stat->link_fail_cnt)); rsp_payload->ls_err_desc.loss_sync_cnt = cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt)); rsp_payload->ls_err_desc.loss_sig_cnt = cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt)); rsp_payload->ls_err_desc.prim_seq_err_cnt = cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt)); rsp_payload->ls_err_desc.inval_xmit_word_cnt = cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt)); rsp_payload->ls_err_desc.inval_crc_cnt = cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt)); rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6; } } /* Portname Descriptor */ rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003); rsp_payload->port_name_diag_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc)); memcpy(rsp_payload->port_name_diag_desc.WWNN, vha->node_name, sizeof(rsp_payload->port_name_diag_desc.WWNN)); memcpy(rsp_payload->port_name_diag_desc.WWPN, vha->port_name, sizeof(rsp_payload->port_name_diag_desc.WWPN)); /* F-Port Portname Descriptor */ rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003); rsp_payload->port_name_direct_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc)); memcpy(rsp_payload->port_name_direct_desc.WWNN, vha->fabric_node_name, sizeof(rsp_payload->port_name_direct_desc.WWNN)); memcpy(rsp_payload->port_name_direct_desc.WWPN, vha->fabric_port_name, sizeof(rsp_payload->port_name_direct_desc.WWPN)); /* Bufer Credit Descriptor */ rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006); rsp_payload->buffer_credit_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc)); rsp_payload->buffer_credit_desc.fcport_b2b = 0; rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0); rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0); if (ha->flags.plogi_template_valid) { uint32_t tmp = be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred); rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp); } if (rsp_payload_length < sizeof(*rsp_payload)) goto send; /* Optical Element Descriptor, Temperature */ rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007); rsp_payload->optical_elmt_desc[0].desc_len = cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); /* Optical Element Descriptor, Voltage */ rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007); rsp_payload->optical_elmt_desc[1].desc_len = cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); /* Optical Element Descriptor, Tx Bias Current */ rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007); rsp_payload->optical_elmt_desc[2].desc_len = cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); /* Optical Element Descriptor, Tx Power */ rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007); rsp_payload->optical_elmt_desc[3].desc_len = cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); /* Optical Element Descriptor, Rx Power */ rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007); rsp_payload->optical_elmt_desc[4].desc_len = cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); if (sfp) { memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); if (!rval) { __be16 *trx = (__force __be16 
*)sfp; /* already be16 */ /* Optical Element Descriptor, Temperature */ rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; rsp_payload->optical_elmt_desc[0].low_alarm = trx[1]; rsp_payload->optical_elmt_desc[0].high_warn = trx[2]; rsp_payload->optical_elmt_desc[0].low_warn = trx[3]; rsp_payload->optical_elmt_desc[0].element_flags = cpu_to_be32(1 << 28); /* Optical Element Descriptor, Voltage */ rsp_payload->optical_elmt_desc[1].high_alarm = trx[4]; rsp_payload->optical_elmt_desc[1].low_alarm = trx[5]; rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; rsp_payload->optical_elmt_desc[1].element_flags = cpu_to_be32(2 << 28); /* Optical Element Descriptor, Tx Bias Current */ rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; rsp_payload->optical_elmt_desc[2].element_flags = cpu_to_be32(3 << 28); /* Optical Element Descriptor, Tx Power */ rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; rsp_payload->optical_elmt_desc[3].element_flags = cpu_to_be32(4 << 28); /* Optical Element Descriptor, Rx Power */ rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; rsp_payload->optical_elmt_desc[4].element_flags = cpu_to_be32(5 << 28); } memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); if (!rval) { /* Temperature high/low alarm/warning */ rsp_payload->optical_elmt_desc[0].element_flags |= cpu_to_be32( (sfp[0] >> 7 & 1) << 3 | (sfp[0] >> 6 & 1) << 2 | (sfp[4] >> 7 & 1) << 1 | (sfp[4] >> 6 & 1) << 0); /* Voltage high/low alarm/warning */ rsp_payload->optical_elmt_desc[1].element_flags |= cpu_to_be32( (sfp[0] >> 5 & 1) << 3 | (sfp[0] >> 4 & 1) << 2 | (sfp[4] >> 5 & 1) << 1 | (sfp[4] >> 4 & 1) << 0); /* Tx Bias Current high/low alarm/warning */ rsp_payload->optical_elmt_desc[2].element_flags |= cpu_to_be32( (sfp[0] >> 3 & 1) << 3 | (sfp[0] >> 2 & 1) << 2 | (sfp[4] >> 3 & 1) << 1 | (sfp[4] >> 2 & 1) << 0); /* Tx Power high/low alarm/warning */ rsp_payload->optical_elmt_desc[3].element_flags |= cpu_to_be32( (sfp[0] >> 1 & 1) << 3 | (sfp[0] >> 0 & 1) << 2 | (sfp[4] >> 1 & 1) << 1 | (sfp[4] >> 0 & 1) << 0); /* Rx Power high/low alarm/warning */ rsp_payload->optical_elmt_desc[4].element_flags |= cpu_to_be32( (sfp[1] >> 7 & 1) << 3 | (sfp[1] >> 6 & 1) << 2 | (sfp[5] >> 7 & 1) << 1 | (sfp[5] >> 6 & 1) << 0); } } /* Optical Product Data Descriptor */ rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); rsp_payload->optical_prod_desc.desc_len = cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); if (sfp) { memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); if (!rval) { memcpy(rsp_payload->optical_prod_desc.vendor_name, sfp + 0, sizeof(rsp_payload->optical_prod_desc.vendor_name)); memcpy(rsp_payload->optical_prod_desc.part_number, sfp + 20, sizeof(rsp_payload->optical_prod_desc.part_number)); memcpy(rsp_payload->optical_prod_desc.revision, sfp + 36, sizeof(rsp_payload->optical_prod_desc.revision)); memcpy(rsp_payload->optical_prod_desc.serial_number, sfp + 48, 
sizeof(rsp_payload->optical_prod_desc.serial_number)); } memset(sfp, 0, SFP_RTDI_LEN); rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); if (!rval) { memcpy(rsp_payload->optical_prod_desc.date, sfp + 0, sizeof(rsp_payload->optical_prod_desc.date)); } } send: ql_dbg(ql_dbg_init, vha, 0x0183, "Sending ELS Response to RDP Request...\n"); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184, "-------- ELS RSP -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185, rsp_els, sizeof(*rsp_els)); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, "-------- ELS RSP PAYLOAD -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, rsp_payload, rsp_payload_length); rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); if (rval) { ql_log(ql_log_warn, vha, 0x0188, "%s: iocb failed to execute -> %x\n", __func__, rval); } else if (rsp_els->comp_status) { ql_log(ql_log_warn, vha, 0x0189, "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", __func__, rsp_els->comp_status, rsp_els->error_subcode_1, rsp_els->error_subcode_2); } else { ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__); } dealloc: if (stat) dma_free_coherent(&ha->pdev->dev, sizeof(*stat), stat, stat_dma); if (sfp) dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN, sfp, sfp_dma); if (rsp_payload) dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload), rsp_payload, rsp_payload_dma); if (rsp_els) dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, rsp_els_dma); } void qla24xx_free_purex_item(struct purex_item *item) { if (item == &item->vha->default_item) memset(&item->vha->default_item, 0, sizeof(struct purex_item)); else kfree(item); } void qla24xx_process_purex_list(struct purex_list *list) { struct list_head head = LIST_HEAD_INIT(head); struct purex_item *item, *next; ulong flags; spin_lock_irqsave(&list->lock, flags); list_splice_init(&list->head, &head); spin_unlock_irqrestore(&list->lock, flags); list_for_each_entry_safe(item, next, &head, list) { list_del(&item->list); item->process_item(item->vha, item); qla24xx_free_purex_item(item); } } /* * Context: task, can sleep */ void qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) { #if 0 uint16_t options = (requester_id << 15) | BIT_7; #endif uint16_t retry; uint32_t data; struct qla_hw_data *ha = base_vha->hw; might_sleep(); /* IDC-unlock implementation using driver-unlock/lock-id * remote registers */ retry = 0; retry_unlock: if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data) == QLA_SUCCESS) { if (data == ha->portnum) { qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data); /* Clearing lock-id by setting 0xff */ qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff); } else if (retry < 10) { /* SV: XXX: IDC unlock retrying needed here? 
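* As written, the unlock is retried up to ten times and then abandoned; a stale lock is eventually reclaimed by the lock-recovery path invoked from qla83xx_idc_lock().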
*/ /* Retry for IDC-unlock */ msleep(QLA83XX_WAIT_LOGIC_MS); retry++; ql_dbg(ql_dbg_p3p, base_vha, 0xb064, "Failed to release IDC lock, retrying=%d\n", retry); goto retry_unlock; } } else if (retry < 10) { /* Retry for IDC-unlock */ msleep(QLA83XX_WAIT_LOGIC_MS); retry++; ql_dbg(ql_dbg_p3p, base_vha, 0xb065, "Failed to read drv-lockid, retrying=%d\n", retry); goto retry_unlock; } return; #if 0 /* XXX: IDC-unlock implementation using access-control mbx */ retry = 0; retry_unlock2: if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { if (retry < 10) { /* Retry for IDC-unlock */ msleep(QLA83XX_WAIT_LOGIC_MS); retry++; ql_dbg(ql_dbg_p3p, base_vha, 0xb066, "Failed to release IDC lock, retrying=%d\n", retry); goto retry_unlock2; } } return; #endif } int __qla83xx_set_drv_presence(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint32_t drv_presence; rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); if (rval == QLA_SUCCESS) { drv_presence |= (1 << ha->portnum); rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, drv_presence); } return rval; } int qla83xx_set_drv_presence(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; qla83xx_idc_lock(vha, 0); rval = __qla83xx_set_drv_presence(vha); qla83xx_idc_unlock(vha, 0); return rval; } int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; uint32_t drv_presence; rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); if (rval == QLA_SUCCESS) { drv_presence &= ~(1 << ha->portnum); rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, drv_presence); } return rval; } int qla83xx_clear_drv_presence(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; qla83xx_idc_lock(vha, 0); rval = __qla83xx_clear_drv_presence(vha); qla83xx_idc_unlock(vha, 0); return rval; } static void qla83xx_need_reset_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t drv_ack, drv_presence; unsigned long ack_timeout; /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */ ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); while (1) { qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack); qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); if ((drv_ack & drv_presence) == drv_presence) break; if (time_after_eq(jiffies, ack_timeout)) { ql_log(ql_log_warn, vha, 0xb067, "RESET ACK TIMEOUT! drv_presence=0x%x " "drv_ack=0x%x\n", drv_presence, drv_ack); /* * The function(s) which did not ack in time are forced * to withdraw any further participation in the IDC * reset. 
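* Withdrawal is enforced by overwriting DRV-PRESENCE with drv_ack, so only the functions that acknowledged remain registered.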
*/ if (drv_ack != drv_presence) qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE, drv_ack); break; } qla83xx_idc_unlock(vha, 0); msleep(1000); qla83xx_idc_lock(vha, 0); } qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD); ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n"); } static int qla83xx_device_bootstrap(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t idc_control; qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING); ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n"); /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */ __qla83xx_get_idc_control(vha, &idc_control); idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET; __qla83xx_set_idc_control(vha, 0); qla83xx_idc_unlock(vha, 0); rval = qla83xx_restart_nic_firmware(vha); qla83xx_idc_lock(vha, 0); if (rval != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0xb06a, "Failed to restart NIC f/w.\n"); qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n"); } else { ql_dbg(ql_dbg_p3p, vha, 0xb06c, "Success in restarting nic f/w.\n"); qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY); ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n"); } return rval; } /* Assumes idc_lock always held on entry */ int qla83xx_idc_state_handler(scsi_qla_host_t *base_vha) { struct qla_hw_data *ha = base_vha->hw; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; uint32_t dev_state; /* Wait for MAX-INIT-TIMEOUT for the device to go ready */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { ql_log(ql_log_warn, base_vha, 0xb06e, "Initialization TIMEOUT!\n"); /* Init timeout. Disable further NIC Core * communication. */ qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED); ql_log(ql_log_info, base_vha, 0xb06f, "HW State: FAILED.\n"); } qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state); switch (dev_state) { case QLA8XXX_DEV_READY: if (ha->flags.nic_core_reset_owner) qla83xx_idc_audit(base_vha, IDC_AUDIT_COMPLETION); ha->flags.nic_core_reset_owner = 0; ql_dbg(ql_dbg_p3p, base_vha, 0xb070, "Reset_owner reset by 0x%x.\n", ha->portnum); goto exit; case QLA8XXX_DEV_COLD: if (ha->flags.nic_core_reset_owner) rval = qla83xx_device_bootstrap(base_vha); else { /* Wait for AEN to change device-state */ qla83xx_idc_unlock(base_vha, 0); msleep(1000); qla83xx_idc_lock(base_vha, 0); } break; case QLA8XXX_DEV_INITIALIZING: /* Wait for AEN to change device-state */ qla83xx_idc_unlock(base_vha, 0); msleep(1000); qla83xx_idc_lock(base_vha, 0); break; case QLA8XXX_DEV_NEED_RESET: if (!ql2xdontresethba && ha->flags.nic_core_reset_owner) qla83xx_need_reset_handler(base_vha); else { /* Wait for AEN to change device-state */ qla83xx_idc_unlock(base_vha, 0); msleep(1000); qla83xx_idc_lock(base_vha, 0); } /* reset timeout value after need reset handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_NEED_QUIESCENT: /* XXX: DEBUG for now */ qla83xx_idc_unlock(base_vha, 0); msleep(1000); qla83xx_idc_lock(base_vha, 0); break; case QLA8XXX_DEV_QUIESCENT: /* XXX: DEBUG for now */ if (ha->flags.quiesce_owner) goto exit; qla83xx_idc_unlock(base_vha, 0); msleep(1000); qla83xx_idc_lock(base_vha, 0); dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_FAILED: if (ha->flags.nic_core_reset_owner) qla83xx_idc_audit(base_vha, IDC_AUDIT_COMPLETION); ha->flags.nic_core_reset_owner = 0; 
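/* FAILED: withdraw this function's presence and run the dev-failed handler with the IDC lock dropped. */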
__qla83xx_clear_drv_presence(base_vha); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = QLA_FUNCTION_FAILED; qla83xx_idc_lock(base_vha, 0); goto exit; case QLA8XXX_BAD_VALUE: qla83xx_idc_unlock(base_vha, 0); msleep(1000); qla83xx_idc_lock(base_vha, 0); break; default: ql_log(ql_log_warn, base_vha, 0xb071, "Unknown Device State: %x.\n", dev_state); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = QLA_FUNCTION_FAILED; qla83xx_idc_lock(base_vha, 0); goto exit; } } exit: return rval; } void qla2x00_disable_board_on_pci_error(struct work_struct *work) { struct qla_hw_data *ha = container_of(work, struct qla_hw_data, board_disable); struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); if (!atomic_read(&pdev->enable_cnt)) { ql_log(ql_log_info, base_vha, 0xfffc, "PCI device disabled, no action req for PCI error=%lx\n", base_vha->pci_flags); return; } /* * if UNLOADING flag is already set, then continue unload, * where it was set first. */ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) return; qla2x00_wait_for_sess_deletion(base_vha); qla2x00_delete_all_vps(ha, base_vha); qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); qla2x00_dfs_remove(base_vha); qla84xx_put_chip(base_vha); if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; qla2x00_destroy_deferred_work(ha); /* * Do not try to stop beacon blink as it will issue a mailbox * command. */ qla2x00_free_sysfs_attr(base_vha, false); fc_remove_host(base_vha->host); scsi_remove_host(base_vha->host); base_vha->flags.init_done = 0; qla25xx_delete_queues(base_vha); qla2x00_free_fcports(base_vha); qla2x00_free_irqs(base_vha); qla2x00_mem_free(ha); qla82xx_md_free(base_vha); qla2x00_free_queues(ha); qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); pci_disable_device(pdev); /* * Let qla2x00_remove_one clean up qla_hw_data on device removal. */ } /************************************************************************** * qla2x00_do_dpc * This kernel thread is a task that is scheduled by the interrupt handler * to perform the background processing for interrupts. * * Notes: * This task always runs in the context of a kernel thread. It * is kicked off by the driver's detect code and starts * up one per adapter. It immediately goes to sleep and waits for * some fibre event. When either the interrupt handler or * the timer routine detects an event it will set one of the task * bits and then wake us up.
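* The task bits live in base_vha->dpc_flags; each pass of the loop below tests and clears the bits it services before sleeping again.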
**************************************************************************/ static int qla2x00_do_dpc(void *data) { scsi_qla_host_t *base_vha; struct qla_hw_data *ha; uint32_t online; struct qla_qpair *qpair; ha = (struct qla_hw_data *)data; base_vha = pci_get_drvdata(ha->pdev); set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { ql_dbg(ql_dbg_dpc, base_vha, 0x4000, "DPC handler sleeping.\n"); schedule(); if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) qla_pci_set_eeh_busy(base_vha); if (!base_vha->flags.init_done || ha->flags.mbox_busy) goto end_loop; if (ha->flags.eeh_busy) { ql_dbg(ql_dbg_dpc, base_vha, 0x4003, "eeh_busy=%d.\n", ha->flags.eeh_busy); goto end_loop; } ha->dpc_active = 1; ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); if (test_bit(UNLOADING, &base_vha->dpc_flags)) break; if (IS_P3P_TYPE(ha)) { if (IS_QLA8044(ha)) { if (test_and_clear_bit(ISP_UNRECOVERABLE, &base_vha->dpc_flags)) { qla8044_idc_lock(ha); qla8044_wr_direct(base_vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_FAILED); qla8044_idc_unlock(ha); ql_log(ql_log_info, base_vha, 0x4004, "HW State: FAILED.\n"); qla8044_device_state_handler(base_vha); continue; } } else { if (test_and_clear_bit(ISP_UNRECOVERABLE, &base_vha->dpc_flags)) { qla82xx_idc_lock(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); qla82xx_idc_unlock(ha); ql_log(ql_log_info, base_vha, 0x0151, "HW State: FAILED.\n"); qla82xx_device_state_handler(base_vha); continue; } } if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4005, "FCoE context reset scheduled.\n"); if (!(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { if (qla82xx_fcoe_ctx_reset(base_vha)) { /* FCoE-ctx reset failed. * Escalate to chip-reset */ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); } ql_dbg(ql_dbg_dpc, base_vha, 0x4006, "FCoE context reset end.\n"); } } else if (IS_QLAFX00(ha)) { if (test_and_clear_bit(ISP_UNRECOVERABLE, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4020, "Firmware Reset Recovery\n"); if (qlafx00_reset_initialize(base_vha)) { /* Failed. Abort isp later. */ if (!test_bit(UNLOADING, &base_vha->dpc_flags)) { set_bit(ISP_UNRECOVERABLE, &base_vha->dpc_flags); ql_dbg(ql_dbg_dpc, base_vha, 0x4021, "Reset Recovery Failed\n"); } } } if (test_and_clear_bit(FX00_TARGET_SCAN, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4022, "ISPFx00 Target Scan scheduled\n"); if (qlafx00_rescan_isp(base_vha)) { if (!test_bit(UNLOADING, &base_vha->dpc_flags)) set_bit(ISP_UNRECOVERABLE, &base_vha->dpc_flags); ql_dbg(ql_dbg_dpc, base_vha, 0x401e, "ISPFx00 Target Scan Failed\n"); } ql_dbg(ql_dbg_dpc, base_vha, 0x401f, "ISPFx00 Target Scan End\n"); } if (test_and_clear_bit(FX00_HOST_INFO_RESEND, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4023, "ISPFx00 Host Info resend scheduled\n"); qlafx00_fx_disc(base_vha, &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); } } if (test_and_clear_bit(DETECT_SFP_CHANGE, &base_vha->dpc_flags)) { /* Semantic: * - NO-OP -- await next ISP-ABORT. Preferred method * to minimize disruptions that will occur * when a forced chip-reset occurs. * - Force -- ISP-ABORT scheduled. 
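* The Force option is currently compiled out; see the commented-out set_bit(ISP_ABORT_NEEDED) immediately below.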
*/ /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */ } if (test_and_clear_bit (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && !test_bit(UNLOADING, &base_vha->dpc_flags)) { bool do_reset = true; switch (base_vha->qlini_mode) { case QLA2XXX_INI_MODE_ENABLED: break; case QLA2XXX_INI_MODE_DISABLED: if (!qla_tgt_mode_enabled(base_vha) && !ha->flags.fw_started) do_reset = false; break; case QLA2XXX_INI_MODE_DUAL: if (!qla_dual_mode_enabled(base_vha) && !ha->flags.fw_started) do_reset = false; break; default: break; } if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { base_vha->flags.online = 1; ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); if (ha->isp_ops->abort_isp(base_vha)) { /* failed. retry later */ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); ql_dbg(ql_dbg_dpc, base_vha, 0x4008, "ISP abort end.\n"); } } if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) { if (atomic_read(&base_vha->loop_state) == LOOP_READY) { qla24xx_process_purex_list (&base_vha->purex_list); clear_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags); } } if (IS_QLAFX00(ha)) goto loop_resync_check; if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4009, "Quiescence mode scheduled.\n"); if (IS_P3P_TYPE(ha)) { if (IS_QLA82XX(ha)) qla82xx_device_state_handler(base_vha); if (IS_QLA8044(ha)) qla8044_device_state_handler(base_vha); clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); if (!ha->flags.quiesce_owner) { qla2x00_perform_loop_resync(base_vha); if (IS_QLA82XX(ha)) { qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready( base_vha); qla82xx_idc_unlock(ha); } else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); qla8044_clear_qsnt_ready( base_vha); qla8044_idc_unlock(ha); } } } else { clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); qla2x00_quiesce_io(base_vha); } ql_dbg(ql_dbg_dpc, base_vha, 0x400a, "Quiescence mode end.\n"); } if (test_and_clear_bit(RESET_MARKER_NEEDED, &base_vha->dpc_flags) && (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { ql_dbg(ql_dbg_dpc, base_vha, 0x400b, "Reset marker scheduled.\n"); qla2x00_rst_aen(base_vha); clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); ql_dbg(ql_dbg_dpc, base_vha, 0x400c, "Reset marker end.\n"); } /* Retry each device up to login retry count */ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && atomic_read(&base_vha->loop_state) != LOOP_DOWN) { if (!base_vha->relogin_jif || time_after_eq(jiffies, base_vha->relogin_jif)) { base_vha->relogin_jif = jiffies + HZ; clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); ql_dbg(ql_dbg_disc, base_vha, 0x400d, "Relogin scheduled.\n"); qla24xx_post_relogin_work(base_vha); } } loop_resync_check: if (!qla2x00_reset_active(base_vha) && test_and_clear_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) { /* * Allow abort_isp to complete before moving on to scanning. 
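* qla2x00_reset_active() in the condition above is what defers the resync until any abort/reset has finished.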
*/ ql_dbg(ql_dbg_dpc, base_vha, 0x400f, "Loop resync scheduled.\n"); if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags))) { qla2x00_loop_resync(base_vha); clear_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags); } ql_dbg(ql_dbg_dpc, base_vha, 0x4010, "Loop resync end.\n"); } if (IS_QLAFX00(ha)) goto intr_on_check; if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && atomic_read(&base_vha->loop_state) == LOOP_READY) { clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); qla2xxx_flash_npiv_conf(base_vha); } intr_on_check: if (!ha->interrupts_on) ha->isp_ops->enable_intrs(ha); if (test_and_clear_bit(BEACON_BLINK_NEEDED, &base_vha->dpc_flags)) { if (ha->beacon_blink_led == 1) ha->isp_ops->beacon_blink(base_vha); } /* qpair online check */ if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED, &base_vha->dpc_flags)) { if (ha->flags.eeh_busy || ha->flags.pci_channel_io_perm_failure) online = 0; else online = 1; mutex_lock(&ha->mq_lock); list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = online; mutex_unlock(&ha->mq_lock); } if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) { u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; if (threshold > ha->orig_fw_xcb_count) threshold = ha->orig_fw_xcb_count; ql_log(ql_log_info, base_vha, 0xffffff, "SET ZIO Activity exchange threshold to %d.\n", threshold); if (qla27xx_set_zio_threshold(base_vha, threshold)) { ql_log(ql_log_info, base_vha, 0xffffff, "Unable to SET ZIO Activity exchange threshold to %d.\n", threshold); } } if (!IS_QLAFX00(ha)) qla2x00_do_dpc_all_vps(base_vha); if (test_and_clear_bit(N2N_LINK_RESET, &base_vha->dpc_flags)) { qla2x00_lip_reset(base_vha); } ha->dpc_active = 0; end_loop: set_current_state(TASK_INTERRUPTIBLE); } /* End of while(1) */ __set_current_state(TASK_RUNNING); ql_dbg(ql_dbg_dpc, base_vha, 0x4011, "DPC handler exiting.\n"); /* * Make sure that nobody tries to wake us up again. */ ha->dpc_active = 0; /* Cleanup any residual CTX SRBs. */ qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); return 0; } void qla2xxx_wake_dpc(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct task_struct *t = ha->dpc_thread; if (!test_bit(UNLOADING, &vha->dpc_flags) && t) wake_up_process(t); } /* * qla2x00_rst_aen * Processes asynchronous reset. * * Input: * ha = adapter block pointer. */ static void qla2x00_rst_aen(scsi_qla_host_t *vha) { if (vha->flags.online && !vha->flags.reset_active && !atomic_read(&vha->loop_down_timer) && !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) { do { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); /* * Issue marker command only when we are going to start * the I/O. */ vha->marker_needed = 1; } while (!atomic_read(&vha->loop_down_timer) && (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags))); } } static bool qla_do_heartbeat(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; u32 cmpl_cnt; u16 i; bool do_heartbeat = false; /* * Allow do_heartbeat only if we don’t have any active interrupts, * but there are still IOs outstanding with firmware. 
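* In other words, the heartbeat check is only issued when completions have stalled while commands remain outstanding, which points at stuck firmware rather than an idle link.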
*/ cmpl_cnt = ha->base_qpair->cmd_completion_cnt; if (cmpl_cnt == ha->base_qpair->prev_completion_cnt && cmpl_cnt != ha->base_qpair->cmd_cnt) { do_heartbeat = true; goto skip; } ha->base_qpair->prev_completion_cnt = cmpl_cnt; for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) { cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt; if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt && cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) { do_heartbeat = true; break; } ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt; } } skip: return do_heartbeat; } static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started) { struct qla_hw_data *ha = vha->hw; if (vha->vp_idx) return; if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) return; /* * dpc thread cannot run if heartbeat is running at the same time. * We also do not want to starve heartbeat task. Therefore, do * heartbeat task at least once every 5 seconds. */ if (dpc_started && time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ)) return; if (qla_do_heartbeat(vha)) { ha->last_heartbeat_run_jiffies = jiffies; queue_work(ha->wq, &ha->heartbeat_work); } } static void qla_wind_down_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (!ha->flags.eeh_busy) return; if (ha->pci_error_state) /* system is trying to recover */ return; /* * Current system is not handling PCIE error. At this point, this is * best effort to wind down the adapter. */ if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) && !ha->flags.eeh_flush) { ql_log(ql_log_info, vha, 0x9009, "PCI Error detected, attempting to reset hardware.\n"); ha->isp_ops->reset_chip(vha); ha->isp_ops->disable_intrs(ha); ha->flags.eeh_flush = EEH_FLUSH_RDY; ha->eeh_jif = jiffies; } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY && time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) { pci_clear_master(ha->pdev); /* flush all command */ qla2x00_abort_isp_cleanup(vha); ha->flags.eeh_flush = EEH_FLUSH_DONE; ql_log(ql_log_info, vha, 0x900a, "PCI Error handling complete, all IOs aborted.\n"); } } /************************************************************************** * qla2x00_timer * * Description: * One second timer * * Context: Interrupt ***************************************************************************/ void qla2x00_timer(struct timer_list *t) { scsi_qla_host_t *vha = from_timer(vha, t, timer); unsigned long cpu_flags = 0; int start_dpc = 0; int index; srb_t *sp; uint16_t w; struct qla_hw_data *ha = vha->hw; struct req_que *req; unsigned long flags; fc_port_t *fcport = NULL; if (ha->flags.eeh_busy) { qla_wind_down_chip(vha); ql_dbg(ql_dbg_timer, vha, 0x6000, "EEH = %d, restarting timer.\n", ha->flags.eeh_busy); qla2x00_restart_timer(vha, WATCH_INTERVAL); return; } /* * Hardware read to raise pending EEH errors during mailbox waits. If * the read returns -1 then disable the board. 
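* A config-space read of PCI_VENDOR_ID that returns all ones is the standard sign that the device has dropped off the bus.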
*/ if (!pci_channel_offline(ha->pdev)) { pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); qla2x00_check_reg16_for_disconnect(vha, w); } /* Make sure qla82xx_watchdog is run only for physical port */ if (!vha->vp_idx && IS_P3P_TYPE(ha)) { if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) start_dpc++; if (IS_QLA82XX(ha)) qla82xx_watchdog(vha); else if (IS_QLA8044(ha)) qla8044_watchdog(vha); } if (!vha->vp_idx && IS_QLAFX00(ha)) qlafx00_timer_routine(vha); if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) vha->link_down_time++; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) fcport->tgt_link_down_time++; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); /* Loop down handler. */ if (atomic_read(&vha->loop_down_timer) > 0 && !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) && vha->flags.online) { if (atomic_read(&vha->loop_down_timer) == vha->loop_down_abort_time) { ql_log(ql_log_info, vha, 0x6008, "Loop down - aborting the queues before time expires.\n"); if (!IS_QLA2100(ha) && vha->link_down_timeout) atomic_set(&vha->loop_state, LOOP_DEAD); /* * Schedule an ISP abort to return any FCP2-device * commands. */ /* NPIV - scan physical port only */ if (!vha->vp_idx) { spin_lock_irqsave(&ha->hardware_lock, cpu_flags); req = ha->req_q_map[0]; for (index = 1; index < req->num_outstanding_cmds; index++) { fc_port_t *sfcp; sp = req->outstanding_cmds[index]; if (!sp) continue; if (sp->cmd_type != TYPE_SRB) continue; if (sp->type != SRB_SCSI_CMD) continue; sfcp = sp->fcport; if (!(sfcp->flags & FCF_FCP2_DEVICE)) continue; if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags); } start_dpc++; } /* if the loop has been down for 4 minutes, reinit adapter */ if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) { ql_log(ql_log_warn, vha, 0x6009, "Loop down - aborting ISP.\n"); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } ql_dbg(ql_dbg_timer, vha, 0x600a, "Loop down - seconds remaining %d.\n", atomic_read(&vha->loop_down_timer)); } /* Check if beacon LED needs to be blinked for physical host only */ if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { /* There is no beacon_blink function for ISP82xx */ if (!IS_P3P_TYPE(ha)) { set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); start_dpc++; } } /* check if edif running */ if (vha->hw->flags.edif_enabled) qla_edif_timer(vha); /* Process any deferred work. */ if (!list_empty(&vha->work_list)) { unsigned long flags; bool q = false; spin_lock_irqsave(&vha->work_lock, flags); if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags)) q = true; spin_unlock_irqrestore(&vha->work_lock, flags); if (q) queue_work(vha->hw->wq, &vha->iocb_work); } /* * FC-NVME * see if the active AEN count has changed from what was last reported. 
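* If it has, SET_ZIO_THRESHOLD_NEEDED is set so the DPC thread pushes the new exchange threshold to the firmware.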
*/ index = atomic_read(&ha->nvme_active_aen_cnt); if (!vha->vp_idx && (index != ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6 && !ha->flags.host_shutting_down) { ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); ql_log(ql_log_info, vha, 0x3002, "nvme: Sched: Set ZIO exchange threshold to %d.\n", ha->nvme_last_rptd_aen); set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); start_dpc++; } if (!vha->vp_idx && atomic_read(&ha->zio_threshold) != ha->last_zio_threshold && IS_ZIO_THRESHOLD_CAPABLE(ha)) { ql_log(ql_log_info, vha, 0x3002, "Sched: Set ZIO exchange threshold to %d.\n", ha->last_zio_threshold); ha->last_zio_threshold = atomic_read(&ha->zio_threshold); set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); start_dpc++; } qla_adjust_buf(vha); /* borrowing w to signify dpc will run */ w = 0; /* Schedule the DPC routine if needed */ if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || start_dpc || test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { ql_dbg(ql_dbg_timer, vha, 0x600b, "isp_abort_needed=%d loop_resync_needed=%d " "start_dpc=%d reset_marker_needed=%d", test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); ql_dbg(ql_dbg_timer, vha, 0x600c, "beacon_blink_needed=%d isp_unrecoverable=%d " "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " "relogin_needed=%d, Process_purex_iocb=%d.\n", test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), test_bit(VP_DPC_NEEDED, &vha->dpc_flags), test_bit(RELOGIN_NEEDED, &vha->dpc_flags), test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); qla2xxx_wake_dpc(vha); w = 1; } qla_heart_beat(vha, w); qla2x00_restart_timer(vha, WATCH_INTERVAL); } /* Firmware interface routines. 
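* Images are fetched on demand through request_firmware(), cached in qla_fw_blobs[] under qla_fw_lock, and dropped again in qla2x00_release_firmware().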
*/ #define FW_ISP21XX 0 #define FW_ISP22XX 1 #define FW_ISP2300 2 #define FW_ISP2322 3 #define FW_ISP24XX 4 #define FW_ISP25XX 5 #define FW_ISP81XX 6 #define FW_ISP82XX 7 #define FW_ISP2031 8 #define FW_ISP8031 9 #define FW_ISP27XX 10 #define FW_ISP28XX 11 #define FW_FILE_ISP21XX "ql2100_fw.bin" #define FW_FILE_ISP22XX "ql2200_fw.bin" #define FW_FILE_ISP2300 "ql2300_fw.bin" #define FW_FILE_ISP2322 "ql2322_fw.bin" #define FW_FILE_ISP24XX "ql2400_fw.bin" #define FW_FILE_ISP25XX "ql2500_fw.bin" #define FW_FILE_ISP81XX "ql8100_fw.bin" #define FW_FILE_ISP82XX "ql8200_fw.bin" #define FW_FILE_ISP2031 "ql2600_fw.bin" #define FW_FILE_ISP8031 "ql8300_fw.bin" #define FW_FILE_ISP27XX "ql2700_fw.bin" #define FW_FILE_ISP28XX "ql2800_fw.bin" static DEFINE_MUTEX(qla_fw_lock); static struct fw_blob qla_fw_blobs[] = { { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, { .name = FW_FILE_ISP24XX, }, { .name = FW_FILE_ISP25XX, }, { .name = FW_FILE_ISP81XX, }, { .name = FW_FILE_ISP82XX, }, { .name = FW_FILE_ISP2031, }, { .name = FW_FILE_ISP8031, }, { .name = FW_FILE_ISP27XX, }, { .name = FW_FILE_ISP28XX, }, { .name = NULL, }, }; struct fw_blob * qla2x00_request_firmware(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct fw_blob *blob; if (IS_QLA2100(ha)) { blob = &qla_fw_blobs[FW_ISP21XX]; } else if (IS_QLA2200(ha)) { blob = &qla_fw_blobs[FW_ISP22XX]; } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) { blob = &qla_fw_blobs[FW_ISP2300]; } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) { blob = &qla_fw_blobs[FW_ISP2322]; } else if (IS_QLA24XX_TYPE(ha)) { blob = &qla_fw_blobs[FW_ISP24XX]; } else if (IS_QLA25XX(ha)) { blob = &qla_fw_blobs[FW_ISP25XX]; } else if (IS_QLA81XX(ha)) { blob = &qla_fw_blobs[FW_ISP81XX]; } else if (IS_QLA82XX(ha)) { blob = &qla_fw_blobs[FW_ISP82XX]; } else if (IS_QLA2031(ha)) { blob = &qla_fw_blobs[FW_ISP2031]; } else if (IS_QLA8031(ha)) { blob = &qla_fw_blobs[FW_ISP8031]; } else if (IS_QLA27XX(ha)) { blob = &qla_fw_blobs[FW_ISP27XX]; } else if (IS_QLA28XX(ha)) { blob = &qla_fw_blobs[FW_ISP28XX]; } else { return NULL; } if (!blob->name) return NULL; mutex_lock(&qla_fw_lock); if (blob->fw) goto out; if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { ql_log(ql_log_warn, vha, 0x0063, "Failed to load firmware image (%s).\n", blob->name); blob->fw = NULL; blob = NULL; } out: mutex_unlock(&qla_fw_lock); return blob; } static void qla2x00_release_firmware(void) { struct fw_blob *blob; mutex_lock(&qla_fw_lock); for (blob = qla_fw_blobs; blob->name; blob++) release_firmware(blob->fw); mutex_unlock(&qla_fw_lock); } static void qla_pci_error_cleanup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); struct qla_qpair *qpair = NULL; struct scsi_qla_host *vp, *tvp; fc_port_t *fcport; int i; unsigned long flags; ql_dbg(ql_dbg_aer, vha, 0x9000, "%s\n", __func__); ha->chip_reset++; ha->base_qpair->chip_reset = ha->chip_reset; for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) ha->queue_pair_map[i]->chip_reset = ha->base_qpair->chip_reset; } /* * purge mailbox might take a while. 
Slot Reset/chip reset * will take care of the purge */ mutex_lock(&ha->mq_lock); ha->base_qpair->online = 0; list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = 0; wmb(); mutex_unlock(&ha->mq_lock); qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } spin_unlock_irqrestore(&ha->vport_slock, flags); /* Clear all async request states across all VPs. */ list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); list_for_each_entry(fcport, &vp->vp_fcports, list) fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } spin_unlock_irqrestore(&ha->vport_slock, flags); } static pci_ers_result_t qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { scsi_qla_host_t *vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = vha->hw; pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET; ql_log(ql_log_warn, vha, 0x9000, "PCI error detected, state %x.\n", state); ha->pci_error_state = QLA_PCI_ERR_DETECTED; if (!atomic_read(&pdev->enable_cnt)) { ql_log(ql_log_info, vha, 0xffff, "PCI device is disabled,state %x\n", state); ret = PCI_ERS_RESULT_NEED_RESET; goto out; } switch (state) { case pci_channel_io_normal: qla_pci_set_eeh_busy(vha); if (ql2xmqsupport || ql2xnvmeenable) { set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } ret = PCI_ERS_RESULT_CAN_RECOVER; break; case pci_channel_io_frozen: qla_pci_set_eeh_busy(vha); ret = PCI_ERS_RESULT_NEED_RESET; break; case pci_channel_io_perm_failure: ha->flags.pci_channel_io_perm_failure = 1; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); if (ql2xmqsupport || ql2xnvmeenable) { set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } ret = PCI_ERS_RESULT_DISCONNECT; } out: ql_dbg(ql_dbg_aer, vha, 0x600d, "PCI error detected returning [%x].\n", ret); return ret; } static pci_ers_result_t qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) { int risc_paused = 0; uint32_t stat; unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = base_vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; ql_log(ql_log_warn, base_vha, 0x9000, "mmio enabled\n"); ha->pci_error_state = QLA_PCI_MMIO_ENABLED; if (IS_QLA82XX(ha)) return PCI_ERS_RESULT_RECOVERED; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, base_vha, 0x803f, "During mmio enabled, PCI/Register disconnect still detected.\n"); goto out; } spin_lock_irqsave(&ha->hardware_lock, flags); if (IS_QLA2100(ha) || IS_QLA2200(ha)){ stat = rd_reg_word(&reg->hccr); if (stat & HCCR_RISC_PAUSE) risc_paused = 1; } else if (IS_QLA23XX(ha)) { stat = rd_reg_dword(&reg->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) risc_paused = 1; } else if (IS_FWI2_CAPABLE(ha)) { stat = rd_reg_dword(&reg24->host_status); if (stat & HSRX_RISC_PAUSED) risc_paused = 1; } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (risc_paused) { ql_log(ql_log_info, base_vha, 0x9003, "RISC paused 
-- mmio_enabled, Dumping firmware.\n"); qla2xxx_dump_fw(base_vha); } out: /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ ql_dbg(ql_dbg_aer, base_vha, 0x600d, "mmio enabled returning.\n"); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t qla2xxx_pci_slot_reset(struct pci_dev *pdev) { pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = base_vha->hw; int rc; struct qla_qpair *qpair = NULL; ql_log(ql_log_warn, base_vha, 0x9004, "Slot Reset.\n"); ha->pci_error_state = QLA_PCI_SLOT_RESET; /* Workaround: the qla2xxx driver accesses hardware early in the recovery path and * needs the error state to be pci_channel_io_normal; * otherwise mailbox commands time out. */ pdev->error_state = pci_channel_io_normal; pci_restore_state(pdev); /* pci_restore_state() clears the device's saved_state flag, * so save the restored state again to set it for the next reset */ pci_save_state(pdev); if (ha->mem_only) rc = pci_enable_device_mem(pdev); else rc = pci_enable_device(pdev); if (rc) { ql_log(ql_log_warn, base_vha, 0x9005, "Can't re-enable PCI device after reset.\n"); goto exit_slot_reset; } if (ha->isp_ops->pci_config(base_vha)) goto exit_slot_reset; mutex_lock(&ha->mq_lock); list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = 1; mutex_unlock(&ha->mq_lock); ha->flags.eeh_busy = 0; base_vha->flags.online = 1; set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); ha->isp_ops->abort_isp(base_vha); clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (qla2x00_isp_reg_stat(ha)) { ha->flags.eeh_busy = 1; qla_pci_error_cleanup(base_vha); ql_log(ql_log_warn, base_vha, 0x9005, "Device unable to recover from PCI error.\n"); } else { ret = PCI_ERS_RESULT_RECOVERED; } exit_slot_reset: ql_dbg(ql_dbg_aer, base_vha, 0x900e, "Slot Reset returning %x.\n", ret); return ret; } static void qla2xxx_pci_resume(struct pci_dev *pdev) { scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = base_vha->hw; int ret; ql_log(ql_log_warn, base_vha, 0x900f, "Pci Resume.\n"); ret = qla2x00_wait_for_hba_online(base_vha); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x9002, "The device failed to resume I/O from slot/link_reset.\n"); } ha->pci_error_state = QLA_PCI_RESUME; ql_dbg(ql_dbg_aer, base_vha, 0x600d, "Pci Resume returning.\n"); } void qla_pci_set_eeh_busy(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); bool do_cleanup = false; unsigned long flags; if (ha->flags.eeh_busy) return; spin_lock_irqsave(&base_vha->work_lock, flags); if (!ha->flags.eeh_busy) { ha->eeh_jif = jiffies; ha->flags.eeh_flush = 0; ha->flags.eeh_busy = 1; do_cleanup = true; } spin_unlock_irqrestore(&base_vha->work_lock, flags); if (do_cleanup) qla_pci_error_cleanup(base_vha); } /* * This routine schedules a task to pause I/O; it may be called from interrupt context * when the caller sees a PCIe error event (register reads return all 0xf's). */ void qla_schedule_eeh_work(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); if (ha->flags.eeh_busy) return; set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); } static void qla_pci_reset_prepare(struct pci_dev *pdev) { scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = base_vha->hw; struct qla_qpair *qpair; ql_log(ql_log_warn, base_vha, 0xffff, "%s.\n", __func__); /* * PCI FLR/function reset is about to reset the * slot. 
Stop the chip to stop all DMA access. * It is assumed that pci_reset_done will be called * after FLR to resume Chip operation. */ ha->flags.eeh_busy = 1; mutex_lock(&ha->mq_lock); list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = 0; mutex_unlock(&ha->mq_lock); set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); qla2x00_abort_isp_cleanup(base_vha); qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); } static void qla_pci_reset_done(struct pci_dev *pdev) { scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = base_vha->hw; struct qla_qpair *qpair; ql_log(ql_log_warn, base_vha, 0xffff, "%s.\n", __func__); /* * FLR just completed by PCI layer. Resume adapter */ ha->flags.eeh_busy = 0; mutex_lock(&ha->mq_lock); list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = 1; mutex_unlock(&ha->mq_lock); base_vha->flags.online = 1; ha->isp_ops->abort_isp(base_vha); clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); } static void qla2xxx_map_queues(struct Scsi_Host *shost) { scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) blk_mq_map_queues(qmap); else blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); } struct scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, .name = QLA2XXX_DRIVER_NAME, .queuecommand = qla2xxx_queuecommand, .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = qla2xxx_eh_abort, .eh_should_retry_cmd = fc_eh_should_retry_cmd, .eh_device_reset_handler = qla2xxx_eh_device_reset, .eh_target_reset_handler = qla2xxx_eh_target_reset, .eh_bus_reset_handler = qla2xxx_eh_bus_reset, .eh_host_reset_handler = qla2xxx_eh_host_reset, .slave_configure = qla2xxx_slave_configure, .slave_alloc = qla2xxx_slave_alloc, .slave_destroy = qla2xxx_slave_destroy, .scan_finished = qla2xxx_scan_finished, .scan_start = qla2xxx_scan_start, .change_queue_depth = scsi_change_queue_depth, .map_queues = qla2xxx_map_queues, .this_id = -1, .cmd_per_lun = 3, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .shost_groups = qla2x00_host_groups, .supported_mode = MODE_INITIATOR, .track_queue_depth = 1, .cmd_size = sizeof(srb_t), }; static const struct pci_error_handlers qla2xxx_err_handler = { .error_detected = qla2xxx_pci_error_detected, .mmio_enabled = qla2xxx_pci_mmio_enabled, .slot_reset = qla2xxx_pci_slot_reset, .resume = qla2xxx_pci_resume, .reset_prepare = qla_pci_reset_prepare, .reset_done = qla_pci_reset_done, }; static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) }, 
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); static struct pci_driver qla2xxx_pci_driver = { .name = QLA2XXX_DRIVER_NAME, .driver = { .owner = THIS_MODULE, }, .id_table = qla2xxx_pci_tbl, .probe = qla2x00_probe_one, .remove = qla2x00_remove_one, .shutdown = qla2x00_shutdown, .err_handler = &qla2xxx_err_handler, }; static const struct file_operations apidev_fops = { .owner = THIS_MODULE, .llseek = noop_llseek, }; /** * qla2x00_module_init - Module initialization. **/ static int __init qla2x00_module_init(void) { int ret = 0; BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); BUILD_BUG_ON(sizeof(cont_entry_t) != 64); BUILD_BUG_ON(sizeof(init_cb_t) != 96); BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); BUILD_BUG_ON(sizeof(request_t) != 64); BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64); BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604); BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); 
BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196); BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE); BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24); BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16); BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336); BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64); BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64); BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52); BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64); BUILD_BUG_ON(sizeof(sts21_entry_t) != 64); BUILD_BUG_ON(sizeof(sts22_entry_t) != 64); BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64); BUILD_BUG_ON(sizeof(sts_entry_t) != 64); BUILD_BUG_ON(sizeof(sw_info_t) != 32); BUILD_BUG_ON(sizeof(target_id_t) != 2); qla_trace_init(); /* Allocate cache for SRBs. */ srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, SLAB_HWCACHE_ALIGN, NULL); if (srb_cachep == NULL) { ql_log(ql_log_fatal, NULL, 0x0001, "Unable to allocate SRB cache...Failing load!.\n"); return -ENOMEM; } /* Initialize target kmem_cache and mem_pools */ ret = qlt_init(); if (ret < 0) { goto destroy_cache; } else if (ret > 0) { /* * If initiator mode is explictly disabled by qlt_init(), * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from * performing scsi_scan_target() during LOOP UP event. */ qla2xxx_transport_functions.disable_target_scan = 1; qla2xxx_transport_vport_functions.disable_target_scan = 1; } /* Derive version string. 
*/ strcpy(qla2x00_version_str, QLA2XXX_VERSION); if (ql2xextended_error_logging) strcat(qla2x00_version_str, "-debug"); if (ql2xextended_error_logging == 1) ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; qla2xxx_transport_template = fc_attach_transport(&qla2xxx_transport_functions); if (!qla2xxx_transport_template) { ql_log(ql_log_fatal, NULL, 0x0002, "fc_attach_transport failed...Failing load!.\n"); ret = -ENODEV; goto qlt_exit; } apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); if (apidev_major < 0) { ql_log(ql_log_fatal, NULL, 0x0003, "Unable to register char device %s.\n", QLA2XXX_APIDEV); } qla2xxx_transport_vport_template = fc_attach_transport(&qla2xxx_transport_vport_functions); if (!qla2xxx_transport_vport_template) { ql_log(ql_log_fatal, NULL, 0x0004, "fc_attach_transport vport failed...Failing load!.\n"); ret = -ENODEV; goto unreg_chrdev; } ql_log(ql_log_info, NULL, 0x0005, "QLogic Fibre Channel HBA Driver: %s.\n", qla2x00_version_str); ret = pci_register_driver(&qla2xxx_pci_driver); if (ret) { ql_log(ql_log_fatal, NULL, 0x0006, "pci_register_driver failed...ret=%d Failing load!.\n", ret); goto release_vport_transport; } return ret; release_vport_transport: fc_release_transport(qla2xxx_transport_vport_template); unreg_chrdev: if (apidev_major >= 0) unregister_chrdev(apidev_major, QLA2XXX_APIDEV); fc_release_transport(qla2xxx_transport_template); qlt_exit: qlt_exit(); destroy_cache: kmem_cache_destroy(srb_cachep); qla_trace_uninit(); return ret; } /** * qla2x00_module_exit - Module cleanup. **/ static void __exit qla2x00_module_exit(void) { pci_unregister_driver(&qla2xxx_pci_driver); qla2x00_release_firmware(); kmem_cache_destroy(ctx_cachep); fc_release_transport(qla2xxx_transport_vport_template); if (apidev_major >= 0) unregister_chrdev(apidev_major, QLA2XXX_APIDEV); fc_release_transport(qla2xxx_transport_template); qlt_exit(); kmem_cache_destroy(srb_cachep); qla_trace_uninit(); } module_init(qla2x00_module_init); module_exit(qla2x00_module_exit); MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_ISP21XX); MODULE_FIRMWARE(FW_FILE_ISP22XX); MODULE_FIRMWARE(FW_FILE_ISP2300); MODULE_FIRMWARE(FW_FILE_ISP2322); MODULE_FIRMWARE(FW_FILE_ISP24XX); MODULE_FIRMWARE(FW_FILE_ISP25XX);
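/*
 * Editor's note — illustrative sketch only, not part of qla_os.c:
 * qla2x00_module_init() above uses the common kernel idiom of unwinding
 * partially completed setup through goto labels in reverse order
 * (a pci_register_driver() failure walks back through the vport transport,
 * the char device, the FC transport, qlt_init() and the SRB cache).
 * A minimal stand-alone version of that idiom, with hypothetical
 * setup_*()/teardown_*() helpers, looks like this:
 */
#if 0	/* example only, never compiled */
static int __init example_module_init(void)
{
	int ret;

	ret = setup_a();	/* hypothetical step 1 */
	if (ret)
		return ret;

	ret = setup_b();	/* hypothetical step 2, depends on step 1 */
	if (ret)
		goto undo_a;

	ret = setup_c();	/* hypothetical step 3, depends on step 2 */
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();		/* undo in reverse order of setup */
undo_a:
	teardown_a();
	return ret;
}
#endif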
linux-master
drivers/scsi/qla2xxx/qla_os.c
// SPDX-License-Identifier: GPL-2.0-only /* * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx * * based on qla2x00t.c code: * * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <[email protected]> * Copyright (C) 2004 - 2005 Leonid Stoljar * Copyright (C) 2006 Nathaniel Clark <[email protected]> * Copyright (C) 2006 - 2010 ID7 Ltd. * * Forward port and refactoring to modern qla2xxx and target/configfs * * Copyright (C) 2010-2013 Nicholas A. Bellinger <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/workqueue.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "qla_def.h" #include "qla_target.h" static int ql2xtgt_tape_enable; module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xtgt_tape_enable, "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER."); static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; module_param(qlini_mode, charp, S_IRUGO); MODULE_PARM_DESC(qlini_mode, "Determines when initiator mode will be enabled. Possible values: " "\"exclusive\" - initiator mode will be enabled on load, " "disabled on enabling target mode and then on disabling target mode " "enabled back; " "\"disabled\" - initiator mode will never be enabled; " "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated " "when ready " "\"enabled\" (default) - initiator mode will always stay enabled."); int ql2xuctrlirq = 1; module_param(ql2xuctrlirq, int, 0644); MODULE_PARM_DESC(ql2xuctrlirq, "User to control IRQ placement via smp_affinity." "Valid with qlini_mode=disabled." "1(default): enable"); int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; static int qla_sam_status = SAM_STAT_BUSY; static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */ /* * From scsi/fc/fc_fcp.h */ enum fcp_resp_rsp_codes { FCP_TMF_CMPL = 0, FCP_DATA_LEN_INVALID = 1, FCP_CMND_FIELDS_INVALID = 2, FCP_DATA_PARAM_MISMATCH = 3, FCP_TMF_REJECTED = 4, FCP_TMF_FAILED = 5, FCP_TMF_INVALID_LUN = 9, }; /* * fc_pri_ta from scsi/fc/fc_fcp.h */ #define FCP_PTA_SIMPLE 0 /* simple task attribute */ #define FCP_PTA_HEADQ 1 /* head of queue task attribute */ #define FCP_PTA_ORDERED 2 /* ordered task attribute */ #define FCP_PTA_ACA 4 /* auto. contingent allegiance */ #define FCP_PTA_MASK 7 /* mask for task attribute field */ #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ /* * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which * must be called under HW lock and could unlock/lock it inside. * It isn't an issue, since in the current implementation on the time when * those functions are called: * * - Either context is IRQ and only IRQ handler can modify HW data, * including rings related fields, * * - Or access to target mode variables from struct qla_tgt doesn't * cross those functions boundaries, except tgt_stop, which * additionally protected by irq_cmd_count. 
*/ /* Predefs for callbacks handed to qla2xxx LLD */ static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, struct atio_from_isp *pkt, uint8_t); static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp, response_t *pkt); static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); static void qlt_disable_vha(struct scsi_qla_host *vha); static void qlt_clear_tgt_db(struct qla_tgt *tgt); static void qlt_send_notify_ack(struct qla_qpair *qpair, struct imm_ntfy_from_isp *ntfy, uint32_t add_flags, uint16_t resp_code, int resp_code_valid, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *imm, int ha_locked); static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, fc_port_t *fcport, bool local); void qlt_unreg_sess(struct fc_port *sess); static void qlt_24xx_handle_abts(struct scsi_qla_host *, struct abts_recv_from_24xx *); static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, uint16_t); static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t); static inline uint32_t qlt_make_handle(struct qla_qpair *); /* * Global Variables */ static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; struct kmem_cache *qla_tgt_plogi_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); static const char *prot_op_str(u32 prot_op) { switch (prot_op) { case TARGET_PROT_NORMAL: return "NORMAL"; case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; case TARGET_PROT_DIN_PASS: return "DIN_PASS"; case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; default: return "UNKNOWN"; } } /* This API intentionally takes dest as a parameter, rather than returning * int value to avoid caller forgetting to issue wmb() after the store */ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) { scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); *dest = atomic_inc_return(&base_vha->generation_tick); /* memory barrier */ wmb(); } /* Might release hw lock, then reaquire!! 
*/ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) { /* Send marker if required */ if (unlikely(vha->marker_needed != 0)) { int rc = qla2x00_issue_marker(vha, vha_locked); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt, vha, 0xe03d, "qla_target(%d): issue_marker() failed\n", vha->vp_idx); } return rc; } return QLA_SUCCESS; } struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id) { struct scsi_qla_host *host; uint32_t key; if (vha->d_id.b.area == d_id.area && vha->d_id.b.domain == d_id.domain && vha->d_id.b.al_pa == d_id.al_pa) return vha; key = be_to_port_id(d_id).b24; host = btree_lookup32(&vha->hw->host_map, key); if (!host) ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005, "Unable to find host %06x\n", key); return host; } static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) { unsigned long flags; spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); vha->hw->tgt.num_pend_cmds++; if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) vha->qla_stats.stat_max_pend_cmds = vha->hw->tgt.num_pend_cmds; spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) { unsigned long flags; spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); vha->hw->tgt.num_pend_cmds--; spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, struct atio_from_isp *atio, uint8_t ha_locked) { struct qla_tgt_sess_op *u; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; if (tgt->tgt_stop) { ql_dbg(ql_dbg_async, vha, 0x502c, "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped", vha->vp_idx); goto out_term; } u = kzalloc(sizeof(*u), GFP_ATOMIC); if (u == NULL) goto out_term; u->vha = vha; memcpy(&u->atio, atio, sizeof(*atio)); INIT_LIST_HEAD(&u->cmd_list); spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&u->cmd_list, &vha->unknown_atio_list); spin_unlock_irqrestore(&vha->cmd_list_lock, flags); schedule_delayed_work(&vha->unknown_atio_work, 1); out: return; out_term: qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0); goto out; } static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, uint8_t ha_locked) { struct qla_tgt_sess_op *u, *t; scsi_qla_host_t *host; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; uint8_t queued = 0; list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { if (u->aborted) { ql_dbg(ql_dbg_async, vha, 0x502e, "Freeing unknown %s %p, because of Abort\n", "ATIO_TYPE7", u); qlt_send_term_exchange(vha->hw->base_qpair, NULL, &u->atio, ha_locked, 0); goto abort; } host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); if (host != NULL) { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, "Requeuing unknown ATIO_TYPE7 %p\n", u); qlt_24xx_atio_pkt(host, &u->atio, ha_locked); } else if (tgt->tgt_stop) { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a, "Freeing unknown %s %p, because tgt is being stopped\n", "ATIO_TYPE7", u); qlt_send_term_exchange(vha->hw->base_qpair, NULL, &u->atio, ha_locked, 0); } else { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d, "Reschedule u %p, vha %p, host %p\n", u, vha, host); if (!queued) { queued = 1; schedule_delayed_work(&vha->unknown_atio_work, 1); } continue; } abort: spin_lock_irqsave(&vha->cmd_list_lock, flags); list_del(&u->cmd_list); spin_unlock_irqrestore(&vha->cmd_list_lock, flags); kfree(u); } } void 
qlt_unknown_atio_work_fn(struct work_struct *work) { struct scsi_qla_host *vha = container_of(to_delayed_work(work), struct scsi_qla_host, unknown_atio_work); qlt_try_to_dequeue_unknown_atios(vha, 0); } static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint8_t ha_locked) { ql_dbg(ql_dbg_tgt, vha, 0xe072, "%s: qla_target(%d): type %x ox_id %04x\n", __func__, vha->vp_idx, atio->u.raw.entry_type, be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); switch (atio->u.raw.entry_type) { case ATIO_TYPE7: { struct scsi_qla_host *host = qla_find_host_by_d_id(vha, atio->u.isp24.fcp_hdr.d_id); if (unlikely(NULL == host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03e, "qla_target(%d): Received ATIO_TYPE7 " "with unknown d_id %x:%x:%x\n", vha->vp_idx, atio->u.isp24.fcp_hdr.d_id.domain, atio->u.isp24.fcp_hdr.d_id.area, atio->u.isp24.fcp_hdr.d_id.al_pa); qlt_queue_unknown_atio(vha, atio, ha_locked); break; } if (unlikely(!list_empty(&vha->unknown_atio_list))) qlt_try_to_dequeue_unknown_atios(vha, ha_locked); qlt_24xx_atio_pkt(host, atio, ha_locked); break; } case IMMED_NOTIFY_TYPE: { struct scsi_qla_host *host = vha; struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)atio; qlt_issue_marker(vha, ha_locked); if ((entry->u.isp24.vp_index != 0xFF) && (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) { host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03f, "qla_target(%d): Received " "ATIO (IMMED_NOTIFY_TYPE) " "with unknown vp_index %d\n", vha->vp_idx, entry->u.isp24.vp_index); break; } } qlt_24xx_atio_pkt(host, atio, ha_locked); break; } case VP_RPT_ID_IOCB_TYPE: qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)atio); break; case ABTS_RECV_24XX: { struct abts_recv_from_24xx *entry = (struct abts_recv_from_24xx *)atio; struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); unsigned long flags; if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe00a, "qla_target(%d): Response pkt (ABTS_RECV_24XX) " "received, with unknown vp_index %d\n", vha->vp_idx, entry->vp_index); break; } if (!ha_locked) spin_lock_irqsave(&host->hw->hardware_lock, flags); qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); if (!ha_locked) spin_unlock_irqrestore(&host->hw->hardware_lock, flags); break; } /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ default: ql_dbg(ql_dbg_tgt, vha, 0xe040, "qla_target(%d): Received unknown ATIO atio " "type %x\n", vha->vp_idx, atio->u.raw.entry_type); break; } return false; } void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, struct rsp_que *rsp, response_t *pkt) { switch (pkt->entry_type) { case CTIO_CRC2: ql_dbg(ql_dbg_tgt, vha, 0xe073, "qla_target(%d):%s: CRC2 Response pkt\n", vha->vp_idx, __func__); fallthrough; case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe041, "qla_target(%d): Response pkt (CTIO_TYPE7) " "received, with unknown vp_index %d\n", vha->vp_idx, entry->vp_index); break; } qlt_response_pkt(host, rsp, pkt); break; } case IMMED_NOTIFY_TYPE: { struct scsi_qla_host *host; struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)pkt; host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe042, "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " "received, with unknown vp_index %d\n", vha->vp_idx, entry->u.isp24.vp_index); break; 
} qlt_response_pkt(host, rsp, pkt); break; } case NOTIFY_ACK_TYPE: { struct scsi_qla_host *host = vha; struct nack_to_isp *entry = (struct nack_to_isp *)pkt; if (0xFF != entry->u.isp24.vp_index) { host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe043, "qla_target(%d): Response " "pkt (NOTIFY_ACK_TYPE) " "received, with unknown " "vp_index %d\n", vha->vp_idx, entry->u.isp24.vp_index); break; } } qlt_response_pkt(host, rsp, pkt); break; } case ABTS_RECV_24XX: { struct abts_recv_from_24xx *entry = (struct abts_recv_from_24xx *)pkt; struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe044, "qla_target(%d): Response pkt " "(ABTS_RECV_24XX) received, with unknown " "vp_index %d\n", vha->vp_idx, entry->vp_index); break; } qlt_response_pkt(host, rsp, pkt); break; } case ABTS_RESP_24XX: { struct abts_resp_to_24xx *entry = (struct abts_resp_to_24xx *)pkt; struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe045, "qla_target(%d): Response pkt " "(ABTS_RECV_24XX) received, with unknown " "vp_index %d\n", vha->vp_idx, entry->vp_index); break; } qlt_response_pkt(host, rsp, pkt); break; } default: qlt_response_pkt(vha, rsp, pkt); break; } } /* * All qlt_plogi_ack_t operations are protected by hardware_lock */ static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, struct imm_ntfy_from_isp *ntfy, int type) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_NACK); if (!e) return QLA_FUNCTION_FAILED; e->u.nack.fcport = fcport; e->u.nack.type = type; memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); return qla2x00_post_work(vha, e); } static void qla2x00_async_nack_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; unsigned long flags; ql_dbg(ql_dbg_disc, vha, 0x20f2, "Async done-%s res %x %8phC type %d\n", sp->name, res, sp->fcport->port_name, sp->type); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); sp->fcport->flags &= ~FCF_ASYNC_SENT; sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; switch (sp->type) { case SRB_NACK_PLOGI: sp->fcport->login_gen++; sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; sp->fcport->logout_on_delete = 1; sp->fcport->plogi_nack_done_deadline = jiffies + HZ; sp->fcport->send_els_logo = 0; if (sp->fcport->flags & FCF_FCSP_DEVICE) { ql_dbg(ql_dbg_edif, vha, 0x20ef, "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__, sp->fcport->port_name); qla2x00_set_fcport_disc_state(sp->fcport, DSC_LOGIN_AUTH_PEND); qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, sp->fcport->d_id.b24); qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24, 0, sp->fcport); } break; case SRB_NACK_PRLI: sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; sp->fcport->deleted = 0; sp->fcport->send_els_logo = 0; if (!sp->fcport->login_succ && !IS_SW_RESV_ADDR(sp->fcport->d_id)) { sp->fcport->login_succ = 1; vha->fcport_count++; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_sched_upd_fcport(sp->fcport); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; qla2x00_set_fcport_disc_state(sp->fcport, DSC_LOGIN_COMPLETE); sp->fcport->deleted = 0; sp->fcport->logout_on_delete = 1; } break; case SRB_NACK_LOGO: sp->fcport->login_gen++; sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); break; } 
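	/*
	 * fcport state for this NACK type has been updated under
	 * tgt.sess_lock (taken above); drop the lock and put the SRB
	 * reference that kept this sp alive for the completion.
	 */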
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, struct imm_ntfy_from_isp *ntfy, int type) { int rval = QLA_FUNCTION_FAILED; srb_t *sp; char *c = NULL; fcport->flags |= FCF_ASYNC_SENT; switch (type) { case SRB_NACK_PLOGI: fcport->fw_login_state = DSC_LS_PLOGI_PEND; c = "PLOGI"; if (vha->hw->flags.edif_enabled && (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) fcport->flags |= FCF_FCSP_DEVICE; break; case SRB_NACK_PRLI: fcport->fw_login_state = DSC_LS_PRLI_PEND; fcport->deleted = 0; c = "PRLI"; break; case SRB_NACK_LOGO: fcport->fw_login_state = DSC_LS_LOGO_PEND; c = "LOGO"; break; } sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; sp->type = type; sp->name = "nack"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, qla2x00_async_nack_sp_done); sp->u.iocb_cmd.u.nack.ntfy = ntfy; ql_dbg(ql_dbg_disc, vha, 0x20f4, "Async-%s %8phC hndl %x %s\n", sp->name, fcport->port_name, sp->handle, c); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~FCF_ASYNC_SENT; return rval; } void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { fc_port_t *t; switch (e->u.nack.type) { case SRB_NACK_PRLI: t = e->u.nack.fcport; flush_work(&t->del_work); flush_work(&t->free_work); mutex_lock(&vha->vha_tgt.tgt_mutex); t = qlt_create_sess(vha, e->u.nack.fcport, 0); mutex_unlock(&vha->vha_tgt.tgt_mutex); if (t) { ql_log(ql_log_info, vha, 0xd034, "%s create sess success %p", __func__, t); /* create sess has an extra kref */ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); } break; } qla24xx_async_notify_ack(vha, e->u.nack.fcport, (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type); } void qla24xx_delete_sess_fn(struct work_struct *work) { fc_port_t *fcport = container_of(work, struct fc_port, del_work); struct qla_hw_data *ha = NULL; if (!fcport || !fcport->vha || !fcport->vha->hw) return; ha = fcport->vha->hw; if (fcport->se_sess) { ha->tgt.tgt_ops->shutdown_sess(fcport); ha->tgt.tgt_ops->put_sess(fcport); } else { qlt_unreg_sess(fcport); } } /* * Called from qla2x00_reg_remote_port() */ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct fc_port *sess = fcport; unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (tgt->tgt_stop) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } if (fcport->disc_state == DSC_DELETE_PEND) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } if (!sess->se_sess) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_create_sess(vha, fcport, false); mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->tgt.sess_lock, flags); } else { if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } if (!kref_get_unless_zero(&sess->sess_kref)) { ql_dbg(ql_dbg_disc, vha, 0x2107, "%s: kref_get fail sess %8phC \n", __func__, sess->port_name); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, "qla_target(%u): %ssession for port %8phC " "(loop ID %d) reappeared\n", vha->vp_idx, sess->local ? 
"local " : "", sess->port_name, sess->loop_id); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, "Reappeared sess %p\n", sess); ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, (fcport->flags & FCF_CONF_COMP_SUPPORTED)); } if (sess && sess->local) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, "qla_target(%u): local session for " "port %8phC (loop ID %d) became global\n", vha->vp_idx, fcport->port_name, sess->loop_id); sess->local = 0; } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); } /* * This is a zero-base ref-counting solution, since hardware_lock * guarantees that ref_count is not modified concurrently. * Upon successful return content of iocb is undefined */ static struct qlt_plogi_ack_t * qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, struct imm_ntfy_from_isp *iocb) { struct qlt_plogi_ack_t *pla; lockdep_assert_held(&vha->hw->hardware_lock); list_for_each_entry(pla, &vha->plogi_ack_list, list) { if (pla->id.b24 == id->b24) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, "%s %d %8phC Term INOT due to new INOT", __func__, __LINE__, pla->iocb.u.isp24.port_name); qlt_send_term_imm_notif(vha, &pla->iocb, 1); memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); return pla; } } pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC); if (!pla) { ql_dbg(ql_dbg_async, vha, 0x5088, "qla_target(%d): Allocation of plogi_ack failed\n", vha->vp_idx); return NULL; } memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); pla->id = *id; list_add_tail(&pla->list, &vha->plogi_ack_list); return pla; } void qlt_plogi_ack_unref(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla) { struct imm_ntfy_from_isp *iocb = &pla->iocb; port_id_t port_id; uint16_t loop_id; fc_port_t *fcport = pla->fcport; BUG_ON(!pla->ref_count); pla->ref_count--; if (pla->ref_count) return; ql_dbg(ql_dbg_disc, vha, 0x5089, "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], le16_to_cpu(iocb->u.isp24.nport_handle), iocb->u.isp24.exchange_address, iocb->ox_id); port_id.b.domain = iocb->u.isp24.port_id[2]; port_id.b.area = iocb->u.isp24.port_id[1]; port_id.b.al_pa = iocb->u.isp24.port_id[0]; port_id.b.rsvd_1 = 0; loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); fcport->loop_id = loop_id; fcport->d_id = port_id; if (iocb->u.isp24.status_subcode == ELS_PLOGI) qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); else qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; } list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); } void qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, struct fc_port *sess, enum qlt_plogi_link_t link) { struct imm_ntfy_from_isp *iocb = &pla->iocb; /* Inc ref_count first because link might already be pointing at pla */ pla->ref_count++; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", sess, link, sess->port_name, iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], pla->ref_count, pla, link); if (link == QLT_PLOGI_LINK_CONFLICT) { switch (sess->disc_state) { case 
DSC_DELETED: case DSC_DELETE_PEND: pla->ref_count--; return; default: break; } } if (sess->plogi_link[link]) qlt_plogi_ack_unref(vha, sess->plogi_link[link]); if (link == QLT_PLOGI_LINK_SAME_WWN) pla->fcport = sess; sess->plogi_link[link] = pla; } typedef struct { /* These fields must be initialized by the caller */ port_id_t id; /* * number of cmds dropped while we were waiting for * initiator to ack LOGO initialize to 1 if LOGO is * triggered by a command, otherwise, to 0 */ int cmd_count; /* These fields are used by callee */ struct list_head list; } qlt_port_logo_t; static void qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) { qlt_port_logo_t *tmp; int res; if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { res = 0; goto out; } mutex_lock(&vha->vha_tgt.tgt_mutex); list_for_each_entry(tmp, &vha->logo_list, list) { if (tmp->id.b24 == logo->id.b24) { tmp->cmd_count += logo->cmd_count; mutex_unlock(&vha->vha_tgt.tgt_mutex); return; } } list_add_tail(&logo->list, &vha->logo_list); mutex_unlock(&vha->vha_tgt.tgt_mutex); res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id); mutex_lock(&vha->vha_tgt.tgt_mutex); list_del(&logo->list); mutex_unlock(&vha->vha_tgt.tgt_mutex); out: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, logo->cmd_count, res); } void qlt_free_session_done(struct work_struct *work) { struct fc_port *sess = container_of(work, struct fc_port, free_work); struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; bool logout_started = false; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; ql_dbg(ql_dbg_disc, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, sess->logout_on_delete, sess->keep_nport_handle, sess->send_els_logo); if (!IS_SW_RESV_ADDR(sess->d_id)) { qla2x00_mark_device_lost(vha, sess, 0); if (sess->send_els_logo) { qlt_port_logo_t logo; logo.id = sess->d_id; logo.cmd_count = 0; INIT_LIST_HEAD(&logo.list); if (!own) qlt_send_first_logo(vha, &logo); sess->send_els_logo = 0; } if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { int rc; if (!own || (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) { sess->logout_completed = 0; rc = qla2x00_post_async_logout_work(vha, sess, NULL); if (rc != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0xf085, "Schedule logo failed sess %p rc %d\n", sess, rc); else logout_started = true; } else if (own && (own->iocb.u.isp24.status_subcode == ELS_PRLI) && ha->flags.rida_fmt2) { rc = qla2x00_post_async_prlo_work(vha, sess, NULL); if (rc != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0xf085, "Schedule PRLO failed sess %p rc %d\n", sess, rc); else logout_started = true; } } /* if sess->logout_on_delete */ if (sess->nvme_flag & NVME_FLAG_REGISTERED && !(sess->nvme_flag & NVME_FLAG_DELETING)) { sess->nvme_flag |= NVME_FLAG_DELETING; qla_nvme_unregister_remote_port(sess); } if (ha->flags.edif_enabled && (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) { sess->edif.authok = 0; if (!ha->flags.host_shutting_down) { ql_dbg(ql_dbg_edif, vha, 0x911e, "%s wwpn %8phC calling qla2x00_release_all_sadb\n", __func__, sess->port_name); qla2x00_release_all_sadb(vha, sess); } 
else { ql_dbg(ql_dbg_edif, vha, 0x911e, "%s bypassing release_all_sadb\n", __func__); } qla_edif_clear_appdata(vha, sess); qla_edif_sess_down(vha, sess); } } /* * Release the target session for FC Nexus from fabric module code. */ if (sess->se_sess != NULL) ha->tgt.tgt_ops->free_session(sess); if (logout_started) { bool traced = false; u16 cnt = 0; while (!READ_ONCE(sess->logout_completed)) { if (!traced) { ql_dbg(ql_dbg_disc, vha, 0xf086, "%s: waiting for sess %p logout\n", __func__, sess); traced = true; } msleep(100); cnt++; /* * Driver timeout is set to 22 Sec, update count value to loop * long enough for log-out to complete before advancing. Otherwise, * straddling logout can interfere with re-login attempt. */ if (cnt > 230) break; } ql_dbg(ql_dbg_disc, vha, 0xf087, "%s: sess %p logout completed\n", __func__, sess); } if (sess->logo_ack_needed) { sess->logo_ack_needed = 0; qla24xx_async_notify_ack(vha, sess, (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); } spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->se_sess) { sess->se_sess = NULL; if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) tgt->sess_count--; } qla2x00_set_fcport_disc_state(sess, DSC_DELETED); sess->fw_login_state = DSC_LS_PORT_UNAVAIL; if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { vha->fcport_count--; sess->login_succ = 0; } qla2x00_clear_loop_id(sess); if (sess->conflict) { sess->conflict->login_pause = 0; sess->conflict = NULL; if (!test_bit(UNLOADING, &vha->dpc_flags)) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } { struct qlt_plogi_ack_t *con = sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; struct imm_ntfy_from_isp *iocb; own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; if (con) { iocb = &con->iocb; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, "se_sess %p / sess %p port %8phC is gone," " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", sess->se_sess, sess, sess->port_name, own ? "releasing own PLOGI" : "no own PLOGI pending", own ? own->ref_count : -1, iocb->u.isp24.port_name, con->ref_count); qlt_plogi_ack_unref(vha, con); sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", sess->se_sess, sess, sess->port_name, own ? "releasing own PLOGI" : "no own PLOGI pending", own ? 
own->ref_count : -1); } if (own) { sess->fw_login_state = DSC_LS_PLOGI_PEND; qlt_plogi_ack_unref(vha, own); sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; } } sess->explicit_logout = 0; spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); qla2x00_dfs_remove_rport(vha, sess); spin_lock_irqsave(&vha->work_lock, flags); sess->flags &= ~FCF_ASYNC_SENT; sess->deleted = QLA_SESS_DELETED; sess->free_pending = 0; spin_unlock_irqrestore(&vha->work_lock, flags); ql_dbg(ql_dbg_disc, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", sess, sess->port_name, vha->fcport_count); if (tgt && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) && !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) && (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { switch (vha->host->active_mode) { case MODE_INITIATOR: case MODE_DUAL: set_bit(RELOGIN_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); break; case MODE_TARGET: default: /* no-op */ break; } } if (vha->fcport_count == 0) wake_up_all(&vha->fcport_waitQ); } /* ha->tgt.sess_lock supposed to be held on entry */ void qlt_unreg_sess(struct fc_port *sess) { struct scsi_qla_host *vha = sess->vha; unsigned long flags; ql_dbg(ql_dbg_disc, sess->vha, 0x210a, "%s sess %p for deletion %8phC\n", __func__, sess, sess->port_name); spin_lock_irqsave(&sess->vha->work_lock, flags); if (sess->free_pending) { spin_unlock_irqrestore(&sess->vha->work_lock, flags); return; } sess->free_pending = 1; /* * Use FCF_ASYNC_SENT flag to block other cmds used in sess * management from being sent. */ sess->flags |= FCF_ASYNC_SENT; sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; spin_unlock_irqrestore(&sess->vha->work_lock, flags); if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; queue_work(sess->vha->hw->wq, &sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; struct fc_port *sess = NULL; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; unsigned long flags; loop_id = le16_to_cpu(n->u.isp24.nport_handle); if (loop_id == 0xFFFF) { /* Global event */ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); spin_lock_irqsave(&ha->tgt.sess_lock, flags); qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } else { spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } ql_dbg(ql_dbg_tgt, vha, 0xe000, "Using sess for qla_tgt_reset: %p\n", sess); if (!sess) { res = -ESRCH; return res; } ql_dbg(ql_dbg_tgt, vha, 0xe047, "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } static void qla24xx_chk_fcp_state(struct fc_port *sess) { if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) { sess->logout_on_delete = 0; sess->logo_ack_needed = 0; sess->fw_login_state = DSC_LS_PORT_UNAVAIL; } } void qlt_schedule_sess_for_deletion(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; unsigned long flags; u16 sec; switch (sess->disc_state) { case DSC_DELETE_PEND: return; case DSC_DELETED: if 
(!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) { if (tgt && tgt->tgt_stop && tgt->sess_count == 0) wake_up_all(&tgt->waitQ); if (sess->vha->fcport_count == 0) wake_up_all(&sess->vha->fcport_waitQ); return; } break; case DSC_UPD_FCPORT: /* * This port is not done reporting to upper layer. * let it finish */ sess->next_disc_state = DSC_DELETE_PEND; sec = jiffies_to_msecs(jiffies - sess->jiffies_at_registration)/1000; if (sess->sec_since_registration < sec && sec && !(sec % 5)) { sess->sec_since_registration = sec; ql_dbg(ql_dbg_disc, sess->vha, 0xffff, "%s %8phC : Slow Rport registration(%d Sec)\n", __func__, sess->port_name, sec); } return; default: break; } spin_lock_irqsave(&sess->vha->work_lock, flags); if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { spin_unlock_irqrestore(&sess->vha->work_lock, flags); return; } sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; spin_unlock_irqrestore(&sess->vha->work_lock, flags); sess->prli_pend_timer = 0; qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); qla24xx_chk_fcp_state(sess); ql_dbg(ql_log_warn, sess->vha, 0xe001, "Scheduling sess %p for deletion %8phC fc4_type %x\n", sess, sess->port_name, sess->fc4_type); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } static void qlt_clear_tgt_db(struct qla_tgt *tgt) { struct fc_port *sess; scsi_qla_host_t *vha = tgt->vha; list_for_each_entry(sess, &vha->vp_fcports, list) { if (sess->se_sess) qlt_schedule_sess_for_deletion(sess); } /* At this point tgt could be already dead */ } static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id, uint16_t *loop_id) { struct qla_hw_data *ha = vha->hw; dma_addr_t gid_list_dma; struct gid_list_info *gid_list, *gid; int res, rc, i; uint16_t entries; gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &gid_list_dma, GFP_KERNEL); if (!gid_list) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, "qla_target(%d): DMA Alloc failed of %u\n", vha->vp_idx, qla2x00_gid_list_size(ha)); return -ENOMEM; } /* Get list of logged in devices */ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, "qla_target(%d): get_id_list() failed: %x\n", vha->vp_idx, rc); res = -EBUSY; goto out_free_id_list; } gid = gid_list; res = -ENOENT; for (i = 0; i < entries; i++) { if (gid->al_pa == s_id.al_pa && gid->area == s_id.area && gid->domain == s_id.domain) { *loop_id = le16_to_cpu(gid->loop_id); res = 0; break; } gid = (void *)gid + ha->gid_list_info_size; } out_free_id_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), gid_list, gid_list_dma); return res; } /* * Adds an extra ref to allow to drop hw lock after adding sess to the list. * Caller must put it. */ static struct fc_port *qlt_create_sess( struct scsi_qla_host *vha, fc_port_t *fcport, bool local) { struct qla_hw_data *ha = vha->hw; struct fc_port *sess = fcport; unsigned long flags; if (vha->vha_tgt.qla_tgt->tgt_stop) return NULL; if (fcport->se_sess) { if (!kref_get_unless_zero(&sess->sess_kref)) { ql_dbg(ql_dbg_disc, vha, 0x20f6, "%s: kref_get_unless_zero failed for %8phC\n", __func__, sess->port_name); return NULL; } return fcport; } sess->tgt = vha->vha_tgt.qla_tgt; sess->local = local; /* * Under normal circumstances we want to logout from firmware when * session eventually ends and release corresponding nport handle. * In the exception cases (e.g. when new PLOGI is waiting) corresponding * code will adjust these flags as necessary. 
*/ sess->logout_on_delete = 1; sess->keep_nport_handle = 0; sess->logout_completed = 0; if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, &fcport->port_name[0], sess) < 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015, "(%d) %8phC check_initiator_node_acl failed\n", vha->vp_idx, fcport->port_name); return NULL; } else { kref_init(&fcport->sess_kref); /* * Take an extra reference to ->sess_kref here to handle * fc_port access across ->tgt.sess_lock reaquire. */ if (!kref_get_unless_zero(&sess->sess_kref)) { ql_dbg(ql_dbg_disc, vha, 0x20f7, "%s: kref_get_unless_zero failed for %8phC\n", __func__, sess->port_name); return NULL; } spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (!IS_SW_RESV_ADDR(sess->d_id)) vha->vha_tgt.qla_tgt->sess_count++; qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, "Adding sess %p se_sess %p to tgt %p sess_count %d\n", sess, sess->se_sess, vha->vha_tgt.qla_tgt, vha->vha_tgt.qla_tgt->sess_count); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " "s_id %x:%x:%x, confirmed completion %ssupported) added\n", vha->vp_idx, local ? "local " : "", fcport->port_name, fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); return sess; } /* * max_gen - specifies maximum session generation * at which this deletion requestion is still valid */ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct fc_port *sess = fcport; unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; if (!tgt) return; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); if (tgt->tgt_stop) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } if (!sess->se_sess) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } if (max_gen - sess->generation < 0) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, "Ignoring stale deletion request for se_sess %p / sess %p" " for port %8phC, req_gen %d, sess_gen %d\n", sess->se_sess, sess, sess->port_name, max_gen, sess->generation); return; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); sess->local = 1; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qlt_schedule_sess_for_deletion(sess); } static inline int test_tgt_sess_count(struct qla_tgt *tgt) { struct qla_hw_data *ha = tgt->ha; unsigned long flags; int res; /* * We need to protect against race, when tgt is freed before or * inside wake_up() */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, "tgt %p, sess_count=%d\n", tgt, tgt->sess_count); res = (tgt->sess_count == 0); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; } /* Called by tcm_qla2xxx configfs code */ int qlt_stop_phase1(struct qla_tgt *tgt) { struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = tgt->ha; unsigned long flags; mutex_lock(&ha->optrom_mutex); mutex_lock(&qla_tgt_mutex); if (tgt->tgt_stop || tgt->tgt_stopped) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, "Already in tgt->tgt_stop or tgt_stopped state\n"); mutex_unlock(&qla_tgt_mutex); mutex_unlock(&ha->optrom_mutex); return -EPERM; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", vha->host_no, vha); /* * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. * Lock is needed, because we still can get an incoming packet. 
*/ mutex_lock(&vha->vha_tgt.tgt_mutex); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt); mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, "Waiting for sess works (tgt %p)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); do { spin_unlock_irqrestore(&tgt->sess_work_lock, flags); flush_work(&tgt->sess_work); spin_lock_irqsave(&tgt->sess_work_lock, flags); } while (!list_empty(&tgt->sess_works_list)); spin_unlock_irqrestore(&tgt->sess_work_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); /* Big hammer */ if (!ha->flags.host_shutting_down && (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) qlt_disable_vha(vha); /* Wait for sessions to clear out (just in case) */ wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); mutex_unlock(&ha->optrom_mutex); return 0; } EXPORT_SYMBOL(qlt_stop_phase1); /* Called by tcm_qla2xxx configfs code */ void qlt_stop_phase2(struct qla_tgt *tgt) { scsi_qla_host_t *vha = tgt->vha; if (tgt->tgt_stopped) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, "Already in tgt->tgt_stopped state\n"); dump_stack(); return; } if (!tgt->tgt_stop) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, "%s: phase1 stop is not completed\n", __func__); dump_stack(); return; } mutex_lock(&tgt->ha->optrom_mutex); mutex_lock(&vha->vha_tgt.tgt_mutex); tgt->tgt_stop = 0; tgt->tgt_stopped = 1; mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&tgt->ha->optrom_mutex); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", tgt); switch (vha->qlini_mode) { case QLA2XXX_INI_MODE_EXCLUSIVE: vha->flags.online = 1; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; default: break; } } EXPORT_SYMBOL(qlt_stop_phase2); /* Called from qlt_remove_target() -> qla2x00_remove_one() */ static void qlt_release(struct qla_tgt *tgt) { scsi_qla_host_t *vha = tgt->vha; void *node; u64 key = 0; u16 i; struct qla_qpair_hint *h; struct qla_hw_data *ha = vha->hw; if (!tgt->tgt_stop && !tgt->tgt_stopped) qlt_stop_phase1(tgt); if (!tgt->tgt_stopped) qlt_stop_phase2(tgt); for (i = 0; i < vha->hw->max_qpairs + 1; i++) { unsigned long flags; h = &tgt->qphints[i]; if (h->qpair) { spin_lock_irqsave(h->qpair->qp_lock_ptr, flags); list_del(&h->hint_elem); spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags); h->qpair = NULL; } } kfree(tgt->qphints); mutex_lock(&qla_tgt_mutex); list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); mutex_unlock(&qla_tgt_mutex); btree_for_each_safe64(&tgt->lun_qpair_map, key, node) btree_remove64(&tgt->lun_qpair_map, key); btree_destroy64(&tgt->lun_qpair_map); if (vha->vp_idx) if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target && vha->vha_tgt.target_lport_ptr) ha->tgt.tgt_ops->remove_target(vha); vha->vha_tgt.qla_tgt = NULL; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, "Release of tgt %p finished\n", tgt); kfree(tgt); } /* ha->hardware_lock supposed to be held on entry */ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, const void *param, unsigned int param_size) { struct qla_tgt_sess_work_param *prm; unsigned long flags; prm = kzalloc(sizeof(*prm), GFP_ATOMIC); if (!prm) { ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, "qla_target(%d): Unable to create session " "work, command will be refused", 0); return -ENOMEM; } ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, "Scheduling work (type %d, prm %p)" " to find session for param %p (size %d, tgt %p)\n", type, prm, param, param_size, tgt); prm->type = type; 
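	/*
	 * Copy the caller's IOCB into the work item: hardware_lock is held
	 * here but will be long gone by the time sess_work runs, so the
	 * worker must not depend on the original buffer still being valid.
	 */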
memcpy(&prm->tm_iocb, param, param_size); spin_lock_irqsave(&tgt->sess_work_lock, flags); list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); spin_unlock_irqrestore(&tgt->sess_work_lock, flags); schedule_work(&tgt->sess_work); return 0; } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static void qlt_send_notify_ack(struct qla_qpair *qpair, struct imm_ntfy_from_isp *ntfy, uint32_t add_flags, uint16_t resp_code, int resp_code_valid, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) { struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; request_t *pkt; struct nack_to_isp *nack; if (!ha->flags.fw_started) return; ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (!pkt) { ql_dbg(ql_dbg_tgt, vha, 0xe049, "qla_target(%d): %s failed: unable to allocate " "request packet\n", vha->vp_idx, __func__); return; } if (vha->vha_tgt.qla_tgt != NULL) vha->vha_tgt.qla_tgt->notify_ack_expected++; pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); nack->u.isp24.srr_reject_code = srr_reject_code; nack->u.isp24.srr_reject_code_expl = srr_explan; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; /* TODO qualify this with EDIF enable */ if (ntfy->u.isp24.status_subcode == ELS_PLOGI && (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); } ql_dbg(ql_dbg_tgt, vha, 0xe005, "qla_target(%d): Sending 24xx Notify Ack %d\n", vha->vp_idx, nack->u.isp24.status); /* Memory Barrier */ wmb(); qla2x00_start_iocbs(vha, qpair->req); } static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) { struct scsi_qla_host *vha = mcmd->vha; struct qla_hw_data *ha = vha->hw; struct abts_resp_to_24xx *resp; __le32 f_ctl; uint32_t h; uint8_t *p; int rc; struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts; struct qla_qpair *qpair = mcmd->qpair; ql_dbg(ql_dbg_tgt, vha, 0xe006, "Sending task mgmt ABTS response (ha=%p, status=%x)\n", ha, mcmd->fc_tm_rsp); rc = qlt_check_reserve_free_req(qpair, 1); if (rc) { ql_dbg(ql_dbg_tgt, vha, 0xe04a, "qla_target(%d): %s failed: unable to allocate request packet\n", vha->vp_idx, __func__); return -EAGAIN; } resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr; memset(resp, 0, sizeof(*resp)); h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { /* * CTIO type 7 from the firmware doesn't provide a way to * know the initiator's LOOP ID, hence we can't find * the session and, so, the command. 
*/ return -EAGAIN; } else { qpair->req->outstanding_cmds[h] = (srb_t *)mcmd; } resp->handle = make_handle(qpair->req->id, h); resp->entry_type = ABTS_RESP_24XX; resp->entry_count = 1; resp->nport_handle = abts->nport_handle; resp->vp_index = vha->vp_idx; resp->sof_type = abts->sof_type; resp->exchange_address = abts->exchange_address; resp->fcp_hdr_le = abts->fcp_hdr_le; f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | F_CTL_LAST_SEQ | F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE); p = (uint8_t *)&f_ctl; resp->fcp_hdr_le.f_ctl[0] = *p++; resp->fcp_hdr_le.f_ctl[1] = *p++; resp->fcp_hdr_le.f_ctl[2] = *p; resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id; resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id; resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) { resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; resp->payload.ba_acct.low_seq_cnt = 0x0000; resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; } else { resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; resp->payload.ba_rjt.reason_code = BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; /* Other bytes are zero */ } vha->vha_tgt.qla_tgt->abts_resp_expected++; /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); return rc; } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, struct abts_recv_from_24xx *abts, uint32_t status, bool ids_reversed) { struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; struct abts_resp_to_24xx *resp; __le32 f_ctl; uint8_t *p; ql_dbg(ql_dbg_tgt, vha, 0xe006, "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", ha, abts, status); resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL); if (!resp) { ql_dbg(ql_dbg_tgt, vha, 0xe04a, "qla_target(%d): %s failed: unable to allocate " "request packet", vha->vp_idx, __func__); return; } resp->entry_type = ABTS_RESP_24XX; resp->handle = QLA_TGT_SKIP_HANDLE; resp->entry_count = 1; resp->nport_handle = abts->nport_handle; resp->vp_index = vha->vp_idx; resp->sof_type = abts->sof_type; resp->exchange_address = abts->exchange_address; resp->fcp_hdr_le = abts->fcp_hdr_le; f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | F_CTL_LAST_SEQ | F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE); p = (uint8_t *)&f_ctl; resp->fcp_hdr_le.f_ctl[0] = *p++; resp->fcp_hdr_le.f_ctl[1] = *p++; resp->fcp_hdr_le.f_ctl[2] = *p; if (ids_reversed) { resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id; resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id; } else { resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id; resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id; } resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; if (status == FCP_TMF_CMPL) { resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; resp->payload.ba_acct.low_seq_cnt = 0x0000; resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; } else { resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; resp->payload.ba_rjt.reason_code = BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; /* Other bytes are zero */ } vha->vha_tgt.qla_tgt->abts_resp_expected++; /* Memory Barrier */ wmb(); 
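	/*
	 * Hand the queued ABTS response IOCB to the firmware: ring the
	 * qpair's own request-queue doorbell when one is provided,
	 * otherwise fall back to qla2x00_start_iocbs().
	 */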
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver. */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * On entry we have the firmware's response to an ABTS response that
	 * this driver generated, so the ID fields in it are reversed.
	 */
	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}

	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ?
1 : 0); /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); if (mcmd) qlt_build_abts_resp_iocb(mcmd); else qlt_24xx_send_abts_resp(qpair, (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true); } /* drop cmds for the given lun * XXX only looks for cmds on the port through which lun reset was recieved * XXX does not go through the list of other port (which may have cmds * for the same lun) */ static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id) { struct qla_tgt_sess_op *op; struct qla_tgt_cmd *cmd; uint32_t key; unsigned long flags; key = sid_to_key(s_id); spin_lock_irqsave(&vha->cmd_list_lock, flags); list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { uint32_t op_key; u64 op_lun; op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); op_lun = scsilun_to_int( (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); if (op_key == key && op_lun == lun) op->aborted = true; } list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key; u64 cmd_lun; cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); cmd_lun = scsilun_to_int( (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); if (cmd_key == key && cmd_lun == lun) cmd->aborted = 1; } spin_unlock_irqrestore(&vha->cmd_list_lock, flags); } static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha, uint64_t unpacked_lun) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_qpair_hint *h = NULL; if (vha->flags.qpairs_available) { h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); if (!h) h = &tgt->qphints[0]; } else { h = &tgt->qphints[0]; } return h; } static void qlt_do_tmr_work(struct work_struct *work) { struct qla_tgt_mgmt_cmd *mcmd = container_of(work, struct qla_tgt_mgmt_cmd, work); struct qla_hw_data *ha = mcmd->vha->hw; int rc; uint32_t tag; unsigned long flags; switch (mcmd->tmr_func) { case QLA_TGT_ABTS: tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort); break; default: tag = 0; break; } rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun, mcmd->tmr_func, tag); if (rc != 0) { spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); switch (mcmd->tmr_func) { case QLA_TGT_ABTS: mcmd->fc_tm_rsp = FCP_TMF_REJECTED; qlt_build_abts_resp_iocb(mcmd); break; case QLA_TGT_LUN_RESET: case QLA_TGT_CLEAR_TS: case QLA_TGT_ABORT_TS: case QLA_TGT_CLEAR_ACA: case QLA_TGT_TARGET_RESET: qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio, qla_sam_status); break; case QLA_TGT_ABORT_ALL: case QLA_TGT_NEXUS_LOSS_SESS: case QLA_TGT_NEXUS_LOSS: qlt_send_notify_ack(mcmd->qpair, &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); break; } spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags); ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052, "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", mcmd->vha->vp_idx, rc); mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); } } /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts, struct fc_port *sess) { struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; struct qla_tgt_cmd *abort_cmd; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, "qla_target(%d): task abort (tag=%d)\n", vha->vp_idx, abts->exchange_addr_to_abort); mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (mcmd == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, "qla_target(%d): %s: Allocation of ABORT cmd failed", vha->vp_idx, __func__); return -ENOMEM; } 
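	/*
	 * mempool_alloc() does not zero the allocation, so clear it before
	 * filling in the ABTS management command below.
	 */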
memset(mcmd, 0, sizeof(*mcmd)); mcmd->cmd_type = TYPE_TGT_TMCMD; mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->tmr_func = QLA_TGT_ABTS; mcmd->qpair = h->qpair; mcmd->vha = vha; /* * LUN is looked up by target-core internally based on the passed * abts->exchange_addr_to_abort tag. */ mcmd->se_cmd.cpuid = h->cpuid; abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, le32_to_cpu(abts->exchange_addr_to_abort)); if (!abort_cmd) { mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); return -EIO; } mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun; if (abort_cmd->qpair) { mcmd->qpair = abort_cmd->qpair; mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr; mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID; } INIT_WORK(&mcmd->work, qlt_do_tmr_work); queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); return 0; } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts) { struct qla_hw_data *ha = vha->hw; struct fc_port *sess; uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort); be_id_t s_id; int rc; unsigned long flags; if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, "qla_target(%d): ABTS: Abort Sequence not " "supported\n", vha->vp_idx); qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, false); return; } if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, "qla_target(%d): ABTS: Unknown Exchange " "Address received\n", vha->vp_idx); qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, false); return; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, "qla_target(%d): task abort (s_id=%x:%x:%x, " "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain, abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag, le32_to_cpu(abts->fcp_hdr_le.parameter)); s_id = le_id_to_be(abts->fcp_hdr_le.s_id); spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); if (!sess) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, "qla_target(%d): task abort for non-existent session\n", vha->vp_idx); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, false); return; } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (sess->deleted) { qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, false); return; } rc = __qlt_24xx_handle_abts(vha, abts, sess); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", vha->vp_idx, rc); qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, false); return; } } /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) { struct scsi_qla_host *ha = mcmd->vha; struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; uint16_t temp; ql_dbg(ql_dbg_tgt, ha, 0xe008, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", ha, atio, resp_code); ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); if (ctio == NULL) { ql_dbg(ql_dbg_tgt, ha, 0xe04c, "qla_target(%d): %s failed: unable to allocate " "request packet\n", ha->vp_idx, __func__); return; } ctio->entry_type = CTIO_TYPE7; ctio->entry_count = 1; ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id); ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = ha->vp_idx; ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9)| CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; ctio->u.status1.flags = cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); ctio->u.status1.response_len = cpu_to_le16(8); ctio->u.status1.sense_data[0] = resp_code; /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(ha, qpair->req); } void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) { mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); } EXPORT_SYMBOL(qlt_free_mcmd); /* * ha->hardware_lock supposed to be held on entry. Might drop it, then * reacquire */ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) { struct atio_from_isp *atio = &cmd->atio; struct ctio7_to_24xx *ctio; uint16_t temp; struct scsi_qla_host *vha = cmd->vha; ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " "sense_key=%02x, asc=%02x, ascq=%02x", vha, atio, scsi_status, sense_key, asc, ascq); ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); if (!ctio) { ql_dbg(ql_dbg_async, vha, 0x3067, "qla2x00t(%ld): %s failed: unable to allocate request packet", vha->host_no, __func__); goto out; } ctio->entry_type = CTIO_TYPE7; ctio->entry_count = 1; ctio->handle = QLA_TGT_SKIP_HANDLE; ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id); ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = vha->vp_idx; ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; ctio->u.status1.flags = cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); ctio->u.status1.response_len = cpu_to_le16(18); ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); if (ctio->u.status1.residual != 0) ctio->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); /* Fixed format sense data. 
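	 * (0x70 response code; the sense key, ASC and ASCQ are filled in just below.)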
*/ ctio->u.status1.sense_data[0] = 0x70; ctio->u.status1.sense_data[2] = sense_key; /* Additional sense length */ ctio->u.status1.sense_data[7] = 0xa; /* ASC and ASCQ */ ctio->u.status1.sense_data[12] = asc; ctio->u.status1.sense_data[13] = ascq; /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); out: return; } /* callback from target fabric module code */ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) { struct scsi_qla_host *vha = mcmd->sess->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; struct qla_qpair *qpair = mcmd->qpair; bool free_mcmd = true; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, "TM response mcmd (%p) status %#x state %#x", mcmd, mcmd->fc_tm_rsp, mcmd->flags); spin_lock_irqsave(qpair->qp_lock_ptr, flags); if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. */ ql_dbg(ql_dbg_async, vha, 0xe100, "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), mcmd->reset_count, qpair->chip_reset); ha->tgt.tgt_ops->free_mcmd(mcmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return; } if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) { case ELS_LOGO: case ELS_PRLO: case ELS_TPRLO: ql_dbg(ql_dbg_disc, vha, 0x2106, "TM response logo %8phC status %#x state %#x", mcmd->sess->port_name, mcmd->fc_tm_rsp, mcmd->flags); qlt_schedule_sess_for_deletion(mcmd->sess); break; default: qlt_send_notify_ack(vha->hw->base_qpair, &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); break; } } else { if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) { qlt_build_abts_resp_iocb(mcmd); free_mcmd = false; } else qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, mcmd->fc_tm_rsp); } /* * Make the callback for ->free_mcmd() to queue_work() and invoke * target_put_sess_cmd() to drop cmd_kref to 1. The final * target_put_sess_cmd() call will be made from TFO->check_stop_free() * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> * qlt_xmit_tm_rsp() returns here.. 
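	 * Note that ->free_mcmd() is only called below when no ABTS response
	 * IOCB was queued above (free_mcmd is still true).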
*/ if (free_mcmd) ha->tgt.tgt_ops->free_mcmd(mcmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); } EXPORT_SYMBOL(qlt_xmit_tm_rsp); /* No locks */ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) { struct qla_tgt_cmd *cmd = prm->cmd; BUG_ON(cmd->sg_cnt == 0); prm->sg = (struct scatterlist *)cmd->sg; prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); if (unlikely(prm->seg_cnt == 0)) goto out_err; prm->cmd->sg_mapped = 1; if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { /* * If greater than four sg entries then we need to allocate * the continuation entries */ if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - QLA_TGT_DATASEGS_PER_CMD_24XX, QLA_TGT_DATASEGS_PER_CONT_24XX); } else { /* DIF */ if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); prm->tot_dsds = prm->seg_cnt; } else prm->tot_dsds = prm->seg_cnt; if (cmd->prot_sg_cnt) { prm->prot_sg = cmd->prot_sg; prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt, cmd->dma_data_direction); if (unlikely(prm->prot_seg_cnt == 0)) goto out_err; if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { /* Dif Bundling not support here */ prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); prm->tot_dsds += prm->prot_seg_cnt; } else prm->tot_dsds += prm->prot_seg_cnt; } } return 0; out_err: ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, "qla_target(%d): PCI mapping failed: sg_cnt=%d", 0, prm->cmd->sg_cnt); return -1; } static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { struct qla_hw_data *ha; struct qla_qpair *qpair; if (!cmd->sg_mapped) return; qpair = cmd->qpair; dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); cmd->sg_mapped = 0; if (cmd->prot_sg_cnt) dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt, cmd->dma_data_direction); if (!cmd->ctx) return; ha = vha->hw; if (cmd->ctx_dsd_alloced) qla2x00_clean_dsd_pool(ha, cmd->ctx); dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); } static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t req_cnt) { uint32_t cnt; struct req_que *req = qpair->req; if (req->cnt < (req_cnt + 2)) { cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : rd_reg_dword_relaxed(req->req_q_out)); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (unlikely(req->cnt < (req_cnt + 2))) return -EAGAIN; } req->cnt -= req_cnt; return 0; } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static inline void *qlt_get_req_pkt(struct req_que *req) { /* Adjust ring index. 
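	 * and wrap back to the start of the ring once the last entry has been used.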
*/ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } return (cont_entry_t *)req->ring_ptr; } /* ha->hardware_lock supposed to be held on entry */ static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) { uint32_t h; int index; uint8_t found = 0; struct req_que *req = qpair->req; h = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { h++; if (h == req->num_outstanding_cmds) h = 1; if (h == QLA_TGT_SKIP_HANDLE) continue; if (!req->outstanding_cmds[h]) { found = 1; break; } } if (found) { req->current_outstanding_cmd = h; } else { ql_dbg(ql_dbg_io, qpair->vha, 0x305b, "qla_target(%d): Ran out of empty cmd slots\n", qpair->vha->vp_idx); h = QLA_TGT_NULL_HANDLE; } return h; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) { uint32_t h; struct ctio7_to_24xx *pkt; struct atio_from_isp *atio = &prm->cmd->atio; uint16_t temp; struct qla_tgt_cmd *cmd = prm->cmd; pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; prm->pkt = pkt; memset(pkt, 0, sizeof(*pkt)); pkt->entry_type = CTIO_TYPE7; pkt->entry_count = (uint8_t)prm->req_cnt; pkt->vp_index = prm->cmd->vp_idx; h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { /* * CTIO type 7 from the firmware doesn't provide a way to * know the initiator's LOOP ID, hence we can't find * the session and, so, the command. */ return -EAGAIN; } else qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; pkt->handle = make_handle(qpair->req->id, h); pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); pkt->exchange_addr = atio->u.isp24.exchange_addr; temp = atio->u.isp24.attr << 9; pkt->u.status0.flags |= cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); if (cmd->edif) { if (cmd->dma_data_direction == DMA_TO_DEVICE) prm->cmd->sess->edif.rx_bytes += cmd->bufflen; if (cmd->dma_data_direction == DMA_FROM_DEVICE) prm->cmd->sess->edif.tx_bytes += cmd->bufflen; pkt->u.status0.edif_flags |= EF_EN_EDIF; } return 0; } /* * ha->hardware_lock supposed to be held on entry. We have already made sure * that there is sufficient amount of request entries to not drop it. */ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) { int cnt; struct dsd64 *cur_dsd; /* Build continuation packets */ while (prm->seg_cnt > 0) { cont_a64_entry_t *cont_pkt64 = (cont_a64_entry_t *)qlt_get_req_pkt( prm->cmd->qpair->req); /* * Make sure that from cont_pkt64 none of * 64-bit specific fields used for 32-bit * addressing. Cast to (cont_entry_t *) for * that. */ memset(cont_pkt64, 0, sizeof(*cont_pkt64)); cont_pkt64->entry_count = 1; cont_pkt64->sys_define = 0; cont_pkt64->entry_type = CONTINUE_A64_TYPE; cur_dsd = cont_pkt64->dsd; /* Load continuation entry data segments */ for (cnt = 0; cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; cnt++, prm->seg_cnt--) { append_dsd64(&cur_dsd, prm->sg); prm->sg = sg_next(prm->sg); } } } /* * ha->hardware_lock supposed to be held on entry. We have already made sure * that there is sufficient amount of request entries to not drop it. 
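 * (the caller reserved them via qlt_check_reserve_free_req()).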
*/ static void qlt_load_data_segments(struct qla_tgt_prm *prm) { int cnt; struct dsd64 *cur_dsd; struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); /* Setup packet address segment pointer */ cur_dsd = &pkt24->u.status0.dsd; /* Set total data segment count */ if (prm->seg_cnt) pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); if (prm->seg_cnt == 0) { /* No data transfer */ cur_dsd->address = 0; cur_dsd->length = 0; return; } /* If scatter gather */ /* Load command entry data segments */ for (cnt = 0; (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; cnt++, prm->seg_cnt--) { append_dsd64(&cur_dsd, prm->sg); prm->sg = sg_next(prm->sg); } qlt_load_cont_data_segments(prm); } static inline int qlt_has_data(struct qla_tgt_cmd *cmd) { return cmd->bufflen > 0; } static void qlt_print_dif_err(struct qla_tgt_prm *prm) { struct qla_tgt_cmd *cmd; struct scsi_qla_host *vha; /* asc 0x10=dif error */ if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { cmd = prm->cmd; vha = cmd->vha; /* ASCQ */ switch (prm->sense_buffer[13]) { case 1: ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; case 2: ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; case 3: ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; default: ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, "BE detected Dif ERR: lba[%llx|%lld] len[%x] " "se_cmd=%p tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; } ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); } } /* * Called without ha->hardware_lock held */ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, uint32_t *full_req_cnt) { struct se_cmd *se_cmd = &cmd->se_cmd; struct qla_qpair *qpair = cmd->qpair; prm->cmd = cmd; prm->tgt = cmd->tgt; prm->pkt = NULL; prm->rq_result = scsi_status; prm->sense_buffer = &cmd->sense_buffer[0]; prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; prm->sg = NULL; prm->seg_cnt = -1; prm->req_cnt = 1; prm->residual = 0; prm->add_status_pkt = 0; prm->prot_sg = NULL; prm->prot_seg_cnt = 0; prm->tot_dsds = 0; if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { if (qlt_pci_map_calc_cnt(prm) != 0) return -EAGAIN; } *full_req_cnt = prm->req_cnt; if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { prm->residual = se_cmd->residual_count; ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); prm->rq_result |= SS_RESIDUAL_UNDER; } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { prm->residual = se_cmd->residual_count; ql_dbg_qp(ql_dbg_io, qpair, 0x305d, "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 
se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); prm->rq_result |= SS_RESIDUAL_OVER; } if (xmit_type & QLA_TGT_XMIT_STATUS) { /* * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be * ignored in *xmit_response() below */ if (qlt_has_data(cmd)) { if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || (IS_FWI2_CAPABLE(cmd->vha->hw) && (prm->rq_result != 0))) { prm->add_status_pkt = 1; (*full_req_cnt)++; } } } return 0; } static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, int sending_sense) { if (cmd->qpair->enable_class_2) return 0; if (sending_sense) return cmd->conf_compl_supported; else return cmd->qpair->enable_explicit_conf && cmd->conf_compl_supported; } static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, struct qla_tgt_prm *prm) { prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, (uint32_t)sizeof(ctio->u.status1.sense_data)); ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(prm->cmd, 0)) { ctio->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } ctio->u.status0.residual = cpu_to_le32(prm->residual); ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { int i; if (qlt_need_explicit_conf(prm->cmd, 1)) { if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, "Skipping EXPLICIT_CONFORM and " "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " "non GOOD status\n"); goto skip_explict_conf; } ctio->u.status1.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } skip_explict_conf: ctio->u.status1.flags &= ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); ctio->u.status1.flags |= cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); ctio->u.status1.scsi_status |= cpu_to_le16(SS_SENSE_LEN_VALID); ctio->u.status1.sense_length = cpu_to_le16(prm->sense_buffer_len); for (i = 0; i < prm->sense_buffer_len/4; i++) { uint32_t v; v = get_unaligned_be32( &((uint32_t *)prm->sense_buffer)[i]); put_unaligned_le32(v, &((uint32_t *)ctio->u.status1.sense_data)[i]); } qlt_print_dif_err(prm); } else { ctio->u.status1.flags &= ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); ctio->u.status1.flags |= cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); ctio->u.status1.sense_length = 0; memset(ctio->u.status1.sense_data, 0, sizeof(ctio->u.status1.sense_data)); } /* Sense with len > 24, is it possible ??? 
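	 * The buffer copied above is already capped to
	 * sizeof(ctio->u.status1.sense_data) by the min_t() at the top of
	 * this function.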
*/ } static inline int qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) { switch (se_cmd->prot_op) { case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_STRIP: if (ql2xenablehba_err_chk >= 1) return 1; break; case TARGET_PROT_DOUT_PASS: case TARGET_PROT_DIN_PASS: if (ql2xenablehba_err_chk >= 2) return 1; break; case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_STRIP: return 1; default: break; } return 0; } static inline int qla_tgt_ref_mask_check(struct se_cmd *se_cmd) { switch (se_cmd->prot_op) { case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_STRIP: case TARGET_PROT_DOUT_STRIP: case TARGET_PROT_DIN_PASS: case TARGET_PROT_DOUT_PASS: return 1; default: return 0; } return 0; } /* * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command */ static void qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, uint16_t *pfw_prot_opts) { struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t lba = 0xffffffff & se_cmd->t_task_lba; scsi_qla_host_t *vha = cmd->tgt->vha; struct qla_hw_data *ha = vha->hw; uint32_t t32 = 0; /* * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 * have been immplemented by TCM, before AppTag is avail. * Look for modesense_handlers[] */ ctx->app_tag = 0; ctx->app_tag_mask[0] = 0x0; ctx->app_tag_mask[1] = 0x0; if (IS_PI_UNINIT_CAPABLE(ha)) { if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; } t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); switch (se_cmd->prot_type) { case TARGET_DIF_TYPE0_PROT: /* * No check for ql2xenablehba_err_chk, as it * would be an I/O error if hba tag generation * is not done. */ ctx->ref_tag = cpu_to_le32(lba); /* enable ALL bytes of the ref tag */ ctx->ref_tag_mask[0] = 0xff; ctx->ref_tag_mask[1] = 0xff; ctx->ref_tag_mask[2] = 0xff; ctx->ref_tag_mask[3] = 0xff; break; case TARGET_DIF_TYPE1_PROT: /* * For TYPE 1 protection: 16 bit GUARD tag, 32 bit * REF tag, and 16 bit app tag. 
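		 * The REF tag is seeded from the low 32 bits of the LBA.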
*/ ctx->ref_tag = cpu_to_le32(lba); if (!qla_tgt_ref_mask_check(se_cmd) || !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; break; } /* enable ALL bytes of the ref tag */ ctx->ref_tag_mask[0] = 0xff; ctx->ref_tag_mask[1] = 0xff; ctx->ref_tag_mask[2] = 0xff; ctx->ref_tag_mask[3] = 0xff; break; case TARGET_DIF_TYPE2_PROT: /* * For TYPE 2 protection: 16 bit GUARD + 32 bit REF * tag has to match LBA in CDB + N */ ctx->ref_tag = cpu_to_le32(lba); if (!qla_tgt_ref_mask_check(se_cmd) || !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; break; } /* enable ALL bytes of the ref tag */ ctx->ref_tag_mask[0] = 0xff; ctx->ref_tag_mask[1] = 0xff; ctx->ref_tag_mask[2] = 0xff; ctx->ref_tag_mask[3] = 0xff; break; case TARGET_DIF_TYPE3_PROT: /* For TYPE 3 protection: 16 bit GUARD only */ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; break; } } static inline int qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) { struct dsd64 *cur_dsd; uint32_t transfer_length = 0; uint32_t data_bytes; uint32_t dif_bytes; uint8_t bundling = 1; struct crc_context *crc_ctx_pkt = NULL; struct qla_hw_data *ha; struct ctio_crc2_to_fw *pkt; dma_addr_t crc_ctx_dma; uint16_t fw_prot_opts = 0; struct qla_tgt_cmd *cmd = prm->cmd; struct se_cmd *se_cmd = &cmd->se_cmd; uint32_t h; struct atio_from_isp *atio = &prm->cmd->atio; struct qla_tc_param tc; uint16_t t16; scsi_qla_host_t *vha = cmd->vha; ha = vha->hw; pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; prm->pkt = pkt; memset(pkt, 0, sizeof(*pkt)); ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) bundling = 0; /* Compute dif len and adjust data len to incude protection */ data_bytes = cmd->bufflen; dif_bytes = (data_bytes / cmd->blk_sz) * 8; switch (se_cmd->prot_op) { case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_STRIP: transfer_length = data_bytes; if (cmd->prot_sg_cnt) data_bytes += dif_bytes; break; case TARGET_PROT_DIN_STRIP: case TARGET_PROT_DOUT_INSERT: case TARGET_PROT_DIN_PASS: case TARGET_PROT_DOUT_PASS: transfer_length = data_bytes + dif_bytes; break; default: BUG(); break; } if (!qlt_hba_err_chk_enabled(se_cmd)) fw_prot_opts |= 0x10; /* Disable Guard tag checking */ /* HBA error checking enabled */ else if (IS_PI_UNINIT_CAPABLE(ha)) { if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) fw_prot_opts |= PO_DIS_VALD_APP_ESC; else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; } switch (se_cmd->prot_op) { case TARGET_PROT_DIN_INSERT: case TARGET_PROT_DOUT_INSERT: fw_prot_opts |= PO_MODE_DIF_INSERT; break; case TARGET_PROT_DIN_STRIP: case TARGET_PROT_DOUT_STRIP: fw_prot_opts |= PO_MODE_DIF_REMOVE; break; case TARGET_PROT_DIN_PASS: case TARGET_PROT_DOUT_PASS: fw_prot_opts |= PO_MODE_DIF_PASS; /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? 
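		 * For now PO_MODE_DIF_PASS is used and the protection
		 * information is passed through as-is.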
*/ break; default:/* Normal Request */ fw_prot_opts |= PO_MODE_DIF_PASS; break; } /* ---- PKT ---- */ /* Update entry type to indicate Command Type CRC_2 IOCB */ pkt->entry_type = CTIO_CRC2; pkt->entry_count = 1; pkt->vp_index = cmd->vp_idx; h = qlt_make_handle(qpair); if (unlikely(h == QLA_TGT_NULL_HANDLE)) { /* * CTIO type 7 from the firmware doesn't provide a way to * know the initiator's LOOP ID, hence we can't find * the session and, so, the command. */ return -EAGAIN; } else qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; pkt->handle = make_handle(qpair->req->id, h); pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); pkt->exchange_addr = atio->u.isp24.exchange_addr; /* silence compile warning */ t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); pkt->ox_id = cpu_to_le16(t16); t16 = (atio->u.isp24.attr << 9); pkt->flags |= cpu_to_le16(t16); pkt->relative_offset = cpu_to_le32(prm->cmd->offset); /* Set transfer direction */ if (cmd->dma_data_direction == DMA_TO_DEVICE) pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); else if (cmd->dma_data_direction == DMA_FROM_DEVICE) pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); pkt->dseg_count = cpu_to_le16(prm->tot_dsds); /* Fibre channel byte count */ pkt->transfer_length = cpu_to_le32(transfer_length); /* ----- CRC context -------- */ /* Allocate CRC context from global pool */ crc_ctx_pkt = cmd->ctx = dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); if (!crc_ctx_pkt) goto crc_queuing_error; crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); /* Set handle */ crc_ctx_pkt->handle = pkt->handle; qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); if (!bundling) { cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; } else { /* * Configure Bundling if we need to fetch interlaving * protection PCI accesses */ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; } /* Finish the common fields of CRC pkt */ crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); crc_ctx_pkt->guard_seed = cpu_to_le16(0); memset((uint8_t *)&tc, 0 , sizeof(tc)); tc.vha = vha; tc.blk_sz = cmd->blk_sz; tc.bufflen = cmd->bufflen; tc.sg = cmd->sg; tc.prot_sg = cmd->prot_sg; tc.ctx = crc_ctx_pkt; tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; /* Walks data segments */ pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); if (!bundling && prm->prot_seg_cnt) { if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, prm->tot_dsds, &tc)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, (prm->tot_dsds - prm->prot_seg_cnt), &tc)) goto crc_queuing_error; if (bundling && prm->prot_seg_cnt) { /* Walks dif segments */ pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, prm->prot_seg_cnt, cmd)) goto crc_queuing_error; } return QLA_SUCCESS; crc_queuing_error: /* Cleanup will be performed by the caller */ qpair->req->outstanding_cmds[h] = NULL; return QLA_FUNCTION_FAILED; } /* * Callback to 
setup response of xmit_type of QLA_TGT_XMIT_DATA and * * QLA_TGT_XMIT_STATUS for >= 24xx silicon */ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, uint8_t scsi_status) { struct scsi_qla_host *vha = cmd->vha; struct qla_qpair *qpair = cmd->qpair; struct ctio7_to_24xx *pkt; struct qla_tgt_prm prm; uint32_t full_req_cnt = 0; unsigned long flags = 0; int res; if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; return 0; } ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, &cmd->se_cmd, qpair->id); res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); if (unlikely(res != 0)) { return res; } spin_lock_irqsave(qpair->qp_lock_ptr, flags); if (xmit_type == QLA_TGT_XMIT_STATUS) qpair->tgt_counters.core_qla_snd_status++; else qpair->tgt_counters.core_qla_que_buf++; if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { /* * Either the port is not online or this request was from * previous life, just abort the processing. */ cmd->state = QLA_TGT_STATE_PROCESSED; ql_dbg_qp(ql_dbg_async, qpair, 0xe101, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); res = 0; goto out_unmap_unlock; } /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(qpair, full_req_cnt); if (unlikely(res)) goto out_unmap_unlock; if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) res = qlt_build_ctio_crc2_pkt(qpair, &prm); else res = qlt_24xx_build_ctio_pkt(qpair, &prm); if (unlikely(res != 0)) { qpair->req->cnt += full_req_cnt; goto out_unmap_unlock; } pkt = (struct ctio7_to_24xx *)prm.pkt; if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_IN | CTIO7_FLAGS_STATUS_MODE_0); if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) qlt_load_data_segments(&prm); if (prm.add_status_pkt == 0) { if (xmit_type & QLA_TGT_XMIT_STATUS) { pkt->u.status0.scsi_status = cpu_to_le16(prm.rq_result); if (!cmd->edif) pkt->u.status0.residual = cpu_to_le32(prm.residual); pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(cmd, 0)) { pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } } } else { /* * We have already made sure that there is sufficient * amount of request entries to not drop HW lock in * req_pkt(). */ struct ctio7_to_24xx *ctio = (struct ctio7_to_24xx *)qlt_get_req_pkt( qpair->req); ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, "Building additional status packet 0x%p.\n", ctio); /* * T10Dif: ctio_crc2_to_fw overlay ontop of * ctio7_to_24xx */ memcpy(ctio, pkt, sizeof(*ctio)); /* reset back to CTIO7 */ ctio->entry_count = 1; ctio->entry_type = CTIO_TYPE7; ctio->dseg_count = 0; ctio->u.status1.flags &= ~cpu_to_le16( CTIO7_FLAGS_DATA_IN); /* Real finish is ctio_m1's finish */ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_DONT_RET_CTIO); /* qlt_24xx_init_ctio_to_isp will correct * all neccessary fields that's part of CTIO7. * There should be no residual of CTIO-CRC2 data. 
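			 * since the entry type, entry count and dseg_count of
			 * the copy were reset above.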
*/ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, &prm); } } else qlt_24xx_init_ctio_to_isp(pkt, &prm); cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ cmd->cmd_sent_to_fw = 1; cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return 0; out_unmap_unlock: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; } EXPORT_SYMBOL(qlt_xmit_response); int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) { struct ctio7_to_24xx *pkt; struct scsi_qla_host *vha = cmd->vha; struct qla_tgt *tgt = cmd->tgt; struct qla_tgt_prm prm; unsigned long flags = 0; int res = 0; struct qla_qpair *qpair = cmd->qpair; memset(&prm, 0, sizeof(prm)); prm.cmd = cmd; prm.tgt = tgt; prm.sg = NULL; prm.req_cnt = 1; if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* * Either the port is not online or this request was from * previous life, just abort the processing. */ cmd->aborted = 1; cmd->write_data_transferred = 0; cmd->state = QLA_TGT_STATE_DATA_IN; vha->hw->tgt.tgt_ops->handle_data(cmd); ql_dbg_qp(ql_dbg_async, qpair, 0xe102, "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); return 0; } /* Calculate number of entries and segments required */ if (qlt_pci_map_calc_cnt(&prm) != 0) return -EAGAIN; spin_lock_irqsave(qpair->qp_lock_ptr, flags); /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(qpair, prm.req_cnt); if (res != 0) goto out_unlock_free_unmap; if (cmd->se_cmd.prot_op) res = qlt_build_ctio_crc2_pkt(qpair, &prm); else res = qlt_24xx_build_ctio_pkt(qpair, &prm); if (unlikely(res != 0)) { qpair->req->cnt += prm.req_cnt; goto out_unlock_free_unmap; } pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) qlt_load_data_segments(&prm); cmd->state = QLA_TGT_STATE_NEED_DATA; cmd->cmd_sent_to_fw = 1; cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags); /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; out_unlock_free_unmap: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return res; } EXPORT_SYMBOL(qlt_rdy_to_xfer); /* * it is assumed either hardware_lock or qpair lock is held. 
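 * qlt_handle_dif_error() decodes the actual vs. expected DIF tags reported by
 * the firmware and either hands the command back to the upper layer or sends
 * a CHECK CONDITION response before freeing it.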
*/ static void qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, struct ctio_crc_from_fw *sts) { uint8_t *ap = &sts->actual_dif[0]; uint8_t *ep = &sts->expected_dif[0]; uint64_t lba = cmd->se_cmd.t_task_lba; uint8_t scsi_status, sense_key, asc, ascq; unsigned long flags; struct scsi_qla_host *vha = cmd->vha; cmd->trc_flags |= TRC_DIF_ERR; cmd->a_guard = get_unaligned_be16(ap + 0); cmd->a_app_tag = get_unaligned_be16(ap + 2); cmd->a_ref_tag = get_unaligned_be32(ap + 4); cmd->e_guard = get_unaligned_be16(ep + 0); cmd->e_app_tag = get_unaligned_be16(ep + 2); cmd->e_ref_tag = get_unaligned_be32(ep + 4); ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); scsi_status = sense_key = asc = ascq = 0; /* check appl tag */ if (cmd->e_app_tag != cmd->a_app_tag) { ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); cmd->dif_err_code = DIF_ERR_APP; scsi_status = SAM_STAT_CHECK_CONDITION; sense_key = ABORTED_COMMAND; asc = 0x10; ascq = 0x2; } /* check ref tag */ if (cmd->e_ref_tag != cmd->a_ref_tag) { ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); cmd->dif_err_code = DIF_ERR_REF; scsi_status = SAM_STAT_CHECK_CONDITION; sense_key = ABORTED_COMMAND; asc = 0x10; ascq = 0x3; goto out; } /* check guard */ if (cmd->e_guard != cmd->a_guard) { ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); cmd->dif_err_code = DIF_ERR_GRD; scsi_status = SAM_STAT_CHECK_CONDITION; sense_key = ABORTED_COMMAND; asc = 0x10; ascq = 0x1; } out: switch (cmd->state) { case QLA_TGT_STATE_NEED_DATA: /* handle_data will load DIF error code */ cmd->state = QLA_TGT_STATE_DATA_IN; vha->hw->tgt.tgt_ops->handle_data(cmd); break; default: spin_lock_irqsave(&cmd->cmd_lock, flags); if (cmd->aborted) { spin_unlock_irqrestore(&cmd->cmd_lock, flags); vha->hw->tgt.tgt_ops->free_cmd(cmd); break; } spin_unlock_irqrestore(&cmd->cmd_lock, flags); qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, ascq); /* assume scsi status gets out on the wire. * Will not wait for completion. 
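		 * The command is freed immediately below.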
*/ vha->hw->tgt.tgt_ops->free_cmd(cmd); break; } } /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *ntfy) { struct nack_to_isp *nack; struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, "Sending TERM ELS CTIO (ha=%p)\n", ha); pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe080, "qla_target(%d): %s failed: unable to allocate " "request packet\n", vha->vp_idx, __func__); return -ENOMEM; } pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); } /* terminate */ nack->u.isp24.flags |= __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; qla2x00_start_iocbs(vha, vha->req); return ret; } static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *imm, int ha_locked) { int rc; WARN_ON_ONCE(!ha_locked); rc = __qlt_send_term_imm_notif(vha, imm); pr_debug("rc = %d\n", rc); } /* * If hardware_lock held on entry, might drop it, then reaquire * This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio) { struct scsi_qla_host *vha = qpair->vha; struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; uint16_t temp; ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); if (cmd) vha = cmd->vha; pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); if (pkt == NULL) { ql_dbg(ql_dbg_tgt, vha, 0xe050, "qla_target(%d): %s failed: unable to allocate " "request packet\n", vha->vp_idx, __func__); return -ENOMEM; } if (cmd != NULL) { if (cmd->state < QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_tgt, vha, 0xe051, "qla_target(%d): Terminating cmd %p with " "incorrect state %d\n", vha->vp_idx, cmd, cmd->state); } else ret = 1; } qpair->tgt_counters.num_term_xchg_sent++; pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED); ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio24->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE; ctio24->u.status1.flags = cpu_to_le16(temp); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.ox_id = cpu_to_le16(temp); /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); return ret; } 
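/*
 * Wrapper around __qlt_send_term_exchange(): takes the qpair lock when the
 * caller does not already hold it, and queues a QFULL command if IOCB
 * allocation fails with -ENOMEM.
 */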
static void qlt_send_term_exchange(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort) { struct scsi_qla_host *vha; unsigned long flags = 0; int rc; /* why use different vha? NPIV */ if (cmd) vha = cmd->vha; else vha = qpair->vha; if (ha_locked) { rc = __qlt_send_term_exchange(qpair, cmd, atio); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); goto done; } spin_lock_irqsave(qpair->qp_lock_ptr, flags); rc = __qlt_send_term_exchange(qpair, cmd, atio); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); done: if (cmd && !ul_abort && !cmd->aborted) { if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); } if (!ha_locked) spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return; } static void qlt_init_term_exchange(struct scsi_qla_host *vha) { struct list_head free_list; struct qla_tgt_cmd *cmd, *tcmd; vha->hw->tgt.leak_exchg_thresh_hold = (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; cmd = tcmd = NULL; if (!list_empty(&vha->hw->tgt.q_full_list)) { INIT_LIST_HEAD(&free_list); list_splice_init(&vha->hw->tgt.q_full_list, &free_list); list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { list_del(&cmd->cmd_list); /* This cmd was never sent to TCM. There is no need * to schedule free or call free_cmd */ qlt_free_cmd(cmd); vha->hw->tgt.num_qfull_cmds_alloc--; } } vha->hw->tgt.num_qfull_cmds_dropped = 0; } static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) { uint32_t total_leaked; total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; if (vha->hw->tgt.leak_exchg_thresh_hold && (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { ql_dbg(ql_dbg_tgt, vha, 0xe079, "Chip reset due to exchange starvation: %d/%d.\n", total_leaked, vha->hw->cur_fw_xcb_count); if (IS_P3P_TYPE(vha->hw)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } int qlt_abort_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt *tgt = cmd->tgt; struct scsi_qla_host *vha = tgt->vha; struct se_cmd *se_cmd = &cmd->se_cmd; unsigned long flags; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, "qla_target(%d): terminating exchange for aborted cmd=%p " "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, se_cmd->tag); spin_lock_irqsave(&cmd->cmd_lock, flags); if (cmd->aborted) { if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(&cmd->cmd_lock, flags); /* * It's normal to see 2 calls in this path: * 1) XFER Rdy completion + CMD_T_ABORT * 2) TCM TMR - drain_state_list */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, "multiple abort. %p transport_state %x, t_state %x, " "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); return -EIO; } cmd->aborted = 1; cmd->trc_flags |= TRC_ABORT; spin_unlock_irqrestore(&cmd->cmd_lock, flags); qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); return 0; } EXPORT_SYMBOL(qlt_abort_cmd); void qlt_free_cmd(struct qla_tgt_cmd *cmd) { struct fc_port *sess = cmd->sess; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, "%s: se_cmd[%p] ox_id %04x\n", __func__, &cmd->se_cmd, be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); BUG_ON(cmd->cmd_in_wq); if (!cmd->q_full) qlt_decr_num_pend_cmds(cmd->vha); BUG_ON(cmd->sg_mapped); cmd->jiffies_at_free = get_jiffies_64(); if (!sess || !sess->se_sess) { WARN_ON(1); return; } cmd->jiffies_at_free = get_jiffies_64(); cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); } EXPORT_SYMBOL(qlt_free_cmd); /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, struct qla_tgt_cmd *cmd, uint32_t status) { int term = 0; struct scsi_qla_host *vha = qpair->vha; if (cmd->se_cmd.prot_op) ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " "se_cmd=%p tag[%x] op %#x/%s", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr, cmd->se_cmd.prot_op, prot_op_str(cmd->se_cmd.prot_op)); if (ctio != NULL) { struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; term = !(c->flags & cpu_to_le16(OF_TERM_EXCH)); } else term = 1; if (term) qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); return term; } /* ha->hardware_lock supposed to be held on entry */ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha, struct rsp_que *rsp, uint32_t handle, void *ctio) { void *cmd = NULL; struct req_que *req; int qid = GET_QID(handle); uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; if (unlikely(h == QLA_TGT_SKIP_HANDLE)) return NULL; if (qid == rsp->req->id) { req = rsp->req; } else if (vha->hw->req_q_map[qid]) { ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, "qla_target(%d): CTIO completion with different QID %d handle %x\n", vha->vp_idx, rsp->id, handle); req = vha->hw->req_q_map[qid]; } else { return NULL; } h &= QLA_CMD_HANDLE_MASK; if (h != QLA_TGT_NULL_HANDLE) { if (unlikely(h >= req->num_outstanding_cmds)) { ql_dbg(ql_dbg_tgt, vha, 0xe052, "qla_target(%d): Wrong handle %x received\n", vha->vp_idx, handle); return NULL; } cmd = req->outstanding_cmds[h]; if (unlikely(cmd == NULL)) { ql_dbg(ql_dbg_async, vha, 0xe053, "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", vha->vp_idx, handle, req->id, rsp->id); return NULL; } req->outstanding_cmds[h] = NULL; } else if (ctio != NULL) { /* We can't get loop ID from CTIO7 */ ql_dbg(ql_dbg_tgt, vha, 0xe054, "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " "support NULL handles\n", vha->vp_idx); return NULL; } return cmd; } /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) { struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd; struct qla_tgt_cmd *cmd; struct qla_qpair *qpair = rsp->qpair; if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { /* That could happen only in case of an error/reset/abort */ if (status != CTIO_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, "Intermediate CTIO received" " (status %x)\n", status); } return; } cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); if (cmd == NULL) return; if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) && cmd->sess) { qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, (struct ctio7_from_24xx *)ctio); } se_cmd = &cmd->se_cmd; cmd->cmd_sent_to_fw = 0; qlt_unmap_sg(vha, cmd); if (unlikely(status != CTIO_SUCCESS)) { switch (status & 0xFFFF) { case CTIO_INVALID_RX_ID: if (printk_ratelimit()) dev_info(&vha->hw->pdev->dev, "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n", vha->vp_idx, cmd->atio.u.isp24.attr, ((cmd->ctio_flags >> 9) & 0xf), cmd->ctio_flags); break; case CTIO_LIP_RESET: case CTIO_TARGET_RESET: case CTIO_ABORTED: /* driver request abort via Terminate exchange */ case CTIO_TIMEOUT: /* They are OK */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, "qla_target(%d): CTIO with " "status %#x received, state %x, se_cmd %p, " "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, status, cmd->state, se_cmd); break; case CTIO_PORT_LOGGED_OUT: case CTIO_PORT_UNAVAILABLE: { int logged_out = (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, "qla_target(%d): CTIO with %s status %x " "received (state %x, se_cmd %p)\n", vha->vp_idx, logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", status, cmd->state, se_cmd); if (logged_out && cmd->sess) { /* * Session is already logged out, but we need * to notify initiator, who's not aware of this */ cmd->sess->send_els_logo = 1; ql_dbg(ql_dbg_disc, vha, 0x20f8, "%s %d %8phC post del sess\n", __func__, __LINE__, cmd->sess->port_name); qlt_schedule_sess_for_deletion(cmd->sess); } break; } case CTIO_DIF_ERROR: { struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)ctio; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, "qla_target(%d): CTIO with DIF_ERROR status %x " "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " "expect_dif[0x%llx]\n", vha->vp_idx, status, cmd->state, se_cmd, *((u64 *)&crc->actual_dif[0]), *((u64 *)&crc->expected_dif[0])); qlt_handle_dif_error(qpair, cmd, ctio); return; } case CTIO_FAST_AUTH_ERR: case CTIO_FAST_INCOMP_PAD_LEN: case CTIO_FAST_INVALID_REQ: case CTIO_FAST_SPI_ERR: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n", vha->vp_idx, status, cmd->state, se_cmd); break; default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", vha->vp_idx, status, cmd->state, se_cmd); break; } /* "cmd->aborted" means * cmd is already aborted/terminated, we don't * need to terminate again. The exchange is already * cleaned up/freed at FW level. Just cleanup at driver * level. 
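		 * Otherwise the exchange is terminated below, unless the
		 * command is still waiting for write data.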
*/ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && (!cmd->aborted)) { cmd->trc_flags |= TRC_CTIO_ERR; if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) return; } } if (cmd->state == QLA_TGT_STATE_PROCESSED) { cmd->trc_flags |= TRC_CTIO_DONE; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { cmd->state = QLA_TGT_STATE_DATA_IN; if (status == CTIO_SUCCESS) cmd->write_data_transferred = 1; ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->aborted) { cmd->trc_flags |= TRC_CTIO_ABORTED; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { cmd->trc_flags |= TRC_CTIO_STRANGE; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); } if (unlikely(status != CTIO_SUCCESS) && !cmd->aborted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); dump_stack(); } ha->tgt.tgt_ops->free_cmd(cmd); } static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, uint8_t task_codes) { int fcp_task_attr; switch (task_codes) { case ATIO_SIMPLE_QUEUE: fcp_task_attr = TCM_SIMPLE_TAG; break; case ATIO_HEAD_OF_QUEUE: fcp_task_attr = TCM_HEAD_TAG; break; case ATIO_ORDERED_QUEUE: fcp_task_attr = TCM_ORDERED_TAG; break; case ATIO_ACA_QUEUE: fcp_task_attr = TCM_ACA_TAG; break; case ATIO_UNTAGGED: fcp_task_attr = TCM_SIMPLE_TAG; break; default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, "qla_target: unknown task code %x, use ORDERED instead\n", task_codes); fcp_task_attr = TCM_ORDERED_TAG; break; } return fcp_task_attr; } /* * Process context for I/O path into tcm_qla2xxx code */ static void __qlt_do_work(struct qla_tgt_cmd *cmd) { scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct fc_port *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; uint32_t data_length; int ret, fcp_task_attr, data_dir, bidi = 0; struct qla_qpair *qpair = cmd->qpair; cmd->cmd_in_wq = 0; cmd->trc_flags |= TRC_DO_WORK; if (cmd->aborted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, "cmd with tag %u is aborted\n", cmd->atio.u.isp24.exchange_addr); goto out_term; } spin_lock_init(&cmd->cmd_lock); cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr); if (atio->u.isp24.fcp_cmnd.rddata && atio->u.isp24.fcp_cmnd.wrdata) { bidi = 1; data_dir = DMA_TO_DEVICE; } else if (atio->u.isp24.fcp_cmnd.rddata) data_dir = DMA_FROM_DEVICE; else if (atio->u.isp24.fcp_cmnd.wrdata) data_dir = DMA_TO_DEVICE; else data_dir = DMA_NONE; fcp_task_attr = qlt_get_fcp_task_attr(vha, atio->u.isp24.fcp_cmnd.task_attr); data_length = get_datalen_for_atio(atio); ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, fcp_task_attr, data_dir, bidi); if (ret != 0) goto out_term; /* * Drop extra session reference from qlt_handle_cmd_for_atio(). */ ha->tgt.tgt_ops->put_sess(sess); return; out_term: ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); /* * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. 
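	 * qlt_decr_num_pend_cmds() and ->rel_cmd() below undo the accounting
	 * and allocation done in qlt_get_tag().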
*/ cmd->trc_flags |= TRC_DO_WORK_ERR; spin_lock_irqsave(qpair->qp_lock_ptr, flags); qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); qlt_decr_num_pend_cmds(vha); cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); ha->tgt.tgt_ops->put_sess(sess); } static void qlt_do_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); scsi_qla_host_t *vha = cmd->vha; unsigned long flags; spin_lock_irqsave(&vha->cmd_list_lock, flags); list_del(&cmd->cmd_list); spin_unlock_irqrestore(&vha->cmd_list_lock, flags); __qlt_do_work(cmd); } void qlt_clr_qp_table(struct scsi_qla_host *vha) { unsigned long flags; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; void *node; u64 key = 0; ql_log(ql_log_info, vha, 0x706c, "User update Number of Active Qpairs %d\n", ha->tgt.num_act_qpairs); spin_lock_irqsave(&ha->tgt.atio_lock, flags); btree_for_each_safe64(&tgt->lun_qpair_map, key, node) btree_remove64(&tgt->lun_qpair_map, key); ha->base_qpair->lun_cnt = 0; for (key = 0; key < ha->max_qpairs; key++) if (ha->queue_pair_map[key]) ha->queue_pair_map[key]->lun_cnt = 0; spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); } static void qlt_assign_qpair(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { struct qla_qpair *qpair, *qp; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_qpair_hint *h; if (vha->flags.qpairs_available) { h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); if (unlikely(!h)) { /* spread lun to qpair ratio evently */ int lcnt = 0, rc; struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); qpair = vha->hw->base_qpair; if (qpair->lun_cnt == 0) { qpair->lun_cnt++; h = qla_qpair_to_hint(tgt, qpair); BUG_ON(!h); rc = btree_insert64(&tgt->lun_qpair_map, cmd->unpacked_lun, h, GFP_ATOMIC); if (rc) { qpair->lun_cnt--; ql_log(ql_log_info, vha, 0xd037, "Unable to insert lun %llx into lun_qpair_map\n", cmd->unpacked_lun); } goto out; } else { lcnt = qpair->lun_cnt; } h = NULL; list_for_each_entry(qp, &base_vha->qp_list, qp_list_elem) { if (qp->lun_cnt == 0) { qp->lun_cnt++; h = qla_qpair_to_hint(tgt, qp); BUG_ON(!h); rc = btree_insert64(&tgt->lun_qpair_map, cmd->unpacked_lun, h, GFP_ATOMIC); if (rc) { qp->lun_cnt--; ql_log(ql_log_info, vha, 0xd038, "Unable to insert lun %llx into lun_qpair_map\n", cmd->unpacked_lun); } qpair = qp; goto out; } else { if (qp->lun_cnt < lcnt) { lcnt = qp->lun_cnt; qpair = qp; continue; } } } BUG_ON(!qpair); qpair->lun_cnt++; h = qla_qpair_to_hint(tgt, qpair); BUG_ON(!h); rc = btree_insert64(&tgt->lun_qpair_map, cmd->unpacked_lun, h, GFP_ATOMIC); if (rc) { qpair->lun_cnt--; ql_log(ql_log_info, vha, 0xd039, "Unable to insert lun %llx into lun_qpair_map\n", cmd->unpacked_lun); } } } else { h = &tgt->qphints[0]; } out: cmd->qpair = h->qpair; cmd->se_cmd.cpuid = h->cpuid; } static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, struct fc_port *sess, struct atio_from_isp *atio) { struct qla_tgt_cmd *cmd; cmd = vha->hw->tgt.tgt_ops->get_cmd(sess); if (!cmd) return NULL; cmd->cmd_type = TYPE_TGT_CMD; memcpy(&cmd->atio, atio, sizeof(*atio)); INIT_LIST_HEAD(&cmd->sess_cmd_list); cmd->state = QLA_TGT_STATE_NEW; cmd->tgt = vha->vha_tgt.qla_tgt; qlt_incr_num_pend_cmds(vha); cmd->vha = vha; cmd->sess = sess; cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; cmd->trc_flags = 0; cmd->jiffies_at_alloc = get_jiffies_64(); cmd->unpacked_lun = scsilun_to_int( (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 
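	/*
	 * Pick the queue pair (and its CPU hint) that will service this LUN
	 * before the command is handed to the work queue.
	 */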
qlt_assign_qpair(vha, cmd); cmd->reset_count = vha->hw->base_qpair->chip_reset; cmd->vp_idx = vha->vp_idx; cmd->edif = sess->edif.enable; return cmd; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct atio_from_isp *atio) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct fc_port *sess; struct qla_tgt_cmd *cmd; unsigned long flags; port_id_t id; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x3061, "New command while device %p is shutting down\n", tgt); return -ENODEV; } id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); if (IS_SW_RESV_ADDR(id)) return -EBUSY; sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); if (unlikely(!sess)) return -EFAULT; /* Another WWN used to have our s_id. Our PLOGI scheduled its * session deletion, but it's still in sess_del_work wq */ if (sess->deleted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, "New command while old session %p is being deleted\n", sess); return -EFAULT; } /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. */ if (!kref_get_unless_zero(&sess->sess_kref)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "%s: kref_get fail, %8phC oxid %x \n", __func__, sess->port_name, be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); return -EFAULT; } cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); ha->tgt.tgt_ops->put_sess(sess); return -EBUSY; } cmd->cmd_in_wq = 1; cmd->trc_flags |= TRC_NEW_CMD; spin_lock_irqsave(&vha->cmd_list_lock, flags); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); spin_unlock_irqrestore(&vha->cmd_list_lock, flags); INIT_WORK(&cmd->work, qlt_do_work); if (vha->flags.qpairs_available) { queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); } else if (ha->msix_count) { if (cmd->atio.u.isp24.fcp_cmnd.rddata) queue_work(qla_tgt_wq, &cmd->work); else queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); } else { queue_work(qla_tgt_wq, &cmd->work); } return 0; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, int fn, void *iocb, int flags) { struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (!mcmd) { ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, "qla_target(%d): Allocation of management " "command failed, some commands and their data could " "leak\n", vha->vp_idx); return -ENOMEM; } memset(mcmd, 0, sizeof(*mcmd)); mcmd->sess = sess; if (iocb) { memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, sizeof(mcmd->orig_iocb.imm_ntfy)); } mcmd->tmr_func = fn; mcmd->flags = flags; mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->qpair = h->qpair; mcmd->vha = vha; mcmd->se_cmd.cpuid = h->cpuid; mcmd->unpacked_lun = lun; switch (fn) { case QLA_TGT_LUN_RESET: case QLA_TGT_CLEAR_TS: case QLA_TGT_ABORT_TS: abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); fallthrough; case QLA_TGT_CLEAR_ACA: h = qlt_find_qphint(vha, mcmd->unpacked_lun); mcmd->qpair = h->qpair; mcmd->se_cmd.cpuid = h->cpuid; break; case QLA_TGT_TARGET_RESET: case QLA_TGT_NEXUS_LOSS_SESS: case QLA_TGT_NEXUS_LOSS: case QLA_TGT_ABORT_ALL: default: /* no-op */ break; } INIT_WORK(&mcmd->work, qlt_do_tmr_work); queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); 
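	/* The TMR itself runs asynchronously via qlt_do_tmr_work() on qla_tgt_wq. */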
return 0; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) { struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; struct fc_port *sess; u64 unpacked_lun; int fn; unsigned long flags; fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, a->u.isp24.fcp_hdr.s_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); unpacked_lun = scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); if (sess == NULL || sess->deleted) return -EFAULT; return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); } /* ha->hardware_lock supposed to be held on entry */ static int __qlt_abort_task(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb, struct fc_port *sess) { struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; u64 unpacked_lun; int rc; mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); if (mcmd == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, "qla_target(%d): %s: Allocation of ABORT cmd failed\n", vha->vp_idx, __func__); return -ENOMEM; } memset(mcmd, 0, sizeof(*mcmd)); mcmd->sess = sess; memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, sizeof(mcmd->orig_iocb.imm_ntfy)); unpacked_lun = scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; mcmd->qpair = ha->base_qpair; rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, le16_to_cpu(iocb->u.isp2x.seq_id)); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", vha->vp_idx, rc); mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); return -EFAULT; } return 0; } /* ha->hardware_lock supposed to be held on entry */ static int qlt_abort_task(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_hw_data *ha = vha->hw; struct fc_port *sess; int loop_id; unsigned long flags; loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (sess == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, "qla_target(%d): task abort for unexisting " "session\n", vha->vp_idx); return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); } return __qlt_abort_task(vha, iocb, sess); } void qlt_logo_completion_handler(fc_port_t *fcport, int rc) { if (rc != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, "%s: se_sess %p / sess %p from" " port %8phC loop_id %#04x s_id %02x:%02x:%02x" " LOGO failed: %#x\n", __func__, fcport->se_sess, fcport, fcport->port_name, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, rc); } fcport->logout_completed = 1; } /* * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) * * Schedules sessions with matching port_id/loop_id but different wwn for * deletion. Returns existing session with matching wwn if present. * Null otherwise. 
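 * A different-wwn session that still holds the same s_id/loop_id is reported
 * back through *conflict_sess.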
*/ struct fc_port * qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) { struct fc_port *sess = NULL, *other_sess; uint64_t other_wwn; *conflict_sess = NULL; list_for_each_entry(other_sess, &vha->vp_fcports, list) { other_wwn = wwn_to_u64(other_sess->port_name); if (wwn == other_wwn) { WARN_ON(sess); sess = other_sess; continue; } /* find other sess with nport_id collision */ if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { ql_dbg(ql_dbg_disc, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); /* * logout_on_delete is set by default, but another * session that has the same s_id/loop_id combo * might have cleared it when requested this session * deletion, so don't touch it */ qlt_schedule_sess_for_deletion(other_sess); } else { /* * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ ql_dbg(ql_dbg_disc, vha, 0xf01b, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); other_sess->keep_nport_handle = 1; if (other_sess->disc_state != DSC_DELETED) *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess); } continue; } /* find other sess with nport handle collision */ if ((loop_id == other_sess->loop_id) && (loop_id != FC_NO_LOOP_ID)) { ql_dbg(ql_dbg_disc, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); /* Same loop_id but different s_id * Ok to kill and logout */ qlt_schedule_sess_for_deletion(other_sess); } } return sess; } /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) { struct qla_tgt_sess_op *op; struct qla_tgt_cmd *cmd; uint32_t key; int count = 0; unsigned long flags; key = (((u32)s_id->b.domain << 16) | ((u32)s_id->b.area << 8) | ((u32)s_id->b.al_pa)); spin_lock_irqsave(&vha->cmd_list_lock, flags); list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); if (op_key == key) { op->aborted = true; count++; } } list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); if (cmd_key == key) { cmd->aborted = 1; count++; } } spin_unlock_irqrestore(&vha->cmd_list_lock, flags); return count; } static int qlt_handle_login(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct fc_port *sess = NULL, *conflict_sess = NULL; uint64_t wwn; port_id_t port_id; uint16_t loop_id, wd3_lo; int res = 0; struct qlt_plogi_ack_t *pla; unsigned long flags; lockdep_assert_held(&vha->hw->hardware_lock); wwn = wwn_to_u64(iocb->u.isp24.port_name); port_id.b.domain = iocb->u.isp24.port_id[2]; port_id.b.area = iocb->u.isp24.port_id[1]; port_id.b.al_pa = iocb->u.isp24.port_id[0]; port_id.b.rsvd_1 = 0; loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); /* Mark all stale commands sitting in qla_tgt_wq for deletion */ abort_cmds_for_s_id(vha, &port_id); if (wwn) { spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, loop_id, &conflict_sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } else { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ", __func__, __LINE__, loop_id, port_id.b24); qlt_send_term_imm_notif(vha, iocb, 1); goto out; } if (IS_SW_RESV_ADDR(port_id)) { res 
= 1; goto out; } if (vha->hw->flags.edif_enabled && !(vha->e_dbell.db_flags & EDB_ACTIVE) && iocb->u.isp24.status_subcode == ELS_PLOGI && !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d Term INOT due to app not available lid=%d, NportID %06X ", __func__, __LINE__, loop_id, port_id.b24); qlt_send_term_imm_notif(vha, iocb, 1); goto out; } if (vha->hw->flags.edif_enabled) { if (DBELL_INACTIVE(vha)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d Term INOT due to app not started lid=%d, NportID %06X ", __func__, __LINE__, loop_id, port_id.b24); qlt_send_term_imm_notif(vha, iocb, 1); goto out; } else if (iocb->u.isp24.status_subcode == ELS_PLOGI && !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d Term INOT due to unsecure lid=%d, NportID %06X ", __func__, __LINE__, loop_id, port_id.b24); qlt_send_term_imm_notif(vha, iocb, 1); goto out; } } pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); if (!pla) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s %d %8phC Term INOT due to mem alloc fail", __func__, __LINE__, iocb->u.isp24.port_name); qlt_send_term_imm_notif(vha, iocb, 1); goto out; } if (conflict_sess) { conflict_sess->login_gen++; qlt_plogi_ack_link(vha, pla, conflict_sess, QLT_PLOGI_LINK_CONFLICT); } if (!sess) { pla->ref_count++; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post new sess\n", __func__, __LINE__, iocb->u.isp24.port_name); if (iocb->u.isp24.status_subcode == ELS_PLOGI) qla24xx_post_newsess_work(vha, &port_id, iocb->u.isp24.port_name, iocb->u.isp24.u.plogi.node_name, pla, 0); else qla24xx_post_newsess_work(vha, &port_id, iocb->u.isp24.port_name, NULL, pla, 0); goto out; } if (sess->disc_state == DSC_UPD_FCPORT) { u16 sec; /* * Remote port registration is still going on from * previous login. Allow it to finish before we * accept the new login. 
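	 * The immediate notify is terminated below so that the initiator
	 * retries the login once the registration has finished.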
*/ sess->next_disc_state = DSC_DELETE_PEND; sec = jiffies_to_msecs(jiffies - sess->jiffies_at_registration) / 1000; if (sess->sec_since_registration < sec && sec && !(sec % 5)) { sess->sec_since_registration = sec; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC - Slow Rport registration (%d Sec)\n", __func__, sess->port_name, sec); } if (!conflict_sess) { list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); } qlt_send_term_imm_notif(vha, iocb, 1); goto out; } qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); sess->d_id = port_id; sess->login_gen++; sess->loop_id = loop_id; if (iocb->u.isp24.status_subcode == ELS_PLOGI) { /* remote port has assigned Port ID */ if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess)) vha->d_id = sess->d_id; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC - send port online\n", __func__, sess->port_name); qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, sess->d_id.b24); } if (iocb->u.isp24.status_subcode == ELS_PRLI) { sess->fw_login_state = DSC_LS_PRLI_PEND; sess->local = 0; sess->loop_id = loop_id; sess->d_id = port_id; sess->fw_login_state = DSC_LS_PRLI_PEND; wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); if (wd3_lo & BIT_7) sess->conf_compl_supported = 1; if ((wd3_lo & BIT_4) == 0) sess->port_type = FCT_INITIATOR; else sess->port_type = FCT_TARGET; } else sess->fw_login_state = DSC_LS_PLOGI_PEND; ql_dbg(ql_dbg_disc, vha, 0x20f9, "%s %d %8phC DS %d\n", __func__, __LINE__, sess->port_name, sess->disc_state); switch (sess->disc_state) { case DSC_DELETED: case DSC_LOGIN_PEND: qlt_plogi_ack_unref(vha, pla); break; default: /* * Under normal circumstances we want to release nport handle * during LOGO process to avoid nport handle leaks inside FW. * The exception is when LOGO is done while another PLOGI with * the same nport handle is waiting as might be the case here. * Note: there is always a possibily of a race where session * deletion has already started for other reasons (e.g. ACL * removal) and now PLOGI arrives: * 1. if PLOGI arrived in FW after nport handle has been freed, * FW must have assigned this PLOGI a new/same handle and we * can proceed ACK'ing it as usual when session deletion * completes. * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT * bit reached it, the handle has now been released. We'll * get an error when we ACK this PLOGI. Nothing will be sent * back to initiator. Initiator should eventually retry * PLOGI and situation will correct itself. */ sess->keep_nport_handle = ((sess->loop_id == loop_id) && (sess->d_id.b24 == port_id.b24)); ql_dbg(ql_dbg_disc, vha, 0x20f9, "%s %d %8phC post del sess\n", __func__, __LINE__, sess->port_name); qlt_schedule_sess_for_deletion(sess); break; } out: return res; } /* * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire */ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; struct fc_port *sess = NULL, *conflict_sess = NULL; uint64_t wwn; port_id_t port_id; uint16_t loop_id; uint16_t wd3_lo; int res = 0; unsigned long flags; lockdep_assert_held(&ha->hardware_lock); wwn = wwn_to_u64(iocb->u.isp24.port_name); port_id.b.domain = iocb->u.isp24.port_id[2]; port_id.b.area = iocb->u.isp24.port_id[1]; port_id.b.al_pa = iocb->u.isp24.port_id[0]; port_id.b.rsvd_1 = 0; loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); ql_dbg(ql_dbg_disc, vha, 0xf026, "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", vha->vp_idx, iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], iocb->u.isp24.status_subcode, loop_id, iocb->u.isp24.port_name); /* res = 1 means ack at the end of thread * res = 0 means ack async/later. */ switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: res = qlt_handle_login(vha, iocb); break; case ELS_PRLI: if (N2N_TOPO(ha)) { sess = qla2x00_find_fcport_by_wwpn(vha, iocb->u.isp24.port_name, 1); if (vha->hw->flags.edif_enabled && sess && (!(sess->flags & FCF_FCSP_DEVICE) || !sess->edif.authok)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC Term PRLI due to unauthorize PRLI\n", __func__, __LINE__, iocb->u.isp24.port_name); qlt_send_term_imm_notif(vha, iocb, 1); break; } if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n", __func__, __LINE__, iocb->u.isp24.port_name); qlt_send_term_imm_notif(vha, iocb, 1); break; } res = qlt_handle_login(vha, iocb); break; } if (IS_SW_RESV_ADDR(port_id)) { res = 1; break; } wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); if (wwn) { spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, loop_id, &conflict_sess); spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } if (conflict_sess) { switch (conflict_sess->disc_state) { case DSC_DELETED: case DSC_DELETE_PEND: break; default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, "PRLI with conflicting sess %p port %8phC\n", conflict_sess, conflict_sess->port_name); conflict_sess->fw_login_state = DSC_LS_PORT_UNAVAIL; qlt_send_term_imm_notif(vha, iocb, 1); res = 0; break; } } if (sess != NULL) { bool delete = false; int sec; if (vha->hw->flags.edif_enabled && sess && (!(sess->flags & FCF_FCSP_DEVICE) || !sess->edif.authok)) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC Term PRLI due to unauthorize prli\n", __func__, __LINE__, iocb->u.isp24.port_name); qlt_send_term_imm_notif(vha, iocb, 1); break; } spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); switch (sess->fw_login_state) { case DSC_LS_PLOGI_PEND: case DSC_LS_PLOGI_COMP: case DSC_LS_PRLI_COMP: break; default: delete = true; break; } switch (sess->disc_state) { case DSC_UPD_FCPORT: spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); sec = jiffies_to_msecs(jiffies - sess->jiffies_at_registration)/1000; if (sess->sec_since_registration < sec && sec && !(sec % 5)) { sess->sec_since_registration = sec; ql_dbg(ql_dbg_disc, sess->vha, 0xffff, "%s %8phC : Slow Rport registration(%d Sec)\n", __func__, sess->port_name, sec); } qlt_send_term_imm_notif(vha, iocb, 1); return 0; case DSC_LOGIN_PEND: case DSC_GPDB: case DSC_LOGIN_COMPLETE: case DSC_ADISC: delete = false; break; default: break; } if (delete) { 
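			/* sess_lock is released before terminating the immediate notify. */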
spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); /* * Impatient initiator sent PRLI before last * PLOGI could finish. Will force him to re-try, * while last one finishes. */ ql_log(ql_log_warn, sess->vha, 0xf095, "sess %p PRLI received, before plogi ack.\n", sess); qlt_send_term_imm_notif(vha, iocb, 1); res = 0; break; } /* * This shouldn't happen under normal circumstances, * since we have deleted the old session during PLOGI */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", sess->loop_id, sess, iocb->u.isp24.nport_handle); sess->local = 0; sess->loop_id = loop_id; sess->d_id = port_id; sess->fw_login_state = DSC_LS_PRLI_PEND; if (wd3_lo & BIT_7) sess->conf_compl_supported = 1; if ((wd3_lo & BIT_4) == 0) sess->port_type = FCT_INITIATOR; else sess->port_type = FCT_TARGET; spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); } res = 1; /* send notify ack */ /* Make session global (not used in fabric mode) */ if (ha->current_topology != ISP_CFG_F) { if (sess) { ql_dbg(ql_dbg_disc, vha, 0x20fa, "%s %d %8phC post nack\n", __func__, __LINE__, sess->port_name); qla24xx_post_nack_work(vha, sess, iocb, SRB_NACK_PRLI); res = 0; } else { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else { if (sess) { ql_dbg(ql_dbg_disc, vha, 0x20fb, "%s %d %8phC post nack\n", __func__, __LINE__, sess->port_name); qla24xx_post_nack_work(vha, sess, iocb, SRB_NACK_PRLI); res = 0; } } break; case ELS_TPRLO: if (le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { loop_id = 0xFFFF; qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); res = 1; break; } fallthrough; case ELS_LOGO: case ELS_PRLO: spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = qla2x00_find_fcport_by_loopid(vha, loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (sess) { sess->login_gen++; sess->fw_login_state = DSC_LS_LOGO_PEND; sess->logo_ack_needed = 1; memcpy(sess->iocb, iocb, IOCB_SIZE); } res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); ql_dbg(ql_dbg_disc, vha, 0x20fc, "%s: logo %llx res %d sess %p ", __func__, wwn, res, sess); if (res == 0) { /* * cmd went upper layer, look for qlt_xmit_tm_rsp() * for LOGO_ACK & sess delete */ BUG_ON(!sess); res = 0; } else { /* cmd did not go to upper layer. */ if (sess) { qlt_schedule_sess_for_deletion(sess); res = 0; } /* else logo will be ack */ } break; case ELS_PDISC: case ELS_ADISC: { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(ha->base_qpair, &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } sess = qla2x00_find_fcport_by_wwpn(vha, iocb->u.isp24.port_name, 1); if (sess) { ql_dbg(ql_dbg_disc, vha, 0x20fd, "sess %p lid %d|%d DS %d LS %d\n", sess, sess->loop_id, loop_id, sess->disc_state, sess->fw_login_state); } res = 1; /* send notify ack */ break; } case ELS_FLOGI: /* should never happen */ default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, "qla_target(%d): Unsupported ELS command %x " "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); break; } ql_dbg(ql_dbg_disc, vha, 0xf026, "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n", vha->vp_idx, iocb->u.isp24.status_subcode, res); return res; } /* * ha->hardware_lock supposed to be held on entry. * Might drop it, then reacquire. 
*/ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_hw_data *ha = vha->hw; uint32_t add_flags = 0; int send_notify_ack = 1; uint16_t status; lockdep_assert_held(&ha->hardware_lock); status = le16_to_cpu(iocb->u.isp2x.status); switch (status) { case IMM_NTFY_LIP_RESET: { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, "qla_target(%d): LIP reset (loop %#x), subcode %x\n", vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), iocb->u.isp24.status_subcode); if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) send_notify_ack = 0; break; } case IMM_NTFY_LIP_LINK_REINIT: { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, "qla_target(%d): LINK REINIT (loop %#x, " "subcode %x)\n", vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), iocb->u.isp24.status_subcode); if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(ha->base_qpair, &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); } memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); tgt->link_reinit_iocb_pending = 1; /* * QLogic requires to wait after LINK REINIT for possible * PDISC or ADISC ELS commands */ send_notify_ack = 0; break; } case IMM_NTFY_PORT_LOGOUT: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, "qla_target(%d): Port logout (loop " "%#x, subcode %x)\n", vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), iocb->u.isp24.status_subcode); if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) send_notify_ack = 0; /* The sessions will be cleared in the callback, if needed */ break; case IMM_NTFY_GLBL_TPRLO: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) send_notify_ack = 0; /* The sessions will be cleared in the callback, if needed */ break; case IMM_NTFY_PORT_CONFIG: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, status); if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) send_notify_ack = 0; /* The sessions will be cleared in the callback, if needed */ break; case IMM_NTFY_GLBL_LOGO: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, "qla_target(%d): Link failure detected\n", vha->vp_idx); /* I_T nexus loss */ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) send_notify_ack = 0; break; case IMM_NTFY_IOCB_OVERFLOW: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, "qla_target(%d): Cannot provide requested " "capability (IOCB overflowed the immediate notify " "resource count)\n", vha->vp_idx); break; case IMM_NTFY_ABORT_TASK: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, "qla_target(%d): Abort Task (S %08x I %#x -> " "L %#x)\n", vha->vp_idx, le16_to_cpu(iocb->u.isp2x.seq_id), GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), le16_to_cpu(iocb->u.isp2x.lun)); if (qlt_abort_task(vha, iocb) == 0) send_notify_ack = 0; break; case IMM_NTFY_RESOURCE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, "qla_target(%d): Out of resources, host %ld\n", vha->vp_idx, vha->host_no); break; case IMM_NTFY_MSG_RX: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, "qla_target(%d): Immediate notify task %x\n", vha->vp_idx, iocb->u.isp2x.task_flags); break; case IMM_NTFY_ELS: if (qlt_24xx_handle_els(vha, iocb) == 0) send_notify_ack = 0; break; default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, "qla_target(%d): Received unknown immediate " "notify status %x\n", vha->vp_idx, status); break; } if (send_notify_ack) qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, 0, 0); } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire * This function sends busy to ISP 2xxx or 24xx. 
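 * The response is built as a status-mode-1 CTIO7 carrying the SAM status in
 * the scsi_status field; no data phase is performed.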
*/ static int __qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, uint16_t status) { struct scsi_qla_host *vha = qpair->vha; struct ctio7_to_24xx *ctio24; struct qla_hw_data *ha = vha->hw; request_t *pkt; struct fc_port *sess = NULL; unsigned long flags; u16 temp; port_id_t id; id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (!sess) { qlt_send_term_exchange(qpair, NULL, atio, 1, 0); return 0; } /* Sending marker isn't necessary, since we called from ISR */ pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (!pkt) { ql_dbg(ql_dbg_io, vha, 0x3063, "qla_target(%d): %s failed: unable to allocate " "request packet", vha->vp_idx, __func__); return -ENOMEM; } qpair->tgt_counters.num_q_full_sent++; pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; ctio24->nport_handle = cpu_to_le16(sess->loop_id); ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio24->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | CTIO7_FLAGS_DONT_RET_CTIO; ctio24->u.status1.flags = cpu_to_le16(temp); /* * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, * if the explicit conformation is used. */ ctio24->u.status1.ox_id = cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); ctio24->u.status1.scsi_status = cpu_to_le16(status); ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); if (ctio24->u.status1.residual != 0) ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); /* Memory Barrier */ wmb(); if (qpair->reqq_start_iocbs) qpair->reqq_start_iocbs(qpair); else qla2x00_start_iocbs(vha, qpair->req); return 0; } /* * This routine is used to allocate a command for either a QFull condition * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go * out previously. 
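 * Commands allocated here are parked on tgt.q_full_list and later replayed
 * (or freed) by qlt_free_qfull_cmds().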
*/ static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; struct fc_port *sess; struct qla_tgt_cmd *cmd; unsigned long flags; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x300a, "New command while device %p is shutting down\n", tgt); return; } if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { vha->hw->tgt.num_qfull_cmds_dropped++; if (vha->hw->tgt.num_qfull_cmds_dropped > vha->qla_stats.stat_max_qfull_cmds_dropped) vha->qla_stats.stat_max_qfull_cmds_dropped = vha->hw->tgt.num_qfull_cmds_dropped; ql_dbg(ql_dbg_io, vha, 0x3068, "qla_target(%d): %s: QFull CMD dropped[%d]\n", vha->vp_idx, __func__, vha->hw->tgt.num_qfull_cmds_dropped); qlt_chk_exch_leak_thresh_hold(vha); return; } sess = ha->tgt.tgt_ops->find_sess_by_s_id (vha, atio->u.isp24.fcp_hdr.s_id); if (!sess) return; cmd = ha->tgt.tgt_ops->get_cmd(sess); if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3009, "qla_target(%d): %s: Allocation of cmd failed\n", vha->vp_idx, __func__); vha->hw->tgt.num_qfull_cmds_dropped++; if (vha->hw->tgt.num_qfull_cmds_dropped > vha->qla_stats.stat_max_qfull_cmds_dropped) vha->qla_stats.stat_max_qfull_cmds_dropped = vha->hw->tgt.num_qfull_cmds_dropped; qlt_chk_exch_leak_thresh_hold(vha); return; } qlt_incr_num_pend_cmds(vha); INIT_LIST_HEAD(&cmd->cmd_list); memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->tgt = vha->vha_tgt.qla_tgt; cmd->vha = vha; cmd->reset_count = ha->base_qpair->chip_reset; cmd->q_full = 1; cmd->qpair = ha->base_qpair; if (qfull) { cmd->q_full = 1; /* NOTE: borrowing the state field to carry the status */ cmd->state = status; } else cmd->term_exchg = 1; spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); vha->hw->tgt.num_qfull_cmds_alloc++; if (vha->hw->tgt.num_qfull_cmds_alloc > vha->qla_stats.stat_max_qfull_cmds_alloc) vha->qla_stats.stat_max_qfull_cmds_alloc = vha->hw->tgt.num_qfull_cmds_alloc; spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } int qlt_free_qfull_cmds(struct qla_qpair *qpair) { struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; struct qla_tgt_cmd *cmd, *tcmd; struct list_head free_list, q_full_list; int rc = 0; if (list_empty(&ha->tgt.q_full_list)) return 0; INIT_LIST_HEAD(&free_list); INIT_LIST_HEAD(&q_full_list); spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); if (list_empty(&ha->tgt.q_full_list)) { spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); return 0; } list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); spin_lock_irqsave(qpair->qp_lock_ptr, flags); list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { if (cmd->q_full) /* cmd->state is a borrowed field to hold status */ rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); else if (cmd->term_exchg) rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); if (rc == -ENOMEM) break; if (cmd->q_full) ql_dbg(ql_dbg_io, vha, 0x3006, "%s: busy sent for ox_id[%04x]\n", __func__, be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); else if (cmd->term_exchg) ql_dbg(ql_dbg_io, vha, 0x3007, "%s: Term exchg sent for ox_id[%04x]\n", __func__, be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); else ql_dbg(ql_dbg_io, vha, 0x3008, "%s: Unexpected cmd in QFull list %p\n", __func__, cmd); list_move_tail(&cmd->cmd_list, &free_list); /* piggy back on hardware_lock for protection */ 
vha->hw->tgt.num_qfull_cmds_alloc--; } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); cmd = NULL; list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { list_del(&cmd->cmd_list); /* This cmd was never sent to TCM. There is no need * to schedule free or call free_cmd */ qlt_free_cmd(cmd); } if (!list_empty(&q_full_list)) { spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); list_splice(&q_full_list, &vha->hw->tgt.q_full_list); spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } return rc; } static void qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, uint16_t status) { int rc = 0; struct scsi_qla_host *vha = qpair->vha; rc = __qlt_send_busy(qpair, atio, status); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, status, 1); } static int qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, struct atio_from_isp *atio, uint8_t ha_locked) { struct qla_hw_data *ha = vha->hw; unsigned long flags; if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) return 0; if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(qpair, atio, qla_sam_status); if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); return 1; } /* ha->hardware_lock supposed to be held on entry */ /* called via callback from qla2xxx */ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint8_t ha_locked) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int rc; unsigned long flags = 0; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0x3064, "ATIO pkt, but no tgt (ha %p)", ha); return; } /* * In tgt_stop mode we also should allow all requests to pass. * Otherwise, some commands can stuck. */ tgt->atio_irq_cmd_count++; switch (atio->u.raw.entry_type) { case ATIO_TYPE7: if (unlikely(atio->u.isp24.exchange_addr == cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) { ql_dbg(ql_dbg_io, vha, 0x3065, "qla_target(%d): ATIO_TYPE7 " "received with UNKNOWN exchange address, " "sending QUEUE_FULL\n", vha->vp_idx); if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(ha->base_qpair, atio, qla_sam_status); if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); break; } if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, atio, ha_locked); if (rc != 0) { tgt->atio_irq_cmd_count--; return; } rc = qlt_handle_cmd_for_atio(vha, atio); } else { rc = qlt_handle_task_mgmt(vha, atio); } if (unlikely(rc != 0)) { if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); switch (rc) { case -ENODEV: ql_dbg(ql_dbg_tgt, vha, 0xe05f, "qla_target: Unable to send command to target\n"); break; case -EBADF: ql_dbg(ql_dbg_tgt, vha, 0xe05f, "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); qlt_send_term_exchange(ha->base_qpair, NULL, atio, 1, 0); break; case -EBUSY: ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send command to target, sending BUSY status\n", vha->vp_idx); qlt_send_busy(ha->base_qpair, atio, tc_sam_status); break; default: ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send command to target, sending BUSY status\n", vha->vp_idx); qlt_send_busy(ha->base_qpair, atio, qla_sam_status); break; } if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); } break; case IMMED_NOTIFY_TYPE: { if (unlikely(atio->u.isp2x.entry_status != 0)) { ql_dbg(ql_dbg_tgt, vha, 0xe05b, "qla_target(%d): Received ATIO packet %x " "with error status %x\n", vha->vp_idx, 
atio->u.raw.entry_type, atio->u.isp2x.entry_status); break; } ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); break; } default: ql_dbg(ql_dbg_tgt, vha, 0xe05c, "qla_target(%d): Received unknown ATIO atio " "type %x\n", vha->vp_idx, atio->u.raw.entry_type); break; } tgt->atio_irq_cmd_count--; } /* * qpair lock is assume to be held * rc = 0 : send terminate & abts respond * rc != 0: do not send term & abts respond */ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry) { struct qla_hw_data *ha = vha->hw; int rc = 0; /* * Detect unresolved exchange. If the same ABTS is unable * to terminate an existing command and the same ABTS loops * between FW & Driver, then force FW dump. Under 1 jiff, * we should see multiple loops. */ if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort && qpair->retry_term_jiff == jiffies) { /* found existing exchange */ qpair->retry_term_cnt++; if (qpair->retry_term_cnt >= 5) { rc = -EIO; qpair->retry_term_cnt = 0; ql_log(ql_log_warn, vha, 0xffff, "Unable to send ABTS Respond. Dumping firmware.\n"); ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer, vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); if (qpair == ha->base_qpair) ha->isp_ops->fw_dump(vha); else qla2xxx_dump_fw(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else if (qpair->retry_term_jiff != jiffies) { qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort; qpair->retry_term_cnt = 0; qpair->retry_term_jiff = jiffies; } return rc; } static void qlt_handle_abts_completion(struct scsi_qla_host *vha, struct rsp_que *rsp, response_t *pkt) { struct abts_resp_from_24xx_fw *entry = (struct abts_resp_from_24xx_fw *)pkt; u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK; struct qla_tgt_mgmt_cmd *mcmd; struct qla_hw_data *ha = vha->hw; mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt); if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) { ql_dbg(ql_dbg_async, vha, 0xe064, "qla_target(%d): ABTS Comp without mcmd\n", vha->vp_idx); return; } if (mcmd) vha = mcmd->vha; vha->vha_tgt.qla_tgt->abts_resp_expected--; ql_dbg(ql_dbg_tgt, vha, 0xe038, "ABTS_RESP_24XX: compl_status %x\n", entry->compl_status); if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { if (le32_to_cpu(entry->error_subcode1) == 0x1E && le32_to_cpu(entry->error_subcode2) == 0) { if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { ha->tgt.tgt_ops->free_mcmd(mcmd); return; } qlt_24xx_retry_term_exchange(vha, rsp->qpair, pkt, mcmd); } else { ql_dbg(ql_dbg_tgt, vha, 0xe063, "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)", vha->vp_idx, entry->compl_status, entry->error_subcode1, entry->error_subcode2); ha->tgt.tgt_ops->free_mcmd(mcmd); } } else if (mcmd) { ha->tgt.tgt_ops->free_mcmd(mcmd); } } /* ha->hardware_lock supposed to be held on entry */ /* called via callback from qla2xxx */ static void qlt_response_pkt(struct scsi_qla_host *vha, struct rsp_que *rsp, response_t *pkt) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0xe05d, "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, vha->hw); return; } /* * In tgt_stop mode we also should allow all requests to pass. * Otherwise, some commands can stuck. 
*/ switch (pkt->entry_type) { case CTIO_CRC2: case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; qlt_do_ctio_completion(vha, rsp, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; } case ACCEPT_TGT_IO_TYPE: { struct atio_from_isp *atio = (struct atio_from_isp *)pkt; int rc; if (atio->u.isp2x.status != cpu_to_le16(ATIO_CDB_VALID)) { ql_dbg(ql_dbg_tgt, vha, 0xe05e, "qla_target(%d): ATIO with error " "status %x received\n", vha->vp_idx, le16_to_cpu(atio->u.isp2x.status)); break; } rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); if (rc != 0) return; rc = qlt_handle_cmd_for_atio(vha, atio); if (unlikely(rc != 0)) { switch (rc) { case -ENODEV: ql_dbg(ql_dbg_tgt, vha, 0xe05f, "qla_target: Unable to send command to target\n"); break; case -EBADF: ql_dbg(ql_dbg_tgt, vha, 0xe05f, "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0); break; case -EBUSY: ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send command to target, sending BUSY status\n", vha->vp_idx); qlt_send_busy(rsp->qpair, atio, tc_sam_status); break; default: ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send command to target, sending BUSY status\n", vha->vp_idx); qlt_send_busy(rsp->qpair, atio, qla_sam_status); break; } } } break; case CONTINUE_TGT_IO_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; qlt_do_ctio_completion(vha, rsp, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; } case CTIO_A64_TYPE: { struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; qlt_do_ctio_completion(vha, rsp, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); break; } case IMMED_NOTIFY_TYPE: ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); break; case NOTIFY_ACK_TYPE: if (tgt->notify_ack_expected > 0) { struct nack_to_isp *entry = (struct nack_to_isp *)pkt; ql_dbg(ql_dbg_tgt, vha, 0xe036, "NOTIFY_ACK seq %08x status %x\n", le16_to_cpu(entry->u.isp2x.seq_id), le16_to_cpu(entry->u.isp2x.status)); tgt->notify_ack_expected--; if (entry->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) { ql_dbg(ql_dbg_tgt, vha, 0xe061, "qla_target(%d): NOTIFY_ACK " "failed %x\n", vha->vp_idx, le16_to_cpu(entry->u.isp2x.status)); } } else { ql_dbg(ql_dbg_tgt, vha, 0xe062, "qla_target(%d): Unexpected NOTIFY_ACK received\n", vha->vp_idx); } break; case ABTS_RECV_24XX: ql_dbg(ql_dbg_tgt, vha, 0xe037, "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); break; case ABTS_RESP_24XX: if (tgt->abts_resp_expected > 0) { qlt_handle_abts_completion(vha, rsp, pkt); } else { ql_dbg(ql_dbg_tgt, vha, 0xe064, "qla_target(%d): Unexpected ABTS_RESP_24XX " "received\n", vha->vp_idx); } break; default: ql_dbg(ql_dbg_tgt, vha, 0xe065, "qla_target(%d): Received unknown response pkt " "type %x\n", vha->vp_idx, pkt->entry_type); break; } } /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, uint16_t *mailbox) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int login_code; if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) return; if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && IS_QLA2100(ha)) return; /* * In tgt_stop mode we also should allow all requests to pass. 
* Otherwise, some commands can stuck. */ switch (code) { case MBA_RESET: /* Reset */ case MBA_SYSTEM_ERR: /* System Error */ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, "qla_target(%d): System error async event %#x " "occurred", vha->vp_idx, code); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_LOOP_UP: { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, "qla_target(%d): Async LOOP_UP occurred " "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, mailbox[0], mailbox[1], mailbox[2], mailbox[3]); if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(ha->base_qpair, &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } break; } case MBA_LIP_OCCURRED: case MBA_LOOP_DOWN: case MBA_LIP_RESET: case MBA_RSCN_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, "qla_target(%d): Async event %#x occurred " "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, mailbox[0], mailbox[1], mailbox[2], mailbox[3]); break; case MBA_REJECTED_FCP_CMD: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, mailbox[0], mailbox[1], mailbox[2], mailbox[3]); if (mailbox[3] == 1) { /* exchange starvation. */ vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { ql_log(ql_log_warn, vha, 0xd03a, "Exchange starvation-. Resetting RISC\n"); vha->hw->exch_starvation = 0; if (IS_P3P_TYPE(vha->hw)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } break; case MBA_PORT_UPDATE: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, "qla_target(%d): Port update async event %#x " "occurred: updating the ports database (m[0]=%x, m[1]=%x, " "m[2]=%x, m[3]=%x)", vha->vp_idx, code, mailbox[0], mailbox[1], mailbox[2], mailbox[3]); login_code = mailbox[2]; if (login_code == 0x4) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, "Async MB 2: Got PLOGI Complete\n"); vha->hw->exch_starvation = 0; } else if (login_code == 0x7) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, "Async MB 2: Port Logged Out\n"); break; default: break; } } static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, uint16_t loop_id) { fc_port_t *fcport, *tfcp, *del; int rc; unsigned long flags; u8 newfcport = 0; fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, "qla_target(%d): Allocation of tmp FC port failed", vha->vp_idx); return NULL; } fcport->loop_id = loop_id; rc = qla24xx_gpdb_wait(vha, fcport, 0); if (rc != QLA_SUCCESS) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, "qla_target(%d): Failed to retrieve fcport " "information -- get_port_database() returned %x " "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); kfree(fcport); return NULL; } del = NULL; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); if (tfcp) { tfcp->d_id = fcport->d_id; tfcp->port_type = fcport->port_type; tfcp->supported_classes = fcport->supported_classes; tfcp->flags |= fcport->flags; tfcp->scan_state = QLA_FCPORT_FOUND; del = fcport; fcport = tfcp; } else { if (vha->hw->current_topology == ISP_CFG_F) fcport->flags |= FCF_FABRIC_DEVICE; list_add_tail(&fcport->list, &vha->vp_fcports); if (!IS_SW_RESV_ADDR(fcport->d_id)) vha->fcport_count++; fcport->login_gen++; qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); fcport->login_succ = 1; newfcport = 1; } 
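	/*
	 * Whether an existing fcport was reused or a new one added, clear the
	 * deleted flag before dropping sess_lock.
	 */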
fcport->deleted = 0; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); switch (vha->host->active_mode) { case MODE_INITIATOR: case MODE_DUAL: if (newfcport) { if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { qla24xx_sched_upd_fcport(fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ff, "%s %d %8phC post gpsc fcp_cnt %d\n", __func__, __LINE__, fcport->port_name, vha->fcport_count); qla24xx_post_gpsc_work(vha, fcport); } } break; case MODE_TARGET: default: break; } if (del) qla2x00_free_fcport(del); return fcport; } /* Must be called under tgt_mutex */ static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, be_id_t s_id) { struct fc_port *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; if (s_id.domain == 0xFF && s_id.area == 0xFC) { /* * This is Domain Controller, so it should be * OK to drop SCSI commands from it. */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, "Unable to find initiator with S_ID %x:%x:%x", s_id.domain, s_id.area, s_id.al_pa); return NULL; } mutex_lock(&vha->vha_tgt.tgt_mutex); retry: global_resets = atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); rc = qla24xx_get_loop_id(vha, s_id, &loop_id); if (rc != 0) { mutex_unlock(&vha->vha_tgt.tgt_mutex); ql_log(ql_log_info, vha, 0xf071, "qla_target(%d): Unable to find " "initiator with S_ID %x:%x:%x", vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa); if (rc == -ENOENT) { qlt_port_logo_t logo; logo.id = be_to_port_id(s_id); logo.cmd_count = 1; qlt_send_first_logo(vha, &logo); } return NULL; } fcport = qlt_get_port_database(vha, loop_id); if (!fcport) { mutex_unlock(&vha->vha_tgt.tgt_mutex); return NULL; } if (global_resets != atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, "qla_target(%d): global reset during session discovery " "(counter was %d, new %d), retrying", vha->vp_idx, global_resets, atomic_read(&vha->vha_tgt. 
qla_tgt->tgt_global_resets_count)); goto retry; } sess = qlt_create_sess(vha, fcport, true); mutex_unlock(&vha->vha_tgt.tgt_mutex); return sess; } static void qlt_abort_work(struct qla_tgt *tgt, struct qla_tgt_sess_work_param *prm) { struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; struct fc_port *sess = NULL; unsigned long flags = 0, flags2 = 0; be_id_t s_id; int rc; spin_lock_irqsave(&ha->tgt.sess_lock, flags2); if (tgt->tgt_stop) goto out_term2; s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); if (!sess) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ spin_lock_irqsave(&ha->tgt.sess_lock, flags2); if (!sess) goto out_term2; } else { if (sess->deleted) { sess = NULL; goto out_term2; } if (!kref_get_unless_zero(&sess->sess_kref)) { ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, "%s: kref_get fail %8phC \n", __func__, sess->port_name); sess = NULL; goto out_term2; } } rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); ha->tgt.tgt_ops->put_sess(sess); if (rc != 0) goto out_term; return; out_term2: spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); out_term: spin_lock_irqsave(&ha->hardware_lock, flags); qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qlt_sess_work_fn(struct work_struct *work) { struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); struct scsi_qla_host *vha = tgt->vha; unsigned long flags; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); while (!list_empty(&tgt->sess_works_list)) { struct qla_tgt_sess_work_param *prm = list_entry( tgt->sess_works_list.next, typeof(*prm), sess_works_list_entry); /* * This work can be scheduled on several CPUs at time, so we * must delete the entry to eliminate double processing */ list_del(&prm->sess_works_list_entry); spin_unlock_irqrestore(&tgt->sess_work_lock, flags); switch (prm->type) { case QLA_TGT_SESS_WORK_ABORT: qlt_abort_work(tgt, prm); break; default: BUG_ON(1); break; } spin_lock_irqsave(&tgt->sess_work_lock, flags); kfree(prm); } spin_unlock_irqrestore(&tgt->sess_work_lock, flags); } /* Must be called under tgt_host_action_mutex */ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) { struct qla_tgt *tgt; int rc, i; struct qla_qpair_hint *h; if (!QLA_TGT_MODE_ENABLED()) return 0; if (!IS_TGT_MODE_CAPABLE(ha)) { ql_log(ql_log_warn, base_vha, 0xe070, "This adapter does not support target mode.\n"); return 0; } ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, "Registering target for host %ld(%p).\n", base_vha->host_no, ha); BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); if (!tgt) { ql_dbg(ql_dbg_tgt, base_vha, 0xe066, "Unable to allocate struct qla_tgt\n"); return -ENOMEM; } tgt->qphints = kcalloc(ha->max_qpairs + 1, sizeof(struct qla_qpair_hint), GFP_KERNEL); if (!tgt->qphints) { kfree(tgt); ql_log(ql_log_warn, base_vha, 0x0197, "Unable to allocate qpair hints.\n"); return -ENOMEM; } qla2xxx_driver_template.supported_mode |= MODE_TARGET; rc = btree_init64(&tgt->lun_qpair_map); if (rc) { kfree(tgt->qphints); kfree(tgt); ql_log(ql_log_info, base_vha, 0x0198, "Unable to initialize lun_qpair_map btree\n"); return -EIO; } h = &tgt->qphints[0]; h->qpair = ha->base_qpair; INIT_LIST_HEAD(&h->hint_elem); h->cpuid = 
ha->base_qpair->cpuid; list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); for (i = 0; i < ha->max_qpairs; i++) { unsigned long flags; struct qla_qpair *qpair = ha->queue_pair_map[i]; h = &tgt->qphints[i + 1]; INIT_LIST_HEAD(&h->hint_elem); if (qpair) { h->qpair = qpair; spin_lock_irqsave(qpair->qp_lock_ptr, flags); list_add_tail(&h->hint_elem, &qpair->hints_list); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); h->cpuid = qpair->cpuid; } } tgt->ha = ha; tgt->vha = base_vha; init_waitqueue_head(&tgt->waitQ); spin_lock_init(&tgt->sess_work_lock); INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); INIT_LIST_HEAD(&tgt->sess_works_list); atomic_set(&tgt->tgt_global_resets_count, 0); base_vha->vha_tgt.qla_tgt = tgt; ql_dbg(ql_dbg_tgt, base_vha, 0xe067, "qla_target(%d): using 64 Bit PCI addressing", base_vha->vp_idx); /* 3 is reserved */ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); mutex_lock(&qla_tgt_mutex); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); mutex_unlock(&qla_tgt_mutex); if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) ha->tgt.tgt_ops->add_target(base_vha); return 0; } /* Must be called under tgt_host_action_mutex */ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) { if (!vha->vha_tgt.qla_tgt) return 0; if (vha->fc_vport) { qlt_release(vha->vha_tgt.qla_tgt); return 0; } /* free left over qfull cmds */ qlt_init_term_exchange(vha); ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", vha->host_no, ha); qlt_release(vha->vha_tgt.qla_tgt); return 0; } void qla_remove_hostmap(struct qla_hw_data *ha) { struct scsi_qla_host *node; u32 key = 0; btree_for_each_safe32(&ha->host_map, key, node) btree_remove32(&ha->host_map, key); btree_destroy32(&ha->host_map); } static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, unsigned char *b) { pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name); pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name); put_unaligned_be64(wwpn, b); pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b); } /** * qlt_lport_register - register lport with external module * * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data * @phys_wwpn: physical port WWPN * @npiv_wwpn: NPIV WWPN * @npiv_wwnn: NPIV WWNN * @callback: lport initialization callback for tcm_qla2xxx code */ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, u64 npiv_wwpn, u64 npiv_wwnn, int (*callback)(struct scsi_qla_host *, void *, u64, u64)) { struct qla_tgt *tgt; struct scsi_qla_host *vha; struct qla_hw_data *ha; struct Scsi_Host *host; unsigned long flags; int rc; u8 b[WWN_SIZE]; mutex_lock(&qla_tgt_mutex); list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { vha = tgt->vha; ha = vha->hw; host = vha->host; if (!host) continue; if (!(host->hostt->supported_mode & MODE_TARGET)) continue; if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) continue; spin_lock_irqsave(&ha->hardware_lock, flags); if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", host->host_no); spin_unlock_irqrestore(&ha->hardware_lock, flags); continue; } if (tgt->tgt_stop) { pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", host->host_no); spin_unlock_irqrestore(&ha->hardware_lock, flags); continue; } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!scsi_host_get(host)) { ql_dbg(ql_dbg_tgt, vha, 0xe068, "Unable to scsi_host_get() for" " qla2xxx scsi_host\n"); continue; } qlt_lport_dump(vha, phys_wwpn, b); if 
(memcmp(vha->port_name, b, WWN_SIZE)) { scsi_host_put(host); continue; } rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); if (rc != 0) scsi_host_put(host); mutex_unlock(&qla_tgt_mutex); return rc; } mutex_unlock(&qla_tgt_mutex); return -ENODEV; } EXPORT_SYMBOL(qlt_lport_register); /** * qlt_lport_deregister - Degister lport * * @vha: Registered scsi_qla_host pointer */ void qlt_lport_deregister(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct Scsi_Host *sh = vha->host; /* * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data */ vha->vha_tgt.target_lport_ptr = NULL; ha->tgt.tgt_ops = NULL; /* * Release the Scsi_Host reference for the underlying qla2xxx host */ scsi_host_put(sh); } EXPORT_SYMBOL(qlt_lport_deregister); /* Must be called under HW lock */ void qlt_set_mode(struct scsi_qla_host *vha) { switch (vha->qlini_mode) { case QLA2XXX_INI_MODE_DISABLED: case QLA2XXX_INI_MODE_EXCLUSIVE: vha->host->active_mode = MODE_TARGET; break; case QLA2XXX_INI_MODE_ENABLED: vha->host->active_mode = MODE_INITIATOR; break; case QLA2XXX_INI_MODE_DUAL: vha->host->active_mode = MODE_DUAL; break; default: break; } } /* Must be called under HW lock */ static void qlt_clear_mode(struct scsi_qla_host *vha) { switch (vha->qlini_mode) { case QLA2XXX_INI_MODE_DISABLED: vha->host->active_mode = MODE_UNKNOWN; break; case QLA2XXX_INI_MODE_EXCLUSIVE: vha->host->active_mode = MODE_INITIATOR; break; case QLA2XXX_INI_MODE_ENABLED: case QLA2XXX_INI_MODE_DUAL: vha->host->active_mode = MODE_INITIATOR; break; default: break; } } /* * qla_tgt_enable_vha - NO LOCK HELD * * host_reset, bring up w/ Target Mode Enabled */ void qlt_enable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); if (!tgt) { ql_dbg(ql_dbg_tgt, vha, 0xe069, "Unable to locate qla_tgt pointer from" " struct qla_hw_data\n"); dump_stack(); return; } if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) return; if (ha->tgt.num_act_qpairs > ha->max_qpairs) ha->tgt.num_act_qpairs = ha->max_qpairs; spin_lock_irqsave(&ha->hardware_lock, flags); tgt->tgt_stopped = 0; qlt_set_mode(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_lock(&ha->optrom_mutex); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, "%s.\n", __func__); if (vha->vp_idx) { qla24xx_disable_vp(vha); qla24xx_enable_vp(vha); } else { set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) != QLA_SUCCESS); } mutex_unlock(&ha->optrom_mutex); } EXPORT_SYMBOL(qlt_enable_vha); /* * qla_tgt_disable_vha - NO LOCK HELD * * Disable Target Mode and reset the adapter */ static void qlt_disable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; if (!tgt) { ql_dbg(ql_dbg_tgt, vha, 0xe06a, "Unable to locate qla_tgt pointer from" " struct qla_hw_data\n"); dump_stack(); return; } spin_lock_irqsave(&ha->hardware_lock, flags); qlt_clear_mode(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); /* * We are expecting the offline state. * QLA_FUNCTION_FAILED means that adapter is offline. 
*/ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) ql_dbg(ql_dbg_tgt, vha, 0xe081, "adapter is offline\n"); } /* * Called from qla_init.c:qla24xx_vport_create() contex to setup * the target mode specific struct scsi_qla_host and struct qla_hw_data * members. */ void qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) { vha->vha_tgt.qla_tgt = NULL; mutex_init(&vha->vha_tgt.tgt_mutex); mutex_init(&vha->vha_tgt.tgt_host_action_mutex); INIT_LIST_HEAD(&vha->unknown_atio_list); INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn); qlt_clear_mode(vha); /* * NOTE: Currently the value is kept the same for <24xx and * >=24xx ISPs. If it is necessary to change it, * the check should be added for specific ISPs, * assigning the value appropriately. */ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; qlt_add_target(ha, vha); } u8 qlt_rff_id(struct scsi_qla_host *vha) { u8 fc4_feature = 0; /* * FC-4 Feature bit 0 indicates target functionality to the name server. */ if (qla_tgt_mode_enabled(vha)) { fc4_feature = BIT_0; } else if (qla_ini_mode_enabled(vha)) { fc4_feature = BIT_1; } else if (qla_dual_mode_enabled(vha)) fc4_feature = BIT_0 | BIT_1; return fc4_feature; } /* * qlt_init_atio_q_entries() - Initializes ATIO queue entries. * @ha: HA context * * Beginning of ATIO ring has initialization control block already built * by nvram config routine. * * Returns 0 on success. */ void qlt_init_atio_q_entries(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; uint16_t cnt; struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; if (qla_ini_mode_enabled(vha)) return; for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); pkt++; } } /* * qlt_24xx_process_atio_queue() - Process ATIO queue entries. * @ha: SCSI driver HA context */ void qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) { struct qla_hw_data *ha = vha->hw; struct atio_from_isp *pkt; int cnt, i; if (!ha->flags.fw_started) return; while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { /* * This packet is corrupted. The header + payload * can not be trusted. There is no point in passing * it further up. 
*/ ql_log(ql_log_warn, vha, 0xd03c, "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", &pkt->u.isp24.fcp_hdr.s_id, be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), pkt->u.isp24.exchange_addr, pkt); adjust_corrupted_atio(pkt); qlt_send_term_exchange(ha->base_qpair, NULL, pkt, ha_locked, 0); } else { qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, ha_locked); } for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { ha->tgt.atio_ring_index = 0; ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; } else ha->tgt.atio_ring_ptr++; pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; } wmb(); } /* Adjust ring index */ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); } void qlt_24xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct qla_msix_entry *msix = &ha->msix_entries[2]; struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; if (!QLA_TGT_MODE_ENABLED()) return; wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0); wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0); rd_reg_dword(ISP_ATIO_Q_OUT(vha)); if (ha->flags.msix_enabled) { if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { icb->msix_atio = cpu_to_le16(msix->entry); icb->firmware_options_2 &= cpu_to_le32(~BIT_26); ql_dbg(ql_dbg_init, vha, 0xf072, "Registering ICB vector 0x%x for atio que.\n", msix->entry); } } else { /* INTx|MSI */ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { icb->msix_atio = 0; icb->firmware_options_2 |= cpu_to_le32(BIT_26); ql_dbg(ql_dbg_init, vha, 0xf072, "%s: Use INTx for ATIOQ.\n", __func__); } } } void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) { struct qla_hw_data *ha = vha->hw; u32 tmp; if (!QLA_TGT_MODE_ENABLED()) return; if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } if (qla_tgt_mode_enabled(vha)) nv->exchange_count = cpu_to_le16(0xFFFF); else /* dual */ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_9); if (ql2xtgt_tape_enable) /* Enable FC Tape support */ nv->firmware_options_2 |= cpu_to_le32(BIT_12); else /* Disable FC Tape support */ nv->firmware_options_2 &= cpu_to_le32(~BIT_12); /* Disable Full Login after LIP */ nv->host_p &= cpu_to_le32(~BIT_10); /* * clear BIT 15 explicitly as we have seen at least * a couple of instances where this was set and this * was causing the firmware to not be initialized. 
*/ nv->firmware_options_1 &= cpu_to_le32(~BIT_15); /* Enable target PRLI control */ nv->firmware_options_2 |= cpu_to_le32(BIT_14); if (IS_QLA25XX(ha)) { /* Change Loop-prefer to Pt-Pt */ tmp = ~(BIT_4|BIT_5|BIT_6); nv->firmware_options_2 &= cpu_to_le32(tmp); tmp = P2P << 4; nv->firmware_options_2 |= cpu_to_le32(tmp); } } else { if (ha->tgt.saved_set) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } return; } if (ha->base_qpair->enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; nv->firmware_options_2 |= cpu_to_le32(BIT_8); } else { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS3; nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); } } void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, struct init_cb_24xx *icb) { struct qla_hw_data *ha = vha->hw; if (!QLA_TGT_MODE_ENABLED()) return; if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); icb->firmware_options_1 |= cpu_to_le32(BIT_14); } } void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) { struct qla_hw_data *ha = vha->hw; u32 tmp; if (!QLA_TGT_MODE_ENABLED()) return; if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (!ha->tgt.saved_set) { /* We save only once */ ha->tgt.saved_exchange_count = nv->exchange_count; ha->tgt.saved_firmware_options_1 = nv->firmware_options_1; ha->tgt.saved_firmware_options_2 = nv->firmware_options_2; ha->tgt.saved_firmware_options_3 = nv->firmware_options_3; ha->tgt.saved_set = 1; } if (qla_tgt_mode_enabled(vha)) nv->exchange_count = cpu_to_le16(0xFFFF); else /* dual */ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld); /* Enable target mode */ nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ if (qla_tgt_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_9); /* * clear BIT 15 explicitly as we have seen at * least a couple of instances where this was set * and this was causing the firmware to not be * initialized. 
*/ nv->firmware_options_1 &= cpu_to_le32(~BIT_15); if (ql2xtgt_tape_enable) /* Enable FC tape support */ nv->firmware_options_2 |= cpu_to_le32(BIT_12); else /* Disable FC tape support */ nv->firmware_options_2 &= cpu_to_le32(~BIT_12); /* Disable Full Login after LIP */ nv->host_p &= cpu_to_le32(~BIT_10); /* Enable target PRLI control */ nv->firmware_options_2 |= cpu_to_le32(BIT_14); /* Change Loop-prefer to Pt-Pt */ tmp = ~(BIT_4|BIT_5|BIT_6); nv->firmware_options_2 &= cpu_to_le32(tmp); tmp = P2P << 4; nv->firmware_options_2 |= cpu_to_le32(tmp); } else { if (ha->tgt.saved_set) { nv->exchange_count = ha->tgt.saved_exchange_count; nv->firmware_options_1 = ha->tgt.saved_firmware_options_1; nv->firmware_options_2 = ha->tgt.saved_firmware_options_2; nv->firmware_options_3 = ha->tgt.saved_firmware_options_3; } return; } if (ha->base_qpair->enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; nv->firmware_options_2 |= cpu_to_le32(BIT_8); } else { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS3; nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); } } void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, struct init_cb_81xx *icb) { struct qla_hw_data *ha = vha->hw; if (!QLA_TGT_MODE_ENABLED()) return; if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); icb->firmware_options_1 |= cpu_to_le32(BIT_14); } } void qlt_83xx_iospace_config(struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return; ha->msix_count += 1; /* For ATIO Q */ } void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx *vpmod) { /* enable target mode. Bit5 = 1 => disable */ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_5; /* Disable ini mode, if requested. 
bit4 = 1 => disable */ if (qla_tgt_mode_enabled(vha)) vpmod->options_idx1 &= ~BIT_4; } void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { mutex_init(&base_vha->vha_tgt.tgt_mutex); if (!QLA_TGT_MODE_ENABLED()) return; if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; } mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); INIT_LIST_HEAD(&base_vha->unknown_atio_list); INIT_DELAYED_WORK(&base_vha->unknown_atio_work, qlt_unknown_atio_work_fn); qlt_clear_mode(base_vha); qla_update_vp_map(base_vha, SET_VP_IDX); } irqreturn_t qla83xx_msix_atio_q(int irq, void *dev_id) { struct rsp_que *rsp; scsi_qla_host_t *vha; struct qla_hw_data *ha; unsigned long flags; rsp = (struct rsp_que *) dev_id; ha = rsp->hw; vha = pci_get_drvdata(ha->pdev); spin_lock_irqsave(&ha->tgt.atio_lock, flags); qlt_24xx_process_atio_queue(vha, 0); spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); return IRQ_HANDLED; } static void qlt_handle_abts_recv_work(struct work_struct *work) { struct qla_tgt_sess_op *op = container_of(work, struct qla_tgt_sess_op, work); scsi_qla_host_t *vha = op->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags; if (qla2x00_reset_active(vha) || (op->chip_reset != ha->base_qpair->chip_reset)) return; spin_lock_irqsave(&ha->tgt.atio_lock, flags); qlt_24xx_process_atio_queue(vha, 0); spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags); qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); } void qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, response_t *pkt) { struct qla_tgt_sess_op *op; op = kzalloc(sizeof(*op), GFP_ATOMIC); if (!op) { /* do not reach for ATIO queue here. This is best effort err * recovery at this point. 
*/ qlt_response_pkt_all_vps(vha, rsp, pkt); return; } memcpy(&op->atio, pkt, sizeof(*pkt)); op->vha = vha; op->chip_reset = vha->hw->base_qpair->chip_reset; op->rsp = rsp; INIT_WORK(&op->work, qlt_handle_abts_recv_work); queue_work(qla_tgt_wq, &op->work); return; } int qlt_mem_alloc(struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return 0; ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), &ha->tgt.atio_dma, GFP_KERNEL); if (!ha->tgt.atio_ring) { return -ENOMEM; } return 0; } void qlt_mem_free(struct qla_hw_data *ha) { if (!QLA_TGT_MODE_ENABLED()) return; if (ha->tgt.atio_ring) { dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), ha->tgt.atio_ring, ha->tgt.atio_dma); } ha->tgt.atio_ring = NULL; ha->tgt.atio_dma = 0; } static int __init qlt_parse_ini_mode(void) { if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; else return false; return true; } int __init qlt_init(void) { int ret; BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); if (!qlt_parse_ini_mode()) { ql_log(ql_log_fatal, NULL, 0xe06b, "qlt_parse_ini_mode() failed\n"); return -EINVAL; } if (!QLA_TGT_MODE_ENABLED()) return 0; qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL); if (!qla_tgt_mgmt_cmd_cachep) { ql_log(ql_log_fatal, NULL, 0xd04b, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); return -ENOMEM; } qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 0, NULL); if (!qla_tgt_plogi_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); ret = -ENOMEM; goto out_mgmt_cmd_cachep; } qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, mempool_free_slab, qla_tgt_mgmt_cmd_cachep); if (!qla_tgt_mgmt_cmd_mempool) { ql_log(ql_log_fatal, NULL, 0xe06e, "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); ret = -ENOMEM; goto out_plogi_cachep; } qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); if (!qla_tgt_wq) { ql_log(ql_log_fatal, NULL, 0xe06f, "alloc_workqueue for qla_tgt_wq failed\n"); ret = -ENOMEM; goto out_cmd_mempool; } /* * Return 1 to signal that initiator-mode is being disabled */ return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_plogi_cachep: kmem_cache_destroy(qla_tgt_plogi_cachep); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); return ret; } void qlt_exit(void) { if (!QLA_TGT_MODE_ENABLED()) return; destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_plogi_cachep); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); }
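/*
 * Illustrative sketch (not part of the upstream driver): a minimal example of
 * how an external fabric module such as tcm_qla2xxx is expected to consume the
 * qlt_lport_register()/qlt_lport_deregister() interface defined above. The
 * struct example_lport, example_lport_cb() and the attach/detach helpers are
 * hypothetical names used only for illustration; only qlt_lport_register()
 * and qlt_lport_deregister() come from this file.
 */
#if 0	/* illustrative only -- never compiled */
struct example_lport {
	struct scsi_qla_host *vha;	/* set by the registration callback */
	u64 wwpn;			/* physical port WWPN to claim */
};

static int example_lport_cb(struct scsi_qla_host *vha, void *data,
			    u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct example_lport *lport = data;

	/* Called while qlt_lport_register() holds a scsi_host reference. */
	lport->vha = vha;
	return 0;
}

static int example_attach(struct example_lport *lport)
{
	/*
	 * qlt_lport_register() scans qla_tgt_glist for a target-capable host
	 * whose port_name matches lport->wwpn and invokes the callback on a
	 * match; it returns -ENODEV if no such host exists.
	 */
	return qlt_lport_register(lport, lport->wwpn, 0, 0, example_lport_cb);
}

static void example_detach(struct example_lport *lport)
{
	/*
	 * Clears target_lport_ptr/tgt_ops and drops the scsi_host reference
	 * taken during registration.
	 */
	qlt_lport_deregister(lport->vha);
}
#endif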
linux-master
drivers/scsi/qla2xxx/qla_target.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_target.h" #include "qla_gbl.h" #include <linux/delay.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/t10-pi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_bsg_fc.h> #include <scsi/scsi_eh.h> #include <scsi/fc/fc_fs.h> #include <linux/nvme-fc-driver.h> static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item); static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size); static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt); static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp); static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) { void *pkt = &item->iocb; uint16_t pkt_size = item->size; ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, "%s: Enter\n", __func__); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, "-------- ELS REQ -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, pkt, pkt_size); fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0); } const char *const port_state_str[] = { [FCS_UNKNOWN] = "Unknown", [FCS_UNCONFIGURED] = "UNCONFIGURED", [FCS_DEVICE_DEAD] = "DEAD", [FCS_DEVICE_LOST] = "LOST", [FCS_ONLINE] = "ONLINE" }; #define SFP_DISABLE_LASER_INITIATED 0x15 /* Sub code of 8070 AEN */ #define SFP_ENABLE_LASER_INITIATED 0x16 /* Sub code of 8070 AEN */ static inline void display_Laser_info(scsi_qla_host_t *vha, u16 mb1, u16 mb2, u16 mb3) { if (mb1 == SFP_DISABLE_LASER_INITIATED) ql_log(ql_log_warn, vha, 0xf0a2, "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n", mb3, mb2); if (mb1 == SFP_ENABLE_LASER_INITIATED) ql_log(ql_log_warn, vha, 0xf0a3, "SFP temperature (%d C) reached normal operating level. 
Laser is enabled.\n", mb3); } static void qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) { struct abts_entry_24xx *abts = (struct abts_entry_24xx *)&pkt->iocb; struct qla_hw_data *ha = vha->hw; struct els_entry_24xx *rsp_els; struct abts_entry_24xx *abts_rsp; dma_addr_t dma; uint32_t fctl; int rval; ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); ql_log(ql_log_warn, vha, 0x0287, "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, abts->seq_id, abts->seq_cnt); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, "-------- ABTS RCV -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, (uint8_t *)abts, sizeof(*abts)); rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, GFP_KERNEL); if (!rsp_els) { ql_log(ql_log_warn, vha, 0x0287, "Failed allocate dma buffer ABTS/ELS RSP.\n"); return; } /* terminate exchange */ rsp_els->entry_type = ELS_IOCB_TYPE; rsp_els->entry_count = 1; rsp_els->nport_handle = cpu_to_le16(~0); rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); ql_dbg(ql_dbg_init, vha, 0x0283, "Sending ELS Response to terminate exchange %#x...\n", abts->rx_xch_addr_to_abort); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, "-------- ELS RSP -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, (uint8_t *)rsp_els, sizeof(*rsp_els)); rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); if (rval) { ql_log(ql_log_warn, vha, 0x0288, "%s: iocb failed to execute -> %x\n", __func__, rval); } else if (rsp_els->comp_status) { ql_log(ql_log_warn, vha, 0x0289, "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", __func__, rsp_els->comp_status, rsp_els->error_subcode_1, rsp_els->error_subcode_2); } else { ql_dbg(ql_dbg_init, vha, 0x028a, "%s: abort exchange done.\n", __func__); } /* send ABTS response */ abts_rsp = (void *)rsp_els; memset(abts_rsp, 0, sizeof(*abts_rsp)); abts_rsp->entry_type = ABTS_RSP_TYPE; abts_rsp->entry_count = 1; abts_rsp->nport_handle = abts->nport_handle; abts_rsp->vp_idx = abts->vp_idx; abts_rsp->sof_type = abts->sof_type & 0xf0; abts_rsp->rx_xch_addr = abts->rx_xch_addr; abts_rsp->d_id[0] = abts->s_id[0]; abts_rsp->d_id[1] = abts->s_id[1]; abts_rsp->d_id[2] = abts->s_id[2]; abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; abts_rsp->s_id[0] = abts->d_id[0]; abts_rsp->s_id[1] = abts->d_id[1]; abts_rsp->s_id[2] = abts->d_id[2]; abts_rsp->cs_ctl = abts->cs_ctl; /* include flipping bit23 in fctl */ fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; abts_rsp->type = FC_TYPE_BLD; abts_rsp->rx_id = abts->rx_id; abts_rsp->ox_id = abts->ox_id; abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; ql_dbg(ql_dbg_init, vha, 0x028b, "Sending BA ACC response to ABTS %#x...\n", abts->rx_xch_addr_to_abort); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, "-------- ELS RSP -------\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, (uint8_t *)abts_rsp, sizeof(*abts_rsp)); rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); if (rval) { ql_log(ql_log_warn, vha, 0x028c, "%s: iocb failed to execute -> %x\n", __func__, rval); } 
else if (abts_rsp->comp_status) { ql_log(ql_log_warn, vha, 0x028d, "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", __func__, abts_rsp->comp_status, abts_rsp->payload.error.subcode1, abts_rsp->payload.error.subcode2); } else { ql_dbg(ql_dbg_init, vha, 0x028ea, "%s: done.\n", __func__); } dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); } /** * __qla_consume_iocb - this routine is used to tell fw driver has processed * or consumed the head IOCB along with the continuation IOCB's from the * provided respond queue. * @vha: host adapter pointer * @pkt: pointer to current packet. On return, this pointer shall move * to the next packet. * @rsp: respond queue pointer. * * it is assumed pkt is the head iocb, not the continuation iocbk */ void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp) { struct rsp_que *rsp_q = *rsp; response_t *new_pkt; uint16_t entry_count_remaining; struct purex_entry_24xx *purex = *pkt; entry_count_remaining = purex->entry_count; while (entry_count_remaining > 0) { new_pkt = rsp_q->ring_ptr; *pkt = new_pkt; rsp_q->ring_index++; if (rsp_q->ring_index == rsp_q->length) { rsp_q->ring_index = 0; rsp_q->ring_ptr = rsp_q->ring; } else { rsp_q->ring_ptr++; } new_pkt->signature = RESPONSE_PROCESSED; /* flush signature */ wmb(); --entry_count_remaining; } } /** * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB * and save to provided buffer * @vha: host adapter pointer * @pkt: pointer Purex IOCB * @rsp: respond queue * @buf: extracted ELS payload copy here * @buf_len: buffer length */ int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len) { struct purex_entry_24xx *purex = *pkt; struct rsp_que *rsp_q = *rsp; sts_cont_entry_t *new_pkt; uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; uint16_t buffer_copy_offset = 0; uint16_t entry_count_remaining; u16 tpad; entry_count_remaining = purex->entry_count; total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) - PURX_ELS_HEADER_SIZE; /* * end of payload may not end in 4bytes boundary. Need to * round up / pad for room to swap, before saving data */ tpad = roundup(total_bytes, 4); if (buf_len < tpad) { ql_dbg(ql_dbg_async, vha, 0x5084, "%s buffer is too small %d < %d\n", __func__, buf_len, tpad); __qla_consume_iocb(vha, pkt, rsp); return -EIO; } pending_bytes = total_bytes = tpad; no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? sizeof(purex->els_frame_payload) : pending_bytes; memcpy(buf, &purex->els_frame_payload[0], no_bytes); buffer_copy_offset += no_bytes; pending_bytes -= no_bytes; --entry_count_remaining; ((response_t *)purex)->signature = RESPONSE_PROCESSED; /* flush signature */ wmb(); do { while ((total_bytes > 0) && (entry_count_remaining > 0)) { new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; *pkt = new_pkt; if (new_pkt->entry_type != STATUS_CONT_TYPE) { ql_log(ql_log_warn, vha, 0x507a, "Unexpected IOCB type, partial data 0x%x\n", buffer_copy_offset); break; } rsp_q->ring_index++; if (rsp_q->ring_index == rsp_q->length) { rsp_q->ring_index = 0; rsp_q->ring_ptr = rsp_q->ring; } else { rsp_q->ring_ptr++; } no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 
sizeof(new_pkt->data) : pending_bytes; if ((buffer_copy_offset + no_bytes) <= total_bytes) { memcpy((buf + buffer_copy_offset), new_pkt->data, no_bytes); buffer_copy_offset += no_bytes; pending_bytes -= no_bytes; --entry_count_remaining; } else { ql_log(ql_log_warn, vha, 0x5044, "Attempt to copy more that we got, optimizing..%x\n", buffer_copy_offset); memcpy((buf + buffer_copy_offset), new_pkt->data, total_bytes - buffer_copy_offset); } ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; /* flush signature */ wmb(); } if (pending_bytes != 0 || entry_count_remaining != 0) { ql_log(ql_log_fatal, vha, 0x508b, "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n", total_bytes, entry_count_remaining); return -EIO; } } while (entry_count_remaining > 0); be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2); return 0; } /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. */ irqreturn_t qla2100_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_2xxx __iomem *reg; int status; unsigned long iter; uint16_t hccr; uint16_t mb[8]; struct rsp_que *rsp; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505d, "%s: NULL response queue pointer.\n", __func__); return (IRQ_NONE); } ha = rsp->hw; reg = &ha->iobase->isp; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = rd_reg_word(&reg->hccr); if (qla2x00_check_reg16_for_disconnect(vha, hccr)) break; if (hccr & HCCR_RISC_PAUSE) { if (pci_channel_offline(ha->pdev)) break; /* * Issue a "HARD" reset in order for the RISC interrupt * bit to be cleared. Schedule a big hammer to get * out of the RISC PAUSED state. */ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); rd_reg_word(&reg->hccr); ha->isp_ops->fw_dump(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0) break; if (rd_reg_word(&reg->semaphore) & BIT_0) { wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); /* Get mailbox data. */ mb[0] = RD_MAILBOX_REG(ha, reg, 0); if (mb[0] > 0x3fff && mb[0] < 0x8000) { qla2x00_mbx_completion(vha, mb[0]); status |= MBX_INTERRUPT; } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla2x00_async_event(vha, rsp, mb); } else { /*EMPTY*/ ql_dbg(ql_dbg_async, vha, 0x5025, "Unrecognized interrupt type (%d).\n", mb[0]); } /* Release mailbox registers. 
*/ wrt_reg_word(&reg->semaphore, 0); rd_reg_word(&reg->semaphore); } else { qla2x00_process_response_queue(rsp); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); } } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (IRQ_HANDLED); } bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) { /* Check for PCI disconnection */ if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { qla_schedule_eeh_work(vha); } return true; } else return false; } bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) { return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); } /** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. */ irqreturn_t qla2300_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct device_reg_2xxx __iomem *reg; int status; unsigned long iter; uint32_t stat; uint16_t hccr; uint16_t mb[8]; struct rsp_que *rsp; struct qla_hw_data *ha; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x5058, "%s: NULL response queue pointer.\n", __func__); return (IRQ_NONE); } ha = rsp->hw; reg = &ha->iobase->isp; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = rd_reg_dword(&reg->u.isp2300.host_status); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSR_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = rd_reg_word(&reg->hccr); if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) ql_log(ql_log_warn, vha, 0x5026, "Parity error -- HCCR=%x, Dumping " "firmware.\n", hccr); else ql_log(ql_log_warn, vha, 0x5027, "RISC paused -- HCCR=%x, Dumping " "firmware.\n", hccr); /* * Issue a "HARD" reset in order for the RISC * interrupt bit to be cleared. Schedule a big * hammer to get out of the RISC PAUSED state. */ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); rd_reg_word(&reg->hccr); ha->isp_ops->fw_dump(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSR_RISC_INT) == 0) break; switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla2x00_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; /* Release mailbox registers. */ wrt_reg_word(&reg->semaphore, 0); break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla2x00_process_response_queue(rsp); break; case 0x15: mb[0] = MBA_CMPLT_1_16BIT; mb[1] = MSW(stat); qla2x00_async_event(vha, rsp, mb); break; case 0x16: mb[0] = MBA_SCSI_COMPLETION; mb[1] = MSW(stat); mb[2] = RD_MAILBOX_REG(ha, reg, 2); qla2x00_async_event(vha, rsp, mb); break; default: ql_dbg(ql_dbg_async, vha, 0x5028, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word_relaxed(&reg->hccr); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (IRQ_HANDLED); } /** * qla2x00_mbx_completion() - Process mailbox command completions. 
* @vha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint32_t mboxes; __le16 __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Read all mbox registers? */ WARN_ON_ONCE(ha->mbx_count > 32); mboxes = (1ULL << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); else mboxes = ha->mcp->in_mb; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; mboxes >>= 1; wptr = MAILBOX_REG(ha, reg, 1); for (cnt = 1; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) wptr = MAILBOX_REG(ha, reg, 8); if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); else if (mboxes & BIT_0) ha->mailbox_out[cnt] = rd_reg_word(wptr); wptr++; mboxes >>= 1; } } static void qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) { static char *event[] = { "Complete", "Request Notification", "Time Extension" }; int rval; struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; __le16 __iomem *wptr; uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; /* Seed data -- mailbox1 -> mailbox7. */ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) wptr = &reg24->mailbox1; else if (IS_QLA8044(vha->hw)) wptr = &reg82->mailbox_out[1]; else return; for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) mb[cnt] = rd_reg_word(wptr); ql_dbg(ql_dbg_async, vha, 0x5021, "Inter-Driver Communication %s -- " "%04x %04x %04x %04x %04x %04x %04x.\n", event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]); switch (aen) { /* Handle IDC Error completion case. */ case MBA_IDC_COMPLETE: if (mb[1] >> 15) { vha->hw->flags.idc_compl_status = 1; if (vha->hw->notify_dcbx_comp && !vha->vp_idx) complete(&vha->hw->dcbx_comp); } break; case MBA_IDC_NOTIFY: /* Acknowledgement needed? [Notify && non-zero timeout]. 
*/ timeout = (descr >> 8) & 0xf; ql_dbg(ql_dbg_async, vha, 0x5022, "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout); if (!timeout) return; rval = qla2x00_post_idc_ack_work(vha, mb); if (rval != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x5023, "IDC failed to post ACK.\n"); break; case MBA_IDC_TIME_EXT: vha->hw->idc_extend_tmo = descr; ql_dbg(ql_dbg_async, vha, 0x5087, "%lu Inter-Driver Communication %s -- " "Extend timeout by=%d.\n", vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); break; } } #define LS_UNKNOWN 2 const char * qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) { static const char *const link_speeds[] = { "1", "2", "?", "4", "8", "16", "32", "64", "10" }; #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) if (IS_QLA2100(ha) || IS_QLA2200(ha)) return link_speeds[0]; else if (speed == 0x13) return link_speeds[QLA_LAST_SPEED]; else if (speed < QLA_LAST_SPEED) return link_speeds[speed]; else return link_speeds[LS_UNKNOWN]; } static void qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) { struct qla_hw_data *ha = vha->hw; /* * 8200 AEN Interpretation: * mb[0] = AEN code * mb[1] = AEN Reason code * mb[2] = LSW of Peg-Halt Status-1 Register * mb[6] = MSW of Peg-Halt Status-1 Register * mb[3] = LSW of Peg-Halt Status-2 register * mb[7] = MSW of Peg-Halt Status-2 register * mb[4] = IDC Device-State Register value * mb[5] = IDC Driver-Presence Register value */ ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", mb[0], mb[1], mb[2], mb[6]); ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | IDC_HEARTBEAT_FAILURE)) { ha->flags.nic_core_hung = 1; ql_log(ql_log_warn, vha, 0x5060, "83XX: F/W Error Reported: Check if reset required.\n"); if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { uint32_t protocol_engine_id, fw_err_code, err_level; /* * IDC_PEG_HALT_STATUS_CHANGE interpretation: * - PEG-Halt Status-1 Register: * (LSW = mb[2], MSW = mb[6]) * Bits 0-7 = protocol-engine ID * Bits 8-28 = f/w error code * Bits 29-31 = Error-level * Error-level 0x1 = Non-Fatal error * Error-level 0x2 = Recoverable Fatal error * Error-level 0x4 = UnRecoverable Fatal error * - PEG-Halt Status-2 Register: * (LSW = mb[3], MSW = mb[7]) */ protocol_engine_id = (mb[2] & 0xff); fw_err_code = (((mb[2] & 0xff00) >> 8) | ((mb[6] & 0x1fff) << 8)); err_level = ((mb[6] & 0xe000) >> 13); ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " "Register: protocol_engine_id=0x%x " "fw_err_code=0x%x err_level=0x%x.\n", protocol_engine_id, fw_err_code, err_level); ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " "Register: 0x%x%x.\n", mb[7], mb[3]); if (err_level == ERR_LEVEL_NON_FATAL) { ql_log(ql_log_warn, vha, 0x5063, "Not a fatal error, f/w has recovered itself.\n"); } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { ql_log(ql_log_fatal, vha, 0x5064, "Recoverable Fatal error: Chip reset " "required.\n"); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { ql_log(ql_log_fatal, vha, 0x5065, "Unrecoverable Fatal error: Set FAILED " "state, reboot required.\n"); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_UNRECOVERABLE); } } if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { uint16_t peg_fw_state, 
nw_interface_link_up; uint16_t nw_interface_signal_detect, sfp_status; uint16_t htbt_counter, htbt_monitor_enable; uint16_t sfp_additional_info, sfp_multirate; uint16_t sfp_tx_fault, link_speed, dcbx_status; /* * IDC_NIC_FW_REPORTED_FAILURE interpretation: * - PEG-to-FC Status Register: * (LSW = mb[2], MSW = mb[6]) * Bits 0-7 = Peg-Firmware state * Bit 8 = N/W Interface Link-up * Bit 9 = N/W Interface signal detected * Bits 10-11 = SFP Status * SFP Status 0x0 = SFP+ transceiver not expected * SFP Status 0x1 = SFP+ transceiver not present * SFP Status 0x2 = SFP+ transceiver invalid * SFP Status 0x3 = SFP+ transceiver present and * valid * Bits 12-14 = Heartbeat Counter * Bit 15 = Heartbeat Monitor Enable * Bits 16-17 = SFP Additional Info * SFP info 0x0 = Unregocnized transceiver for * Ethernet * SFP info 0x1 = SFP+ brand validation failed * SFP info 0x2 = SFP+ speed validation failed * SFP info 0x3 = SFP+ access error * Bit 18 = SFP Multirate * Bit 19 = SFP Tx Fault * Bits 20-22 = Link Speed * Bits 23-27 = Reserved * Bits 28-30 = DCBX Status * DCBX Status 0x0 = DCBX Disabled * DCBX Status 0x1 = DCBX Enabled * DCBX Status 0x2 = DCBX Exchange error * Bit 31 = Reserved */ peg_fw_state = (mb[2] & 0x00ff); nw_interface_link_up = ((mb[2] & 0x0100) >> 8); nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); sfp_status = ((mb[2] & 0x0c00) >> 10); htbt_counter = ((mb[2] & 0x7000) >> 12); htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); sfp_additional_info = (mb[6] & 0x0003); sfp_multirate = ((mb[6] & 0x0004) >> 2); sfp_tx_fault = ((mb[6] & 0x0008) >> 3); link_speed = ((mb[6] & 0x0070) >> 4); dcbx_status = ((mb[6] & 0x7000) >> 12); ql_log(ql_log_warn, vha, 0x5066, "Peg-to-Fc Status Register:\n" "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " "nw_interface_signal_detect=0x%x" "\nsfp_statis=0x%x.\n ", peg_fw_state, nw_interface_link_up, nw_interface_signal_detect, sfp_status); ql_log(ql_log_warn, vha, 0x5067, "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", htbt_counter, htbt_monitor_enable, sfp_additional_info, sfp_multirate); ql_log(ql_log_warn, vha, 0x5068, "sfp_tx_fault=0x%x, link_state=0x%x, " "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, dcbx_status); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); } if (mb[1] & IDC_HEARTBEAT_FAILURE) { ql_log(ql_log_warn, vha, 0x5069, "Heartbeat Failure encountered, chip reset " "required.\n"); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); } } if (mb[1] & IDC_DEVICE_STATE_CHANGE) { ql_log(ql_log_info, vha, 0x506a, "IDC Device-State changed = 0x%x.\n", mb[4]); if (ha->flags.nic_core_reset_owner) return; qla83xx_schedule_work(vha, MBA_IDC_AEN); } } /** * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can * span over multiple IOCBs. 
* @vha: SCSI driver HA context * @pkt: ELS packet * @rsp: Response queue * @is_purls: True, for Unsolicited Received FC-NVMe LS rsp IOCB * false, for Unsolicited Received ELS IOCB * @byte_order: True, to change the byte ordering of iocb payload */ struct purex_item * qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order) { struct purex_entry_24xx *purex = NULL; struct pt_ls4_rx_unsol *purls = NULL; struct rsp_que *rsp_q = *rsp; sts_cont_entry_t *new_pkt; uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; uint16_t buffer_copy_offset = 0, payload_size = 0; uint16_t entry_count, entry_count_remaining; struct purex_item *item; void *iocb_pkt = NULL; if (is_purls) { purls = *pkt; total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) - PURX_ELS_HEADER_SIZE; entry_count = entry_count_remaining = purls->entry_count; payload_size = sizeof(purls->payload); } else { purex = *pkt; total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) - PURX_ELS_HEADER_SIZE; entry_count = entry_count_remaining = purex->entry_count; payload_size = sizeof(purex->els_frame_payload); } pending_bytes = total_bytes; no_bytes = (pending_bytes > payload_size) ? payload_size : pending_bytes; ql_dbg(ql_dbg_async, vha, 0x509a, "%s LS, frame_size 0x%x, entry count %d\n", (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count); item = qla24xx_alloc_purex_item(vha, total_bytes); if (!item) return item; iocb_pkt = &item->iocb; if (is_purls) memcpy(iocb_pkt, &purls->payload[0], no_bytes); else memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes); buffer_copy_offset += no_bytes; pending_bytes -= no_bytes; --entry_count_remaining; if (is_purls) ((response_t *)purls)->signature = RESPONSE_PROCESSED; else ((response_t *)purex)->signature = RESPONSE_PROCESSED; wmb(); do { while ((total_bytes > 0) && (entry_count_remaining > 0)) { if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { ql_dbg(ql_dbg_async, vha, 0x5084, "Ran out of IOCBs, partial data 0x%x\n", buffer_copy_offset); cpu_relax(); continue; } new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; *pkt = new_pkt; if (new_pkt->entry_type != STATUS_CONT_TYPE) { ql_log(ql_log_warn, vha, 0x507a, "Unexpected IOCB type, partial data 0x%x\n", buffer_copy_offset); break; } rsp_q->ring_index++; if (rsp_q->ring_index == rsp_q->length) { rsp_q->ring_index = 0; rsp_q->ring_ptr = rsp_q->ring; } else { rsp_q->ring_ptr++; } no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 
sizeof(new_pkt->data) : pending_bytes; if ((buffer_copy_offset + no_bytes) <= total_bytes) { memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset), new_pkt->data, no_bytes); buffer_copy_offset += no_bytes; pending_bytes -= no_bytes; --entry_count_remaining; } else { ql_log(ql_log_warn, vha, 0x5044, "Attempt to copy more that we got, optimizing..%x\n", buffer_copy_offset); memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset), new_pkt->data, total_bytes - buffer_copy_offset); } ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; wmb(); } if (pending_bytes != 0 || entry_count_remaining != 0) { ql_log(ql_log_fatal, vha, 0x508b, "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", total_bytes, entry_count_remaining); qla24xx_free_purex_item(item); return NULL; } } while (entry_count_remaining > 0); if (byte_order) host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); return item; } int qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; uint32_t vp_did; unsigned long flags; int ret = 0; if (!ha->num_vhosts) return ret; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { vp_did = vp->d_id.b24; if (vp_did == rscn_entry) { ret = 1; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return ret; } fc_port_t * qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) { fc_port_t *f, *tf; f = tf = NULL; list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) if (f->loop_id == loop_id) return f; return NULL; } fc_port_t * qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) { fc_port_t *f, *tf; f = tf = NULL; list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { if (incl_deleted) return f; else if (f->deleted == 0) return f; } } return NULL; } fc_port_t * qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, u8 incl_deleted) { fc_port_t *f, *tf; f = tf = NULL; list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { if (f->d_id.b24 == id->b24) { if (incl_deleted) return f; else if (f->deleted == 0) return f; } } return NULL; } /* Shall be called only on supported adapters. */ static void qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) { struct qla_hw_data *ha = vha->hw; bool reset_isp_needed = false; ql_log(ql_log_warn, vha, 0x02f0, "MPI Heartbeat stop. MPI reset is%s needed. " "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", mb[1] & BIT_8 ? "" : " not", mb[0], mb[1], mb[2], mb[3]); if ((mb[1] & BIT_8) == 0) return; ql_log(ql_log_warn, vha, 0x02f1, "MPI Heartbeat stop. 
FW dump needed\n"); if (ql2xfulldump_on_mpifail) { ha->isp_ops->fw_dump(vha); reset_isp_needed = true; } ha->isp_ops->mpi_fw_dump(vha, 1); if (reset_isp_needed) { vha->hw->flags.fw_init_done = 0; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } static struct purex_item * qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) { struct purex_item *item = NULL; uint8_t item_hdr_size = sizeof(*item); if (size > QLA_DEFAULT_PAYLOAD_SIZE) { item = kzalloc(item_hdr_size + (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); } else { if (atomic_inc_return(&vha->default_item.in_use) == 1) { item = &vha->default_item; goto initialize_purex_header; } else { item = kzalloc(item_hdr_size, GFP_ATOMIC); } } if (!item) { ql_log(ql_log_warn, vha, 0x5092, ">> Failed allocate purex list item.\n"); return NULL; } initialize_purex_header: item->vha = vha; item->size = size; return item; } void qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, void (*process_item)(struct scsi_qla_host *vha, struct purex_item *pkt)) { struct purex_list *list = &vha->purex_list; ulong flags; pkt->process_item = process_item; spin_lock_irqsave(&list->lock, flags); list_add_tail(&pkt->list, &list->head); spin_unlock_irqrestore(&list->lock, flags); set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); } /** * qla24xx_copy_std_pkt() - Copy over purex ELS which is * contained in a single IOCB. * purex packet. * @vha: SCSI driver HA context * @pkt: ELS packet */ static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) { struct purex_item *item; item = qla24xx_alloc_purex_item(vha, QLA_DEFAULT_PAYLOAD_SIZE); if (!item) return item; memcpy(&item->iocb, pkt, sizeof(item->iocb)); return item; } /** * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can * span over multiple IOCBs. * @vha: SCSI driver HA context * @pkt: ELS packet * @rsp: Response queue */ static struct purex_item * qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp) { struct purex_entry_24xx *purex = *pkt; struct rsp_que *rsp_q = *rsp; sts_cont_entry_t *new_pkt; uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; uint16_t buffer_copy_offset = 0; uint16_t entry_count, entry_count_remaining; struct purex_item *item; void *fpin_pkt = NULL; total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) - PURX_ELS_HEADER_SIZE; pending_bytes = total_bytes; entry_count = entry_count_remaining = purex->entry_count; no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 
sizeof(purex->els_frame_payload) : pending_bytes; ql_log(ql_log_info, vha, 0x509a, "FPIN ELS, frame_size 0x%x, entry count %d\n", total_bytes, entry_count); item = qla24xx_alloc_purex_item(vha, total_bytes); if (!item) return item; fpin_pkt = &item->iocb; memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); buffer_copy_offset += no_bytes; pending_bytes -= no_bytes; --entry_count_remaining; ((response_t *)purex)->signature = RESPONSE_PROCESSED; wmb(); do { while ((total_bytes > 0) && (entry_count_remaining > 0)) { if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { ql_dbg(ql_dbg_async, vha, 0x5084, "Ran out of IOCBs, partial data 0x%x\n", buffer_copy_offset); cpu_relax(); continue; } new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; *pkt = new_pkt; if (new_pkt->entry_type != STATUS_CONT_TYPE) { ql_log(ql_log_warn, vha, 0x507a, "Unexpected IOCB type, partial data 0x%x\n", buffer_copy_offset); break; } rsp_q->ring_index++; if (rsp_q->ring_index == rsp_q->length) { rsp_q->ring_index = 0; rsp_q->ring_ptr = rsp_q->ring; } else { rsp_q->ring_ptr++; } no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? sizeof(new_pkt->data) : pending_bytes; if ((buffer_copy_offset + no_bytes) <= total_bytes) { memcpy(((uint8_t *)fpin_pkt + buffer_copy_offset), new_pkt->data, no_bytes); buffer_copy_offset += no_bytes; pending_bytes -= no_bytes; --entry_count_remaining; } else { ql_log(ql_log_warn, vha, 0x5044, "Attempt to copy more that we got, optimizing..%x\n", buffer_copy_offset); memcpy(((uint8_t *)fpin_pkt + buffer_copy_offset), new_pkt->data, total_bytes - buffer_copy_offset); } ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; wmb(); } if (pending_bytes != 0 || entry_count_remaining != 0) { ql_log(ql_log_fatal, vha, 0x508b, "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", total_bytes, entry_count_remaining); qla24xx_free_purex_item(item); return NULL; } } while (entry_count_remaining > 0); host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); return item; } /** * qla2x00_async_event() - Process aynchronous events. * @vha: SCSI driver HA context * @rsp: response queue * @mb: Mailbox registers (0 - 3) */ void qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) { uint16_t handle_cnt; uint16_t cnt, mbx; uint32_t handles[5]; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; uint32_t rscn_entry, host_pid; unsigned long flags; fc_port_t *fcport = NULL; if (!vha->hw->flags.fw_started) { ql_log(ql_log_warn, vha, 0x50ff, "Dropping AEN - %04x %04x %04x %04x.\n", mb[0], mb[1], mb[2], mb[3]); return; } /* Setup to process RIO completion. 
*/ handle_cnt = 0; if (IS_CNA_CAPABLE(ha)) goto skip_rio; switch (mb[0]) { case MBA_SCSI_COMPLETION: handles[0] = make_handle(mb[2], mb[1]); handle_cnt = 1; break; case MBA_CMPLT_1_16BIT: handles[0] = mb[1]; handle_cnt = 1; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_3_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handle_cnt = 3; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_4_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handle_cnt = 4; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_5_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); handle_cnt = 5; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_32BIT: handles[0] = make_handle(mb[2], mb[1]); handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), RD_MAILBOX_REG(ha, reg, 6)); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; default: break; } skip_rio: switch (mb[0]) { case MBA_SCSI_COMPLETION: /* Fast Post */ if (!vha->flags.online) break; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(vha, rsp->req, handles[cnt]); break; case MBA_RESET: /* Reset */ ql_dbg(ql_dbg_async, vha, 0x5002, "Asynchronous RESET.\n"); set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); break; case MBA_SYSTEM_ERR: /* System Error */ mbx = 0; vha->hw_err_cnt++; if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { u16 m[4]; m[0] = rd_reg_word(&reg24->mailbox4); m[1] = rd_reg_word(&reg24->mailbox5); m[2] = rd_reg_word(&reg24->mailbox6); mbx = m[3] = rd_reg_word(&reg24->mailbox7); ql_log(ql_log_warn, vha, 0x5003, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); } else ql_log(ql_log_warn, vha, 0x5003, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", mb[1], mb[2], mb[3]); if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && rd_reg_word(&reg24->mailbox7) & BIT_8) ha->isp_ops->mpi_fw_dump(vha, 1); ha->isp_ops->fw_dump(vha); ha->flags.fw_init_done = 0; QLA_FW_STOPPED(ha); if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { ql_log(ql_log_fatal, vha, 0x5004, "Unrecoverable Hardware Error: adapter " "marked OFFLINE!\n"); vha->flags.online = 0; vha->device_flags |= DFLG_DEV_FAILED; } else { /* Check to see if MPI timeout occurred */ if ((mbx & MBX_3) && (ha->port_no == 0)) set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } else if (mb[1] == 0) { ql_log(ql_log_fatal, vha, 0x5005, "Unrecoverable Hardware Error: adapter marked " "OFFLINE!\n"); vha->flags.online = 0; vha->device_flags |= DFLG_DEV_FAILED; } else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ ql_log(ql_log_warn, vha, 0x5006, "ISP Request Transfer Error (%x).\n", mb[1]); vha->hw_err_cnt++; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ ql_log(ql_log_warn, vha, 0x5007, "ISP Response Transfer Error (%x).\n", mb[1]); vha->hw_err_cnt++; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ ql_dbg(ql_dbg_async, vha, 0x5008, "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); break; case MBA_LOOP_INIT_ERR: ql_log(ql_log_warn, vha, 0x5090, "LOOP INIT ERROR 
(%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ ha->flags.lip_ae = 1; ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); break; case MBA_LOOP_UP: /* Loop Up Event */ if (IS_QLA2100(ha) || IS_QLA2200(ha)) ha->link_data_rate = PORT_SPEED_1GB; else ha->link_data_rate = mb[1]; ql_log(ql_log_info, vha, 0x500a, "LOOP UP detected (%s Gbps).\n", qla2x00_get_link_speed_str(ha, ha->link_data_rate)); if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mb[2] & BIT_0) ql_log(ql_log_info, vha, 0x11a0, "FEC=enabled (link up).\n"); } vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); if (vha->link_down_time < vha->hw->port_down_retry_count) { vha->short_link_down_cnt++; vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; } break; case MBA_LOOP_DOWN: /* Loop Down Event */ SAVE_TOPO(ha); ha->flags.lip_ae = 0; ha->current_topology = 0; vha->link_down_time = 0; mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? rd_reg_word(&reg24->mailbox4) : 0; mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4]) : mbx; ql_log(ql_log_info, vha, 0x500b, "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], mbx); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); /* * In case of loop down, restore WWPN from * NVRAM in case of FA-WWPN capable ISP * Restore for Physical Port only */ if (!vha->vp_idx) { if (ha->flags.fawwpn_enabled && (ha->current_topology == ISP_CFG_F)) { memcpy(vha->port_name, ha->port_name, WWN_SIZE); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x00d8, "LOOP DOWN detected," "restore WWPN %016llx\n", wwn_to_u64(vha->port_name)); } clear_bit(VP_CONFIG_OK, &vha->vp_flags); } vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); break; case MBA_LIP_RESET: /* LIP reset occurred */ ql_dbg(ql_dbg_async, vha, 0x500c, "LIP reset occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->operating_mode = LOOP; vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); break; /* case MBA_DCBX_COMPLETE: */ case MBA_POINT_TO_POINT: /* Point-to-Point */ ha->flags.lip_ae = 0; if (IS_QLA2100(ha)) break; if (IS_CNA_CAPABLE(ha)) { ql_dbg(ql_dbg_async, vha, 0x500d, "DCBX Completed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); if 
(ha->notify_dcbx_comp && !vha->vp_idx) complete(&ha->dcbx_comp); } else ql_dbg(ql_dbg_async, vha, 0x500e, "Asynchronous P2P MODE received.\n"); /* * Until there's a transition from loop down to loop up, treat * this as loop down only. */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (!N2N_TOPO(ha)) qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); vha->flags.management_server_logged_in = 0; break; case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ if (IS_QLA2100(ha)) break; ql_dbg(ql_dbg_async, vha, 0x500f, "Configuration change detected: value=%x.\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; case MBA_PORT_UPDATE: /* Port database update */ /* * Handle only global and vn-port update events * * Relevant inputs: * mb[1] = N_Port handle of changed port * OR 0xffff for global event * mb[2] = New login state * 7 = Port logged out * mb[3] = LSB is vp_idx, 0xff = all vps * * Skip processing if: * Event is global, vp_idx is NOT all vps, * vp_idx does not match * Event is not global, vp_idx does not match */ if (IS_QLA2XXX_MIDTYPE(ha) && ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) break; if (mb[2] == 0x7) { ql_dbg(ql_dbg_async, vha, 0x5010, "Port %s %04x %04x %04x.\n", mb[1] == 0xffff ? "unavailable" : "logout", mb[1], mb[2], mb[3]); if (mb[1] == 0xffff) goto global_port_update; if (mb[1] == NPH_SNS_LID(ha)) { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; } /* use handle_cnt for loop id/nport handle */ if (IS_FWI2_CAPABLE(ha)) handle_cnt = NPH_SNS; else handle_cnt = SIMPLE_NAME_SERVER; if (mb[1] == handle_cnt) { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; } /* Port logout */ fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); if (!fcport) break; if (atomic_read(&fcport->state) != FCS_ONLINE) break; ql_dbg(ql_dbg_async, vha, 0x508a, "Marking port lost loopid=%04x portid=%06x.\n", fcport->loop_id, fcport->d_id.b24); if (qla_ini_mode_enabled(vha)) { fcport->logout_on_delete = 0; qlt_schedule_sess_for_deletion(fcport); } break; global_port_update: if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); qla2x00_mark_all_devices_lost(vha); } vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; break; } /* * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET * event etc. 
earlier indicating loop is down) then process * it. Otherwise ignore it and Wait for RSCN to come in. */ atomic_set(&vha->loop_down_timer, 0); if (atomic_read(&vha->loop_state) != LOOP_DOWN && !ha->flags.n2n_ae && atomic_read(&vha->loop_state) != LOOP_DEAD) { ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); break; } ql_dbg(ql_dbg_async, vha, 0x5012, "Port database changed %04x %04x %04x.\n", mb[1], mb[2], mb[3]); /* * Mark all devices as missing so we will login again. */ atomic_set(&vha->loop_state, LOOP_UP); vha->scan.scan_retry = 0; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); break; case MBA_RSCN_UPDATE: /* State Change Registration */ /* Check if the Vport has issued a SCR */ if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) break; /* Only handle SCNs for our Vport index. */ if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) break; ql_log(ql_log_warn, vha, 0x5013, "RSCN database changed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) | vha->d_id.b.al_pa; if (rscn_entry == host_pid) { ql_dbg(ql_dbg_async, vha, 0x5014, "Ignoring RSCN update to local host " "port ID (%06x).\n", host_pid); break; } /* Ignore reserved bits from RSCN-payload. */ rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; /* Skip RSCNs for virtual ports on the same physical port */ if (qla2x00_is_a_vp_did(vha, rscn_entry)) break; atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; { struct event_arg ea; memset(&ea, 0, sizeof(ea)); ea.id.b24 = rscn_entry; ea.id.b.rsvd_1 = rscn_entry >> 24; qla2x00_handle_rscn(vha, &ea); qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); } break; case MBA_CONGN_NOTI_RECV: if (!ha->flags.scm_enabled || mb[1] != QLA_CON_PRIMITIVE_RECEIVED) break; if (mb[2] == QLA_CONGESTION_ARB_WARNING) { ql_dbg(ql_dbg_async, vha, 0x509b, "Congestion Warning %04x %04x.\n", mb[1], mb[2]); } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { ql_log(ql_log_warn, vha, 0x509b, "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); } break; /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: ql_dbg(ql_dbg_async, vha, 0x5015, "[R|Z]IO update completion.\n"); if (IS_FWI2_CAPABLE(ha)) qla24xx_process_response_queue(vha, rsp); else qla2x00_process_response_queue(rsp); break; case MBA_DISCARD_RND_FRAME: ql_dbg(ql_dbg_async, vha, 0x5016, "Discard RND Frame -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); vha->interface_err_cnt++; break; case MBA_TRACE_NOTIFICATION: ql_dbg(ql_dbg_async, vha, 0x5017, "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); break; case MBA_ISP84XX_ALERT: ql_dbg(ql_dbg_async, vha, 0x5018, "ISP84XX Alert Notification -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); spin_lock_irqsave(&ha->cs84xx->access_lock, flags); switch (mb[1]) { case A84_PANIC_RECOVERY: ql_log(ql_log_info, vha, 0x5019, "Alert 84XX: panic recovery %04x %04x.\n", mb[2], mb[3]); break; case A84_OP_LOGIN_COMPLETE: ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; ql_log(ql_log_info, vha, 0x501a, "Alert 84XX: firmware version %x.\n", ha->cs84xx->op_fw_version); break; case A84_DIAG_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; ql_log(ql_log_info, vha, 0x501b, "Alert 84XX: diagnostic firmware version %x.\n", ha->cs84xx->diag_fw_version); break; case A84_GOLD_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 
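/* Gold firmware login completed: the flag set below records that a
 * firmware update has taken place (inferred from the fw_update name),
 * and the gold firmware version reported by the ISP84xx is then logged. */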
ha->cs84xx->fw_update = 1; ql_log(ql_log_info, vha, 0x501c, "Alert 84XX: gold firmware version %x.\n", ha->cs84xx->gold_fw_version); break; default: ql_log(ql_log_warn, vha, 0x501d, "Alert 84xx: Invalid Alert %04x %04x %04x.\n", mb[1], mb[2], mb[3]); } spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); break; case MBA_DCBX_START: ql_dbg(ql_dbg_async, vha, 0x501e, "DCBX Started -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_DCBX_PARAM_UPDATE: ql_dbg(ql_dbg_async, vha, 0x501f, "DCBX Parameters Updated -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_FCF_CONF_ERR: ql_dbg(ql_dbg_async, vha, 0x5020, "FCF Configuration Error -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_IDC_NOTIFY: if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { mb[4] = rd_reg_word(&reg24->mailbox4); if (((mb[2] & 0x7fff) == MBC_PORT_RESET || (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); /* * Extend loop down timer since port is active. */ if (atomic_read(&vha->loop_state) == LOOP_DOWN) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2xxx_wake_dpc(vha); } } fallthrough; case MBA_IDC_COMPLETE: if (ha->notify_lb_portup_comp && !vha->vp_idx) complete(&ha->lb_portup_comp); fallthrough; case MBA_IDC_TIME_EXT: if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) qla81xx_idc_event(vha, mb[0], mb[1]); break; case MBA_IDC_AEN: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { vha->hw_err_cnt++; qla27xx_handle_8200_aen(vha, mb); } else if (IS_QLA83XX(ha)) { mb[4] = rd_reg_word(&reg24->mailbox4); mb[5] = rd_reg_word(&reg24->mailbox5); mb[6] = rd_reg_word(&reg24->mailbox6); mb[7] = rd_reg_word(&reg24->mailbox7); qla83xx_handle_8200_aen(vha, mb); } else { ql_dbg(ql_dbg_async, vha, 0x5052, "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", mb[0], mb[1], mb[2], mb[3]); } break; case MBA_DPORT_DIAGNOSTICS: if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR || (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR) vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; ql_dbg(ql_dbg_async, vha, 0x5052, "D-Port Diagnostics: %04x %04x %04x %04x\n", mb[0], mb[1], mb[2], mb[3]); memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { static char *results[] = { "start", "done(pass)", "done(error)", "undefined" }; static char *types[] = { "none", "dynamic", "static", "other" }; uint result = mb[1] >> 0 & 0x3; uint type = mb[1] >> 6 & 0x3; uint sw = mb[1] >> 15 & 0x1; ql_dbg(ql_dbg_async, vha, 0x5052, "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", results[result], types[type], sw); if (result == 2) { static char *reasons[] = { "reserved", "unexpected reject", "unexpected phase", "retry exceeded", "timed out", "not supported", "user stopped" }; uint reason = mb[2] >> 0 & 0xf; uint phase = mb[2] >> 12 & 0xf; ql_dbg(ql_dbg_async, vha, 0x5052, "D-Port Diagnostics: reason=%s phase=%u \n", reason < 7 ? 
reasons[reason] : "other", phase >> 1); } } break; case MBA_TEMPERATURE_ALERT: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) display_Laser_info(vha, mb[1], mb[2], mb[3]); ql_dbg(ql_dbg_async, vha, 0x505e, "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); break; case MBA_TRANS_INSERT: ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Insertion: %04x\n", mb[1]); set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); break; case MBA_TRANS_REMOVE: ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); break; default: ql_dbg(ql_dbg_async, vha, 0x5057, "Unknown AEN:%04x %04x %04x %04x\n", mb[0], mb[1], mb[2], mb[3]); } qlt_async_event(mb[0], vha, mb); if (!vha->vp_idx && ha->num_vhosts) qla2x00_alert_all_vps(rsp, mb); } /** * qla2x00_process_completed_request() - Process a Fast Post response. * @vha: SCSI driver HA context * @req: request queue * @index: SRB index */ void qla2x00_process_completed_request(struct scsi_qla_host *vha, struct req_que *req, uint32_t index) { srb_t *sp; struct qla_hw_data *ha = vha->hw; /* Validate handle. */ if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x3014, "Invalid SCSI command index (%x).\n", index); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } sp = req->outstanding_cmds[index]; if (sp) { /* Free outstanding command slot. */ req->outstanding_cmds[index] = NULL; /* Save ISP completion status */ sp->done(sp, DID_OK << 16); } else { ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } static srb_t * qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, struct req_que *req, void *iocb, u16 *ret_index) { struct qla_hw_data *ha = vha->hw; sts_entry_t *pkt = iocb; srb_t *sp; uint16_t index; if (pkt->handle == QLA_SKIP_HANDLE) return NULL; index = LSW(pkt->handle); if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, "%s: Invalid command index (%x) type %8ph.\n", func, index, iocb); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return NULL; } sp = req->outstanding_cmds[index]; if (!sp) { ql_log(ql_log_warn, vha, 0x5032, "%s: Invalid completion handle (%x) -- timed-out.\n", func, index); return NULL; } if (sp->handle != index) { ql_log(ql_log_warn, vha, 0x5033, "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, index); return NULL; } *ret_index = index; qla_put_fw_resources(sp->qpair, &sp->iores); return sp; } srb_t * qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, struct req_que *req, void *iocb) { uint16_t index; srb_t *sp; sp = qla_get_sp_from_handle(vha, func, req, iocb, &index); if (sp) req->outstanding_cmds[index] = NULL; return sp; } static void qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct mbx_entry *mbx) { const char func[] = "MBX-IOCB"; const char *type; fc_port_t *fcport; srb_t *sp; struct srb_iocb *lio; uint16_t *data; uint16_t status; sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); if (!sp) return; lio = &sp->u.iocb_cmd; type = sp->name; fcport = sp->fcport; data = lio->u.logio.data; data[0] = MBS_COMMAND_ERROR; data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; if (mbx->entry_status) { ql_dbg(ql_dbg_async, vha, 0x5043, "Async-%s error entry - hdl=%x portid=%02x%02x%02x " "entry-status=%x status=%x state-flag=%x " "status-flags=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mbx->entry_status, le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), le16_to_cpu(mbx->status_flags)); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, mbx, sizeof(*mbx)); goto logio_done; } status = le16_to_cpu(mbx->status); if (status == 0x30 && sp->type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) status = 0; if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_async, vha, 0x5045, "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); data[0] = MBS_COMMAND_COMPLETE; if (sp->type == SRB_LOGIN_CMD) { fcport->port_type = FCT_TARGET; if (le16_to_cpu(mbx->mb1) & BIT_0) fcport->port_type = FCT_INITIATOR; else if (le16_to_cpu(mbx->mb1) & BIT_1) fcport->flags |= FCF_FCP2_DEVICE; } goto logio_done; } data[0] = le16_to_cpu(mbx->mb0); switch (data[0]) { case MBS_PORT_ID_USED: data[1] = le16_to_cpu(mbx->mb1); break; case MBS_LOOP_ID_USED: break; default: data[0] = MBS_COMMAND_ERROR; break; } ql_log(ql_log_warn, vha, 0x5046, "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), le16_to_cpu(mbx->mb7)); logio_done: sp->done(sp, 0); } static void qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct mbx_24xx_entry *pkt) { const char func[] = "MBX-IOCB2"; struct qla_hw_data *ha = vha->hw; srb_t *sp; struct srb_iocb *si; u16 sz, i; int res; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; if (sp->type == SRB_SCSI_CMD || sp->type == SRB_NVME_CMD || sp->type == SRB_TM_CMD) { ql_log(ql_log_warn, vha, 0x509d, "Inconsistent event entry type %d\n", sp->type); if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } si = &sp->u.iocb_cmd; sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); for (i = 0; i < sz; i++) si->u.mbx.in_mb[i] = pkt->mb[i]; res = (si->u.mbx.in_mb[0] & MBS_MASK); sp->done(sp, res); } static void qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct nack_to_isp *pkt) { const char func[] = "nack"; srb_t *sp; int res = 0; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) res = QLA_FUNCTION_FAILED; sp->done(sp, res); } static void qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, sts_entry_t *pkt, int iocb_type) { const char func[] = "CT_IOCB"; const char *type; srb_t *sp; struct bsg_job *bsg_job; struct fc_bsg_reply *bsg_reply; uint16_t comp_status; int res = 0; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; switch (sp->type) { case SRB_CT_CMD: bsg_job = sp->u.bsg_job; bsg_reply = bsg_job->reply; type = "ct pass-through"; comp_status = le16_to_cpu(pkt->comp_status); /* * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = 
sizeof(struct fc_bsg_reply); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = le16_to_cpu(pkt->rsp_info_len); ql_log(ql_log_warn, vha, 0x5048, "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", type, comp_status, bsg_reply->reply_payload_rcv_len); } else { ql_log(ql_log_warn, vha, 0x5049, "CT pass-through-%s error comp_status=0x%x.\n", type, comp_status); res = DID_ERROR << 16; bsg_reply->reply_payload_rcv_len = 0; } ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, pkt, sizeof(*pkt)); } else { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } break; case SRB_CT_PTHRU_CMD: /* * borrowing sts_entry_24xx.comp_status. * same location as ct_entry_24xx.comp_status */ res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, sp->name); break; } sp->done(sp, res); } static void qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, struct sts_entry_24xx *pkt, int iocb_type) { struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; const char func[] = "ELS_CT_IOCB"; const char *type; srb_t *sp; struct bsg_job *bsg_job; struct fc_bsg_reply *bsg_reply; uint16_t comp_status; uint32_t fw_status[3]; int res, logit = 1; struct srb_iocb *els; uint n; scsi_qla_host_t *vha; struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt; sp = qla2x00_get_sp_from_handle(v, func, req, pkt); if (!sp) return; bsg_job = sp->u.bsg_job; vha = sp->vha; type = NULL; comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); switch (sp->type) { case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: type = "rpt hst"; break; case SRB_ELS_CMD_HST_NOLOGIN: type = "els"; { struct els_entry_24xx *els = (void *)pkt; struct qla_bsg_auth_els_request *p = (struct qla_bsg_auth_els_request *)bsg_job->request; ql_dbg(ql_dbg_user, vha, 0x700f, "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n", __func__, sc_to_str(p->e.sub_cmd), e->d_id[2], e->d_id[1], e->d_id[0], comp_status, p->e.extra_rx_xchg_address, bsg_job); if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) { if (sp->remap.remapped) { n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, sp->remap.rsp.buf, sp->remap.rsp.len); ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e, "%s: SG copied %x of %x\n", __func__, n, sp->remap.rsp.len); } else { ql_dbg(ql_dbg_user, vha, 0x700f, "%s: NOT REMAPPED (error)...!!!\n", __func__); } } } break; case SRB_CT_CMD: type = "ct pass-through"; break; case SRB_ELS_DCMD: type = "Driver ELS logo"; if (iocb_type != ELS_IOCB_TYPE) { ql_dbg(ql_dbg_user, vha, 0x5047, "Completing %s: (%p) type=%d.\n", type, sp, sp->type); sp->done(sp, 0); return; } break; case SRB_CT_PTHRU_CMD: /* borrowing sts_entry_24xx.comp_status. 
same location as ct_entry_24xx.comp_status */ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, sp->name); sp->done(sp, res); return; default: ql_dbg(ql_dbg_user, vha, 0x503e, "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); return; } if (iocb_type == ELS_IOCB_TYPE) { els = &sp->u.iocb_cmd; els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); if (comp_status == CS_COMPLETE) { res = DID_OK << 16; } else { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( ese->total_byte_count)); if (sp->remap.remapped && ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) { ql_dbg(ql_dbg_user, vha, 0x503f, "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x", __func__, e->s_id[0], e->s_id[2], e->s_id[1], e->d_id[2], e->d_id[1], e->d_id[0]); logit = 0; } } else if (comp_status == CS_PORT_LOGGED_OUT) { ql_dbg(ql_dbg_disc, vha, 0x911e, "%s %d schedule session deletion\n", __func__, __LINE__); els->u.els_plogi.len = 0; res = DID_IMM_RETRY << 16; qlt_schedule_sess_for_deletion(sp->fcport); } else { els->u.els_plogi.len = 0; res = DID_ERROR << 16; } if (sp->remap.remapped && ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) { if (logit) { ql_dbg(ql_dbg_user, vha, 0x503f, "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n", type, sp->handle, comp_status); ql_dbg(ql_dbg_user, vha, 0x503f, "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", fw_status[1], fw_status[2], le32_to_cpu(((struct els_sts_entry_24xx *) pkt)->total_byte_count), e->s_id[0], e->s_id[2], e->s_id[1], e->d_id[2], e->d_id[1], e->d_id[0]); } if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE && sp->type == SRB_ELS_CMD_HST_NOLOGIN) { ql_dbg(ql_dbg_edif, vha, 0x911e, "%s rcv reject. 
Sched delete\n", __func__); qlt_schedule_sess_for_deletion(sp->fcport); } } else if (logit) { ql_log(ql_log_info, vha, 0x503f, "%s IOCB Done hdl=%x comp_status=0x%x\n", type, sp->handle, comp_status); ql_log(ql_log_info, vha, 0x503f, "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", fw_status[1], fw_status[2], le32_to_cpu(((struct els_sts_entry_24xx *) pkt)->total_byte_count), e->s_id[0], e->s_id[2], e->s_id[1], e->d_id[2], e->d_id[1], e->d_id[0]); } } goto els_ct_done; } /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ bsg_job = sp->u.bsg_job; bsg_reply = bsg_job->reply; bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = le32_to_cpu(ese->total_byte_count); ql_dbg(ql_dbg_user, vha, 0x503f, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", type, sp->handle, comp_status, fw_status[1], fw_status[2], le32_to_cpu(ese->total_byte_count)); } else { ql_dbg(ql_dbg_user, vha, 0x5040, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x.\n", type, sp->handle, comp_status, le32_to_cpu(ese->error_subcode_1), le32_to_cpu(ese->error_subcode_2)); res = DID_ERROR << 16; bsg_reply->reply_payload_rcv_len = 0; } memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), fw_status, sizeof(fw_status)); ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, pkt, sizeof(*pkt)); } else { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } els_ct_done: sp->done(sp, res); } static void qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, struct logio_entry_24xx *logio) { const char func[] = "LOGIO-IOCB"; const char *type; fc_port_t *fcport; srb_t *sp; struct srb_iocb *lio; uint16_t *data; uint32_t iop[2]; int logit = 1; sp = qla2x00_get_sp_from_handle(vha, func, req, logio); if (!sp) return; lio = &sp->u.iocb_cmd; type = sp->name; fcport = sp->fcport; data = lio->u.logio.data; data[0] = MBS_COMMAND_ERROR; data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5034, "Async-%s error entry - %8phC hdl=%x" "portid=%02x%02x%02x entry-status=%x.\n", type, fcport->port_name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, logio->entry_status); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, logio, sizeof(*logio)); goto logio_done; } if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { ql_dbg(ql_dbg_async, sp->vha, 0x5036, "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", type, sp->handle, fcport->d_id.b24, fcport->port_name, le32_to_cpu(logio->io_parameter[0])); vha->hw->exch_starvation = 0; data[0] = MBS_COMMAND_COMPLETE; if (sp->type == SRB_PRLI_CMD) { lio->u.logio.iop[0] = le32_to_cpu(logio->io_parameter[0]); lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[1]); goto logio_done; } if (sp->type != SRB_LOGIN_CMD) goto logio_done; lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]); if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP) fcport->flags |= FCF_FCSP_DEVICE; iop[0] = le32_to_cpu(logio->io_parameter[0]); if (iop[0] & BIT_4) { fcport->port_type = FCT_TARGET; if (iop[0] & BIT_8) fcport->flags |= FCF_FCP2_DEVICE; } else if (iop[0] & BIT_5) fcport->port_type = FCT_INITIATOR; if (iop[0] & BIT_7) fcport->flags |= FCF_CONF_COMP_SUPPORTED; if (logio->io_parameter[7] || logio->io_parameter[8]) fcport->supported_classes |= FC_COS_CLASS2; if (logio->io_parameter[9] || logio->io_parameter[10]) fcport->supported_classes |= FC_COS_CLASS3; goto logio_done; } iop[0] = le32_to_cpu(logio->io_parameter[0]); iop[1] = le32_to_cpu(logio->io_parameter[1]); lio->u.logio.iop[0] = iop[0]; lio->u.logio.iop[1] = iop[1]; switch (iop[0]) { case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; data[1] = LSW(iop[1]); logit = 0; break; case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; logit = 0; break; case LSC_SCODE_CMD_FAILED: if (iop[1] == 0x0606) { /* * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, * Target side acked. */ data[0] = MBS_COMMAND_COMPLETE; goto logio_done; } data[0] = MBS_COMMAND_ERROR; break; case LSC_SCODE_NOXCB: vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { ql_log(ql_log_warn, vha, 0xd046, "Exchange starvation. 
Resetting RISC\n"); vha->hw->exch_starvation = 0; if (IS_P3P_TYPE(vha->hw)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } fallthrough; default: data[0] = MBS_COMMAND_ERROR; break; } if (logit) ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: " "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", type, sp->handle, fcport->d_id.b24, fcport->port_name, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), le32_to_cpu(logio->io_parameter[1])); else ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: " "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", type, sp->handle, fcport->d_id.b24, fcport->port_name, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), le32_to_cpu(logio->io_parameter[1])); logio_done: sp->done(sp, 0); } static void qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) { const char func[] = "TMF-IOCB"; const char *type; fc_port_t *fcport; srb_t *sp; struct srb_iocb *iocb; struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; u16 comp_status; sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); if (!sp) return; comp_status = le16_to_cpu(sts->comp_status); iocb = &sp->u.iocb_cmd; type = sp->name; fcport = sp->fcport; iocb->u.tmf.data = QLA_SUCCESS; if (sts->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5038, "Async-%s error - hdl=%x entry-status(%x).\n", type, sp->handle, sts->entry_status); iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_log(ql_log_warn, fcport->vha, 0x5039, "Async-%s error - hdl=%x completion status(%x).\n", type, sp->handle, comp_status); iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if ((le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { host_to_fcp_swap(sts->data, sizeof(sts->data)); if (le32_to_cpu(sts->rsp_data_len) < 4) { ql_log(ql_log_warn, fcport->vha, 0x503b, "Async-%s error - hdl=%x not enough response(%d).\n", type, sp->handle, sts->rsp_data_len); } else if (sts->data[3]) { ql_log(ql_log_warn, fcport->vha, 0x503c, "Async-%s error - hdl=%x response(%x).\n", type, sp->handle, sts->data[3]); iocb->u.tmf.data = QLA_FUNCTION_FAILED; } } switch (comp_status) { case CS_PORT_LOGGED_OUT: case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: case CS_RESET: if (atomic_read(&fcport->state) == FCS_ONLINE) { ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n", fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, port_state_str[FCS_ONLINE], comp_status); qlt_schedule_sess_for_deletion(fcport); } break; default: break; } if (iocb->u.tmf.data != QLA_SUCCESS) ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, sts, sizeof(*sts)); sp->done(sp, 0); } static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk, srb_t *sp) { fc_port_t *fcport; struct srb_iocb *iocb; struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; uint16_t state_flags; struct nvmefc_fcp_req *fd; uint16_t ret = QLA_SUCCESS; __le16 comp_status = sts->comp_status; int logit = 0; iocb = &sp->u.iocb_cmd; fcport = sp->fcport; iocb->u.nvme.comp_status = comp_status; state_flags = le16_to_cpu(sts->state_flags); fd = iocb->u.nvme.desc; if (unlikely(iocb->u.nvme.aen_op)) atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); else sp->qpair->cmd_completion_cnt++; if (unlikely(comp_status != 
CS_COMPLETE)) logit = 1; fd->transferred_length = fd->payload_length - le32_to_cpu(sts->residual_len); /* * State flags: Bit 6 and 0. * If 0 is set, we don't care about 6. * both cases resp was dma'd to host buffer * if both are 0, that is good path case. * if six is set and 0 is clear, we need to * copy resp data from status iocb to resp buffer. */ if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { iocb->u.nvme.rsp_pyld_len = 0; } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { /* Response already DMA'd to fd->rspaddr. */ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; } else if ((state_flags & SF_FCP_RSP_DMA)) { /* * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this * as an error. */ iocb->u.nvme.rsp_pyld_len = 0; fd->transferred_length = 0; ql_dbg(ql_dbg_io, fcport->vha, 0x307a, "Unexpected values in NVMe_RSP IU.\n"); logit = 1; } else if (state_flags & SF_NVME_ERSP) { uint32_t *inbuf, *outbuf; uint16_t iter; inbuf = (uint32_t *)&sts->nvme_ersp_data; outbuf = (uint32_t *)fd->rspaddr; iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > sizeof(struct nvme_fc_ersp_iu))) { if (ql_mask_match(ql_dbg_io)) { WARN_ONCE(1, "Unexpected response payload length %u.\n", iocb->u.nvme.rsp_pyld_len); ql_log(ql_log_warn, fcport->vha, 0x5100, "Unexpected response payload length %u.\n", iocb->u.nvme.rsp_pyld_len); } iocb->u.nvme.rsp_pyld_len = cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); } iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; for (; iter; iter--) *outbuf++ = swab32(*inbuf++); } if (state_flags & SF_NVME_ERSP) { struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; u32 tgt_xfer_len; tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); if (fd->transferred_length != tgt_xfer_len) { ql_log(ql_log_warn, fcport->vha, 0x3079, "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", tgt_xfer_len, fd->transferred_length); logit = 1; } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { /* * Do not log if this is just an underflow and there * is no data loss. */ logit = 0; } } if (unlikely(logit)) ql_dbg(ql_dbg_io, fcport->vha, 0x5060, "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", sp->name, sp->handle, comp_status, fd->transferred_length, le32_to_cpu(sts->residual_len), sts->ox_id); /* * If transport error then Failure (HBA rejects request) * otherwise transport will handle. 
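 * Reset, port-unavailable and logged-out completions below additionally
 * schedule the session for deletion (when the port is still online)
 * before the request is returned with QLA_ABORTED.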
*/ switch (le16_to_cpu(comp_status)) { case CS_COMPLETE: break; case CS_RESET: case CS_PORT_UNAVAILABLE: case CS_PORT_LOGGED_OUT: fcport->nvme_flag |= NVME_FLAG_RESETTING; if (atomic_read(&fcport->state) == FCS_ONLINE) { ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, "Port to be marked lost on fcport=%06x, current " "port state= %s comp_status %x.\n", fcport->d_id.b24, port_state_str[FCS_ONLINE], comp_status); qlt_schedule_sess_for_deletion(fcport); } fallthrough; case CS_ABORTED: case CS_PORT_BUSY: fd->transferred_length = 0; iocb->u.nvme.rsp_pyld_len = 0; ret = QLA_ABORTED; break; case CS_DATA_UNDERRUN: break; default: ret = QLA_FUNCTION_FAILED; break; } sp->done(sp, ret); } static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, struct vp_ctrl_entry_24xx *vce) { const char func[] = "CTRLVP-IOCB"; srb_t *sp; int rval = QLA_SUCCESS; sp = qla2x00_get_sp_from_handle(vha, func, req, vce); if (!sp) return; if (vce->entry_status != 0) { ql_dbg(ql_dbg_vport, vha, 0x10c4, "%s: Failed to complete IOCB -- error status (%x)\n", sp->name, vce->entry_status); rval = QLA_FUNCTION_FAILED; } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_vport, vha, 0x10c5, "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", sp->name, le16_to_cpu(vce->comp_status), le16_to_cpu(vce->vp_idx_failed)); rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_vport, vha, 0x10c6, "Done %s.\n", __func__); } sp->rc = rval; sp->done(sp, rval); } /* Process a single response queue entry. */ static void qla2x00_process_response_entry(struct scsi_qla_host *vha, struct rsp_que *rsp, sts_entry_t *pkt) { sts21_entry_t *sts21_entry; sts22_entry_t *sts22_entry; uint16_t handle_cnt; uint16_t cnt; switch (pkt->entry_type) { case STATUS_TYPE: qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_TYPE_21: sts21_entry = (sts21_entry_t *)pkt; handle_cnt = sts21_entry->handle_count; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(vha, rsp->req, sts21_entry->handle[cnt]); break; case STATUS_TYPE_22: sts22_entry = (sts22_entry_t *)pkt; handle_cnt = sts22_entry->handle_count; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(vha, rsp->req, sts22_entry->handle[cnt]); break; case STATUS_CONT_TYPE: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; case MBX_IOCB_TYPE: qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); break; case CT_IOCB_TYPE: qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); break; default: /* Type Not Supported. */ ql_log(ql_log_warn, vha, 0x504a, "Received unknown response pkt type %x entry status=%x.\n", pkt->entry_type, pkt->entry_status); break; } } /** * qla2x00_process_response_queue() - Process response queue entries. 
 * @rsp: response queue
 */
void qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		qla2x00_process_response_entry(vha, rsp, pkt);
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun, cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;		/* Checksum */
	__be16 app_tag;		/* APPL identifier */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
*/ static inline int qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { struct scsi_qla_host *vha = sp->vha; struct scsi_cmnd *cmd = GET_CMD_SP(sp); uint8_t *ap = &sts24->data[12]; uint8_t *ep = &sts24->data[20]; uint32_t e_ref_tag, a_ref_tag; uint16_t e_app_tag, a_app_tag; uint16_t e_guard, a_guard; /* * swab32 of the "data" field in the beginning of qla2x00_status_entry() * would make guard field appear at offset 2 */ a_guard = get_unaligned_le16(ap + 2); a_app_tag = get_unaligned_le16(ap + 0); a_ref_tag = get_unaligned_le32(ap + 4); e_guard = get_unaligned_le16(ep + 2); e_app_tag = get_unaligned_le16(ep + 0); e_ref_tag = get_unaligned_le32(ep + 4); ql_dbg(ql_dbg_io, vha, 0x3023, "iocb(s) %p Returned STATUS.\n", sts24); ql_dbg(ql_dbg_io, vha, 0x3024, "DIF ERROR in cmd 0x%x lba 0x%llx act ref" " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); /* * Ignore sector if: * For type 3: ref & app tag is all 'f's * For type 0,1,2: app tag is all 'f's */ if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { uint32_t blocks_done, resid; sector_t lba_s = scsi_get_lba(cmd); /* 2TB boundary case covered automatically with this */ blocks_done = e_ref_tag - (uint32_t)lba_s + 1; resid = scsi_bufflen(cmd) - (blocks_done * cmd->device->sector_size); scsi_set_resid(cmd, resid); cmd->result = DID_OK << 16; /* Update protection tag */ if (scsi_prot_sg_count(cmd)) { uint32_t i, j = 0, k = 0, num_ent; struct scatterlist *sg; struct t10_pi_tuple *spt; /* Patch the corresponding protection tags */ scsi_for_each_prot_sg(cmd, sg, scsi_prot_sg_count(cmd), i) { num_ent = sg_dma_len(sg) / 8; if (k + num_ent < blocks_done) { k += num_ent; continue; } j = blocks_done - k - 1; k = blocks_done; break; } if (k != blocks_done) { ql_log(ql_log_warn, vha, 0x302f, "unexpected tag values tag:lba=%x:%llx)\n", e_ref_tag, (unsigned long long)lba_s); return 1; } spt = page_address(sg_page(sg)) + sg->offset; spt += j; spt->app_tag = T10_PI_APP_ESCAPE; if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) spt->ref_tag = T10_PI_REF_ESCAPE; } return 0; } /* check guard */ if (e_guard != a_guard) { scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); set_host_byte(cmd, DID_ABORT); return 1; } /* check ref tag */ if (e_ref_tag != a_ref_tag) { scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); set_host_byte(cmd, DID_ABORT); return 1; } /* check appl tag */ if (e_app_tag != a_app_tag) { scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); set_host_byte(cmd, DID_ABORT); return 1; } return 1; } static void qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, struct req_que *req, uint32_t index) { struct qla_hw_data *ha = vha->hw; srb_t *sp; uint16_t comp_status; uint16_t scsi_status; uint16_t thread_id; uint32_t rval = EXT_STATUS_OK; struct bsg_job *bsg_job = NULL; struct fc_bsg_request *bsg_request; struct fc_bsg_reply *bsg_reply; sts_entry_t *sts = pkt; struct sts_entry_24xx *sts24 = pkt; /* Validate handle. 
*/ if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x70af, "Invalid SCSI completion handle 0x%x.\n", index); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } sp = req->outstanding_cmds[index]; if (!sp) { ql_log(ql_log_warn, vha, 0x70b0, "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", req->id, index); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } /* Free outstanding command slot. */ req->outstanding_cmds[index] = NULL; bsg_job = sp->u.bsg_job; bsg_request = bsg_job->request; bsg_reply = bsg_job->reply; if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; } else { comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; switch (comp_status) { case CS_COMPLETE: if (scsi_status == 0) { bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; vha->qla_stats.input_bytes += bsg_reply->reply_payload_rcv_len; vha->qla_stats.input_requests++; rval = EXT_STATUS_OK; } goto done; case CS_DATA_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b1, "Command completed with data overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_DATA_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b2, "Command completed with data underrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_RD_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b3, "Command completed with read data overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_BIDIR_RD_WR_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b4, "Command completed with read and write data overrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b5, "Command completed with read data over and write data " "underrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_BIDIR_RD_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b6, "Command completed with read data underrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b7, "Command completed with read data under and write data " "overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_RD_WR_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b8, "Command completed with read and write data underrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_DMA: ql_dbg(ql_dbg_user, vha, 0x70b9, "Command completed with data DMA error thread_id=%d\n", thread_id); rval = EXT_STATUS_DMA_ERR; break; case CS_TIMEOUT: ql_dbg(ql_dbg_user, vha, 0x70ba, "Command completed with timeout thread_id=%d\n", thread_id); rval = EXT_STATUS_TIMEOUT; break; default: ql_dbg(ql_dbg_user, vha, 0x70bb, "Command completed with completion status=0x%x " "thread_id=%d\n", comp_status, thread_id); rval = EXT_STATUS_ERR; break; } bsg_reply->reply_payload_rcv_len = 0; done: /* Return the vendor specific reply to API */ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = sizeof(struct fc_bsg_reply); /* Always return DID_OK, bsg will send the vendor specific response * in this case only */ sp->done(sp, DID_OK << 16); } /** * qla2x00_status_entry() - Process a Status IOCB entry. 
* @vha: SCSI driver HA context * @rsp: response queue * @pkt: Entry pointer */ static void qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) { srb_t *sp; fc_port_t *fcport; struct scsi_cmnd *cp; sts_entry_t *sts = pkt; struct sts_entry_24xx *sts24 = pkt; uint16_t comp_status; uint16_t scsi_status; uint16_t ox_id; uint8_t lscsi_status; int32_t resid; uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, fw_resid_len; uint8_t *rsp_info, *sense_data; struct qla_hw_data *ha = vha->hw; uint32_t handle; uint16_t que; struct req_que *req; int logit = 1; int res = 0; uint16_t state_flags = 0; uint16_t sts_qual = 0; if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; state_flags = le16_to_cpu(sts24->state_flags); } else { comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } handle = (uint32_t) LSW(sts->handle); que = MSW(sts->handle); req = ha->req_q_map[que]; /* Check for invalid queue pointer */ if (req == NULL || que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { ql_dbg(ql_dbg_io, vha, 0x3059, "Invalid status handle (0x%x): Bad req pointer. req=%p, " "que=%u.\n", sts->handle, req, que); return; } /* Validate handle. */ if (handle < req->num_outstanding_cmds) { sp = req->outstanding_cmds[handle]; if (!sp) { ql_dbg(ql_dbg_io, vha, 0x3075, "%s(%ld): Already returned command for status handle (0x%x).\n", __func__, vha->host_no, sts->handle); return; } } else { ql_dbg(ql_dbg_io, vha, 0x3017, "Invalid status handle, out of range (0x%x).\n", sts->handle); if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } return; } qla_put_fw_resources(sp->qpair, &sp->iores); if (sp->cmd_type != TYPE_SRB) { req->outstanding_cmds[handle] = NULL; ql_dbg(ql_dbg_io, vha, 0x3015, "Unknown sp->cmd_type %x %p).\n", sp->cmd_type, sp); return; } /* NVME completion. */ if (sp->type == SRB_NVME_CMD) { req->outstanding_cmds[handle] = NULL; qla24xx_nvme_iocb_entry(vha, req, pkt, sp); return; } if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); return; } /* Task Management completion. */ if (sp->type == SRB_TM_CMD) { qla24xx_tm_iocb_entry(vha, req, pkt); return; } /* Fast path completion. 
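 * A CS_COMPLETE status with no SCSI status bits set is completed right
 * here; anything else falls through to the full status decode below.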
*/ qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24); sp->qpair->cmd_completion_cnt++; if (comp_status == CS_COMPLETE && scsi_status == 0) { qla2x00_process_completed_request(vha, req, handle); return; } cp = GET_CMD_SP(sp); if (cp == NULL) { ql_dbg(ql_dbg_io, vha, 0x3018, "Command already returned (0x%x/%p).\n", sts->handle, sp); req->outstanding_cmds[handle] = NULL; return; } lscsi_status = scsi_status & STATUS_MASK; fcport = sp->fcport; ox_id = 0; sense_len = par_sense_len = rsp_info_len = resid_len = fw_resid_len = 0; if (IS_FWI2_CAPABLE(ha)) { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le32_to_cpu(sts24->sense_len); if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) rsp_info_len = le32_to_cpu(sts24->rsp_data_len); if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) resid_len = le32_to_cpu(sts24->rsp_residual_count); if (comp_status == CS_DATA_UNDERRUN) fw_resid_len = le32_to_cpu(sts24->residual_len); rsp_info = sts24->data; sense_data = sts24->data; host_to_fcp_swap(sts24->data, sizeof(sts24->data)); ox_id = le16_to_cpu(sts24->ox_id); par_sense_len = sizeof(sts24->data); sts_qual = le16_to_cpu(sts24->status_qualifier); } else { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le16_to_cpu(sts->req_sense_length); if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) rsp_info_len = le16_to_cpu(sts->rsp_info_len); resid_len = le32_to_cpu(sts->residual_length); rsp_info = sts->rsp_info; sense_data = sts->req_sense_data; par_sense_len = sizeof(sts->req_sense_data); } /* Check for any FCP transport errors. */ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { /* Sense data lies beyond any FCP RESPONSE data. */ if (IS_FWI2_CAPABLE(ha)) { sense_data += rsp_info_len; par_sense_len -= rsp_info_len; } if (rsp_info_len > 3 && rsp_info[3]) { ql_dbg(ql_dbg_io, fcport->vha, 0x3019, "FCP I/O protocol failure (0x%x/0x%x).\n", rsp_info_len, rsp_info[3]); res = DID_BUS_BUSY << 16; goto out; } } /* Check for overrun. */ if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && scsi_status & SS_RESIDUAL_OVER) comp_status = CS_DATA_OVERRUN; /* * Check retry_delay_timer value if we receive a busy or * queue full. */ if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || lscsi_status == SAM_STAT_BUSY)) qla2x00_set_retry_delay_timestamp(fcport, sts_qual); /* * Based on Host and scsi status generate status code for Linux */ switch (comp_status) { case CS_COMPLETE: case CS_QUEUE_FULL: if (scsi_status == 0) { res = DID_OK << 16; break; } if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { resid = resid_len; scsi_set_resid(cp, resid); if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { ql_dbg(ql_dbg_io, fcport->vha, 0x301a, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16; break; } } res = DID_OK << 16 | lscsi_status; if (lscsi_status == SAM_STAT_TASK_SET_FULL) { ql_dbg(ql_dbg_io, fcport->vha, 0x301b, "QUEUE FULL detected.\n"); break; } logit = 0; if (lscsi_status != SS_CHECK_CONDITION) break; memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (!(scsi_status & SS_SENSE_LEN_VALID)) break; qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); break; case CS_DATA_UNDERRUN: /* Use F/W calculated residual length. */ resid = IS_FWI2_CAPABLE(ha) ? 
fw_resid_len : resid_len; scsi_set_resid(cp, resid); if (scsi_status & SS_RESIDUAL_UNDER) { if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { ql_log(ql_log_warn, fcport->vha, 0x301d, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; } if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { ql_dbg(ql_dbg_io, fcport->vha, 0x301e, "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); res = DID_ERROR << 16; break; } } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && lscsi_status != SAM_STAT_BUSY) { /* * scsi status of task set and busy are considered to be * task not completed. */ ql_log(ql_log_warn, fcport->vha, 0x301f, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); vha->interface_err_cnt++; res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; } else { ql_dbg(ql_dbg_io, fcport->vha, 0x3030, "scsi_status: 0x%x, lscsi_status: 0x%x\n", scsi_status, lscsi_status); } res = DID_OK << 16 | lscsi_status; logit = 0; check_scsi_status: /* * Check to see if SCSI Status is non zero. If so report SCSI * Status. */ if (lscsi_status != 0) { if (lscsi_status == SAM_STAT_TASK_SET_FULL) { ql_dbg(ql_dbg_io, fcport->vha, 0x3020, "QUEUE FULL detected.\n"); logit = 1; break; } if (lscsi_status != SS_CHECK_CONDITION) break; memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (!(scsi_status & SS_SENSE_LEN_VALID)) break; qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, rsp, res); } break; case CS_PORT_LOGGED_OUT: case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: case CS_TIMEOUT: case CS_RESET: case CS_EDIF_INV_REQ: /* * We are going to have the fc class block the rport * while we try to recover so instruct the mid layer * to requeue until the class decides how to handle this. 
*/ res = DID_TRANSPORT_DISRUPTED << 16; if (comp_status == CS_TIMEOUT) { if (IS_FWI2_CAPABLE(ha)) break; else if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT) == 0) break; } if (atomic_read(&fcport->state) == FCS_ONLINE) { ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, "Port to be marked lost on fcport=%02x%02x%02x, current " "port state= %s comp_status %x.\n", fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, port_state_str[FCS_ONLINE], comp_status); qlt_schedule_sess_for_deletion(fcport); } break; case CS_ABORTED: res = DID_RESET << 16; break; case CS_DIF_ERROR: logit = qla2x00_handle_dif_error(sp, sts24); res = cp->result; break; case CS_TRANSPORT: res = DID_ERROR << 16; vha->hw_err_cnt++; if (!IS_PI_SPLIT_DET_CAPABLE(ha)) break; if (state_flags & BIT_4) scmd_printk(KERN_WARNING, cp, "Unsupported device '%s' found.\n", cp->device->vendor); break; case CS_DMA: ql_log(ql_log_info, fcport->vha, 0x3022, "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->d_id.b24, ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len, sp, cp); ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, pkt, sizeof(*sts24)); res = DID_ERROR << 16; vha->hw_err_cnt++; break; default: res = DID_ERROR << 16; break; } out: if (logit) ql_dbg(ql_dbg_io, fcport->vha, 0x3022, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len, sp, cp); if (rsp->status_srb == NULL) sp->done(sp, res); /* for io's, clearing of outstanding_cmds[handle] means scsi_done was called */ req->outstanding_cmds[handle] = NULL; } /** * qla2x00_status_cont_entry() - Process a Status Continuations entry. * @rsp: response queue * @pkt: Entry pointer * * Extended sense data. */ static void qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; struct qla_hw_data *ha = rsp->hw; struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); srb_t *sp = rsp->status_srb; struct scsi_cmnd *cp; uint32_t sense_len; uint8_t *sense_ptr; if (!sp || !GET_CMD_SENSE_LEN(sp)) return; sense_len = GET_CMD_SENSE_LEN(sp); sense_ptr = GET_CMD_SENSE_PTR(sp); cp = GET_CMD_SP(sp); if (cp == NULL) { ql_log(ql_log_warn, vha, 0x3025, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = NULL; return; } if (sense_len > sizeof(pkt->data)) sense_sz = sizeof(pkt->data); else sense_sz = sense_len; /* Move sense data. */ if (IS_FWI2_CAPABLE(ha)) host_to_fcp_swap(pkt->data, sizeof(pkt->data)); memcpy(sense_ptr, pkt->data, sense_sz); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, sense_ptr, sense_sz); sense_len -= sense_sz; sense_ptr += sense_sz; SET_CMD_SENSE_PTR(sp, sense_ptr); SET_CMD_SENSE_LEN(sp, sense_len); /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; sp->done(sp, cp->result); } } /** * qla2x00_error_entry() - Process an error entry. * @vha: SCSI driver HA context * @rsp: response queue * @pkt: Entry pointer * return : 1=allow further error analysis. 0=no additional error analysis. 
*/ static int qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; const char func[] = "ERROR-IOCB"; uint16_t que = MSW(pkt->handle); struct req_que *req = NULL; int res = DID_ERROR << 16; u16 index; ql_dbg(ql_dbg_async, vha, 0x502a, "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); if (que >= ha->max_req_queues || !ha->req_q_map[que]) goto fatal; req = ha->req_q_map[que]; if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) return 0; switch (pkt->entry_type) { case NOTIFY_ACK_TYPE: case STATUS_CONT_TYPE: case LOGINOUT_PORT_IOCB_TYPE: case CT_IOCB_TYPE: case ELS_IOCB_TYPE: case ABORT_IOCB_TYPE: case MBX_IOCB_TYPE: default: sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(sp, res); return 0; } break; case SA_UPDATE_IOCB_TYPE: case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: return 1; case STATUS_TYPE: sp = qla_get_sp_from_handle(vha, func, req, pkt, &index); if (sp) { sp->done(sp, res); req->outstanding_cmds[index] = NULL; return 0; } break; } fatal: ql_log(ql_log_warn, vha, 0x5030, "Error entry - invalid handle/queue (%04x).\n", que); return 0; } /** * qla24xx_mbx_completion() - Process mailbox command completions. * @vha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint32_t mboxes; __le16 __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* Read all mbox registers? */ WARN_ON_ONCE(ha->mbx_count > 32); mboxes = (1ULL << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); else mboxes = ha->mcp->in_mb; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; mboxes >>= 1; wptr = &reg->mailbox1; for (cnt = 1; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) ha->mailbox_out[cnt] = rd_reg_word(wptr); mboxes >>= 1; wptr++; } } static void qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct abort_entry_24xx *pkt) { const char func[] = "ABT_IOCB"; srb_t *sp; srb_t *orig_sp = NULL; struct srb_iocb *abt; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; abt = &sp->u.iocb_cmd; abt->u.abt.comp_status = pkt->comp_status; orig_sp = sp->cmd_sp; /* Need to pass original sp */ if (orig_sp) qla_nvme_abort_process_comp_status(pkt, orig_sp); sp->done(sp, 0); } void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, struct pt_ls4_request *pkt, struct req_que *req) { srb_t *sp; const char func[] = "LS4_IOCB"; uint16_t comp_status; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; comp_status = le16_to_cpu(pkt->status); sp->done(sp, comp_status); } /** * qla_chk_cont_iocb_avail - check for all continuation iocbs are available * before iocb processing can start. * @vha: host adapter pointer * @rsp: respond queue * @pkt: head iocb describing how many continuation iocb * Return: 0 all iocbs has arrived, xx- all iocbs have not arrived. */ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha, struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in) { int start_pkt_ring_index; u32 iocb_cnt = 0; int rc = 0; if (pkt->entry_count == 1) return rc; /* ring_index was pre-increment. 
set it back to current pkt */ if (rsp->ring_index == 0) start_pkt_ring_index = rsp->length - 1; else start_pkt_ring_index = rsp->ring_index - 1; if (rsp_q_in < start_pkt_ring_index) /* q in ptr is wrapped */ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in; else iocb_cnt = rsp_q_in - start_pkt_ring_index; if (iocb_cnt < pkt->entry_count) rc = -EIO; ql_dbg(ql_dbg_init, vha, 0x5091, "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n", __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc); return rc; } static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct mrk_entry_24xx *pkt) { const char func[] = "MRK-IOCB"; srb_t *sp; int res = QLA_SUCCESS; if (!IS_FWI2_CAPABLE(vha->hw)) return; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; if (pkt->entry_status) { ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n"); res = QLA_COMMAND_ERROR; } sp->u.iocb_cmd.u.tmf.data = res; sp->done(sp, res); } /** * qla24xx_process_response_queue() - Process response queue entries. * @vha: SCSI driver HA context * @rsp: response queue */ void qla24xx_process_response_queue(struct scsi_qla_host *vha, struct rsp_que *rsp) { struct sts_entry_24xx *pkt; struct qla_hw_data *ha = vha->hw; struct purex_entry_24xx *purex_entry; struct purex_item *pure_item; struct pt_ls4_rx_unsol *p; u16 rsp_in = 0, cur_ring_index; int is_shadow_hba; if (!ha->flags.fw_started) return; if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) { rsp->qpair->rcv_intr = 1; if (!rsp->qpair->cpu_mapped) qla_cpu_update(rsp->qpair, raw_smp_processor_id()); } #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \ do { \ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \ } while (0) is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha); __update_rsp_in(is_shadow_hba, rsp, rsp_in); while (rsp->ring_index != rsp_in && rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; cur_ring_index = rsp->ring_index; rsp->ring_index++; if (rsp->ring_index == rsp->length) { rsp->ring_index = 0; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr++; } if (pkt->entry_status != 0) { if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) goto process_err; ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; } process_err: switch (pkt->entry_type) { case STATUS_TYPE: qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_CONT_TYPE: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; case VP_RPT_ID_IOCB_TYPE: qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)pkt); break; case LOGINOUT_PORT_IOCB_TYPE: qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); break; case CT_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); break; case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; case ABTS_RECV_24XX: if (qla_ini_mode_enabled(vha)) { pure_item = qla24xx_copy_std_pkt(vha, pkt); if (!pure_item) break; qla24xx_queue_purex_item(vha, pure_item, qla24xx_process_abts); break; } if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { /* ensure that the ATIO queue is empty */ qlt_handle_abts_recv(vha, rsp, (response_t *)pkt); break; } else { qlt_24xx_process_atio_queue(vha, 1); } fallthrough; case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); break; case PT_LS4_REQUEST: qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, rsp->req); break; 
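		/*
		 * NOTIFY_ACK entries tagged with QLA_TGT_SKIP_HANDLE are
		 * handed to the target code via qlt_response_pkt_all_vps();
		 * all others complete the driver's own NACK srb through
		 * qla24xxx_nack_iocb_entry().
		 */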
case NOTIFY_ACK_TYPE: if (pkt->handle == QLA_TGT_SKIP_HANDLE) qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); else qla24xxx_nack_iocb_entry(vha, rsp->req, (struct nack_to_isp *)pkt); break; case MARKER_TYPE: qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt); break; case ABORT_IOCB_TYPE: qla24xx_abort_iocb_entry(vha, rsp->req, (struct abort_entry_24xx *)pkt); break; case MBX_IOCB_TYPE: qla24xx_mbx_iocb_entry(vha, rsp->req, (struct mbx_24xx_entry *)pkt); break; case VP_CTRL_IOCB_TYPE: qla_ctrlvp_completed(vha, rsp->req, (struct vp_ctrl_entry_24xx *)pkt); break; case PUREX_IOCB_TYPE: purex_entry = (void *)pkt; switch (purex_entry->els_frame_payload[3]) { case ELS_RDP: pure_item = qla24xx_copy_std_pkt(vha, pkt); if (!pure_item) break; qla24xx_queue_purex_item(vha, pure_item, qla24xx_process_purex_rdp); break; case ELS_FPIN: if (!vha->hw->flags.scm_enabled) { ql_log(ql_log_warn, vha, 0x5094, "SCM not active for this port\n"); break; } pure_item = qla27xx_copy_fpin_pkt(vha, (void **)&pkt, &rsp); __update_rsp_in(is_shadow_hba, rsp, rsp_in); if (!pure_item) break; qla24xx_queue_purex_item(vha, pure_item, qla27xx_process_purex_fpin); break; case ELS_AUTH_ELS: if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) { /* * ring_ptr and ring_index were * pre-incremented above. Reset them * back to current. Wait for next * interrupt with all IOCBs to arrive * and re-process. */ rsp->ring_ptr = (response_t *)pkt; rsp->ring_index = cur_ring_index; ql_dbg(ql_dbg_init, vha, 0x5091, "Defer processing ELS opcode %#x...\n", purex_entry->els_frame_payload[3]); return; } qla24xx_auth_els(vha, (void **)&pkt, &rsp); break; default: ql_log(ql_log_warn, vha, 0x509c, "Discarding ELS Request opcode 0x%x\n", purex_entry->els_frame_payload[3]); } break; case SA_UPDATE_IOCB_TYPE: qla28xx_sa_update_iocb_entry(vha, rsp->req, (struct sa_update_28xx *)pkt); break; case PT_LS4_UNSOL: p = (void *)pkt; if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) { rsp->ring_ptr = (response_t *)pkt; rsp->ring_index = cur_ring_index; ql_dbg(ql_dbg_init, vha, 0x2124, "Defer processing UNSOL LS req opcode %#x...\n", p->payload[0]); return; } qla2xxx_process_purls_iocb((void **)&pkt, &rsp); break; default: /* Type Not Supported. 
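		 * The entry is still stamped RESPONSE_PROCESSED below so the
		 * ring index keeps advancing past unknown packet types.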
*/ ql_dbg(ql_dbg_async, vha, 0x5042, "Received unknown response pkt type 0x%x entry status=%x.\n", pkt->entry_type, pkt->entry_status); break; } ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); } /* Adjust ring index */ if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index); } else { wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); } } static void qla2xxx_check_risc_status(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return; rval = QLA_SUCCESS; wrt_reg_dword(&reg->iobase_addr, 0x7C00); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_window, 0x0001); for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) { wrt_reg_dword(&reg->iobase_window, 0x0001); udelay(10); } else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) goto next_test; rval = QLA_SUCCESS; wrt_reg_dword(&reg->iobase_window, 0x0003); for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) { wrt_reg_dword(&reg->iobase_window, 0x0003); udelay(10); } else rval = QLA_FUNCTION_TIMEOUT; } if (rval != QLA_SUCCESS) goto done; next_test: if (rd_reg_dword(&reg->iobase_c8) & BIT_3) ql_log(ql_log_info, vha, 0x504c, "Additional code -- 0x55AA.\n"); done: wrt_reg_dword(&reg->iobase_window, 0x0000); rd_reg_dword(&reg->iobase_window); } /** * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
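 *
 * Reads host_status, dispatches mailbox completions, async events and
 * response/ATIO queue updates based on the low byte of the status, and
 * clears the RISC interrupt through HCCR on each of up to 50 iterations.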
*/ irqreturn_t qla24xx_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_24xx __iomem *reg; int status; unsigned long iter; uint32_t stat; uint32_t hccr; uint16_t mb[8]; struct rsp_que *rsp; unsigned long flags; bool process_atio = false; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x5059, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; status = 0; if (unlikely(pci_channel_offline(ha->pdev))) return IRQ_HANDLED; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = rd_reg_dword(&reg->host_status); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = rd_reg_dword(&reg->hccr); ql_log(ql_log_warn, vha, 0x504b, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); ha->isp_ops->fw_dump(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case INTR_ROM_MB_SUCCESS: case INTR_ROM_MB_FAILED: case INTR_MB_SUCCESS: case INTR_MB_FAILED: qla24xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case INTR_ASYNC_EVENT: mb[0] = MSW(stat); mb[1] = rd_reg_word(&reg->mailbox1); mb[2] = rd_reg_word(&reg->mailbox2); mb[3] = rd_reg_word(&reg->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case INTR_RSP_QUE_UPDATE: case INTR_RSP_QUE_UPDATE_83XX: qla24xx_process_response_queue(vha, rsp); break; case INTR_ATIO_QUE_UPDATE_27XX: case INTR_ATIO_QUE_UPDATE: process_atio = true; break; case INTR_ATIO_RSP_QUE_UPDATE: process_atio = true; qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x504f, "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword_relaxed(&reg->hccr); if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) ndelay(3500); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (process_atio) { spin_lock_irqsave(&ha->tgt.atio_lock, flags); qlt_24xx_process_atio_queue(vha, 0); spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); } return IRQ_HANDLED; } static irqreturn_t qla24xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505a, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword_relaxed(&reg->hccr); } spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } static irqreturn_t qla24xx_msix_default(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; int status; uint32_t stat; uint32_t hccr; uint16_t mb[8]; unsigned long flags; bool process_atio = false; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505c, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = 
pci_get_drvdata(ha->pdev); do { stat = rd_reg_dword(&reg->host_status); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = rd_reg_dword(&reg->hccr); ql_log(ql_log_info, vha, 0x5050, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); vha->hw_err_cnt++; ha->isp_ops->fw_dump(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case INTR_ROM_MB_SUCCESS: case INTR_ROM_MB_FAILED: case INTR_MB_SUCCESS: case INTR_MB_FAILED: qla24xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case INTR_ASYNC_EVENT: mb[0] = MSW(stat); mb[1] = rd_reg_word(&reg->mailbox1); mb[2] = rd_reg_word(&reg->mailbox2); mb[3] = rd_reg_word(&reg->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case INTR_RSP_QUE_UPDATE: case INTR_RSP_QUE_UPDATE_83XX: qla24xx_process_response_queue(vha, rsp); break; case INTR_ATIO_QUE_UPDATE_27XX: case INTR_ATIO_QUE_UPDATE: process_atio = true; break; case INTR_ATIO_RSP_QUE_UPDATE: process_atio = true; qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5051, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); } while (0); qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (process_atio) { spin_lock_irqsave(&ha->tgt.atio_lock, flags); qlt_24xx_process_atio_queue(vha, 0); spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); } return IRQ_HANDLED; } irqreturn_t qla2xxx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct qla_qpair *qpair; qpair = dev_id; if (!qpair) { ql_log(ql_log_info, NULL, 0x505b, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = qpair->hw; queue_work(ha->wq, &qpair->q_work); return IRQ_HANDLED; } irqreturn_t qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) { struct qla_hw_data *ha; struct qla_qpair *qpair; struct device_reg_24xx __iomem *reg; unsigned long flags; qpair = dev_id; if (!qpair) { ql_log(ql_log_info, NULL, 0x505b, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = qpair->hw; reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); queue_work(ha->wq, &qpair->q_work); return IRQ_HANDLED; } /* Interrupt handling helpers. 
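 *
 * msix_entries[] below maps vector names to handlers: the base vectors
 * (default, rsp_q) come first, followed by the ATIO vector and the
 * per-qpair handlers with and without the interrupt handshake.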
*/ struct qla_init_msix_entry { const char *name; irq_handler_t handler; }; static const struct qla_init_msix_entry msix_entries[] = { { "default", qla24xx_msix_default }, { "rsp_q", qla24xx_msix_rsp_q }, { "atio_q", qla83xx_msix_atio_q }, { "qpair_multiq", qla2xxx_msix_rsp_q }, { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, }; static const struct qla_init_msix_entry qla82xx_msix_entries[] = { { "qla2xxx (default)", qla82xx_msix_default }, { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, }; static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { int i, ret; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int min_vecs = QLA_BASE_VECTORS; struct irq_affinity desc = { .pre_vectors = QLA_BASE_VECTORS, }; if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) { desc.pre_vectors++; min_vecs++; } if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { /* user wants to control IRQ setting for target mode */ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), PCI_IRQ_MSIX); } else ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); if (ret < 0) { ql_log(ql_log_fatal, vha, 0x00c7, "MSI-X: Failed to enable support, " "giving up -- %d/%d.\n", ha->msix_count, ret); goto msix_out; } else if (ret < ha->msix_count) { ql_log(ql_log_info, vha, 0x00c6, "MSI-X: Using %d vectors\n", ret); ha->msix_count = ret; /* Recalculate queue values */ if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { ha->max_req_queues = ha->msix_count - 1; /* ATIOQ needs 1 vector. That's 1 less QPair */ if (QLA_TGT_MODE_ENABLED()) ha->max_req_queues--; ha->max_rsp_queues = ha->max_req_queues; ha->max_qpairs = ha->max_req_queues - 1; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); } } vha->irq_offset = desc.pre_vectors; ha->msix_entries = kcalloc(ha->msix_count, sizeof(struct qla_msix_entry), GFP_KERNEL); if (!ha->msix_entries) { ql_log(ql_log_fatal, vha, 0x00c8, "Failed to allocate memory for ha->msix_entries.\n"); ret = -ENOMEM; goto free_irqs; } ha->flags.msix_enabled = 1; for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; qentry->vector = pci_irq_vector(ha->pdev, i); qentry->vector_base0 = i; qentry->entry = i; qentry->have_irq = 0; qentry->in_use = 0; qentry->handle = NULL; } /* Enable MSI-X vectors for the base queue */ for (i = 0; i < QLA_BASE_VECTORS; i++) { qentry = &ha->msix_entries[i]; qentry->handle = rsp; rsp->msix = qentry; scnprintf(qentry->name, sizeof(qentry->name), "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0, qla82xx_msix_entries[i].name, rsp); else ret = request_irq(qentry->vector, msix_entries[i].handler, 0, qentry->name, rsp); if (ret) goto msix_register_fail; qentry->have_irq = 1; qentry->in_use = 1; } /* * If target mode is enable, also request the vector for the ATIO * queue. 
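	 * The ATIO handler (msix_entries[QLA_ATIO_VECTOR]) is registered
	 * against the same response queue (rsp) used for the base vectors.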
*/ if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) { qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; rsp->msix = qentry; qentry->handle = rsp; scnprintf(qentry->name, sizeof(qentry->name), "qla2xxx%lu_%s", vha->host_no, msix_entries[QLA_ATIO_VECTOR].name); qentry->in_use = 1; ret = request_irq(qentry->vector, msix_entries[QLA_ATIO_VECTOR].handler, 0, qentry->name, rsp); qentry->have_irq = 1; } msix_register_fail: if (ret) { ql_log(ql_log_fatal, vha, 0x00cb, "MSI-X: unable to register handler -- %x/%d.\n", qentry->vector, ret); qla2x00_free_irqs(vha); ha->mqenable = 0; goto msix_out; } /* Enable MSI-X vector for response queue update for queue 0 */ if (IS_MQUE_CAPABLE(ha) && (ha->msixbase && ha->mqiobase && ha->max_qpairs)) ha->mqenable = 1; else ha->mqenable = 0; ql_dbg(ql_dbg_multiq, vha, 0xc005, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); ql_dbg(ql_dbg_init, vha, 0x0055, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); msix_out: return ret; free_irqs: pci_free_irq_vectors(ha->pdev); goto msix_out; } int qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) { int ret = QLA_FUNCTION_FAILED; device_reg_t *reg = ha->iobase; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* If possible, enable MSI-X. */ if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) goto skip_msi; if (ql2xenablemsix == 2) goto skip_msix; if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && (ha->pdev->subsystem_device == 0x7040 || ha->pdev->subsystem_device == 0x7041 || ha->pdev->subsystem_device == 0x1705)) { ql_log(ql_log_warn, vha, 0x0034, "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", ha->pdev->subsystem_vendor, ha->pdev->subsystem_device); goto skip_msi; } if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { ql_log(ql_log_warn, vha, 0x0035, "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); goto skip_msix; } ret = qla24xx_enable_msix(ha, rsp); if (!ret) { ql_dbg(ql_dbg_init, vha, 0x0036, "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, ha->fw_attributes); goto clear_risc_ints; } skip_msix: ql_log(ql_log_info, vha, 0x0037, "Falling back-to MSI mode -- ret=%d.\n", ret); if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto skip_msi; ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); if (ret > 0) { ql_dbg(ql_dbg_init, vha, 0x0038, "MSI: Enabled.\n"); ha->flags.msi_enabled = 1; } else ql_log(ql_log_warn, vha, 0x0039, "Falling back-to INTa mode -- ret=%d.\n", ret); skip_msi: /* Skip INTx on ISP82xx. */ if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, ha->flags.msi_enabled ? 
0 : IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); if (ret) { ql_log(ql_log_warn, vha, 0x003a, "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; } else if (!ha->flags.msi_enabled) { ql_dbg(ql_dbg_init, vha, 0x0125, "INTa mode: Enabled.\n"); ha->flags.mr_intr_valid = 1; /* Set max_qpair to 0, as MSI-X and MSI in not enabled */ ha->max_qpairs = 0; } clear_risc_ints: if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) goto fail; spin_lock_irq(&ha->hardware_lock); wrt_reg_word(&reg->isp.semaphore, 0); spin_unlock_irq(&ha->hardware_lock); fail: return ret; } void qla2x00_free_irqs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct rsp_que *rsp; struct qla_msix_entry *qentry; int i; /* * We need to check that ha->rsp_q_map is valid in case we are called * from a probe failure context. */ if (!ha->rsp_q_map || !ha->rsp_q_map[0]) goto free_irqs; rsp = ha->rsp_q_map[0]; if (ha->flags.msix_enabled) { for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; if (qentry->have_irq) { irq_set_affinity_notifier(qentry->vector, NULL); free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); } } kfree(ha->msix_entries); ha->msix_entries = NULL; ha->flags.msix_enabled = 0; ql_dbg(ql_dbg_init, vha, 0x0042, "Disabled MSI-X.\n"); } else { free_irq(pci_irq_vector(ha->pdev, 0), rsp); } free_irqs: pci_free_irq_vectors(ha->pdev); } int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, struct qla_msix_entry *msix, int vector_type) { const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int ret; scnprintf(msix->name, sizeof(msix->name), "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); if (ret) { ql_log(ql_log_fatal, vha, 0x00e6, "MSI-X: Unable to register handler -- %x/%d.\n", msix->vector, ret); return ret; } msix->have_irq = 1; msix->handle = qpair; qla_mapq_init_qp_cpu_map(ha, msix, qpair); return ret; }
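
/*
 * Interrupt setup summary (see qla2x00_request_irqs() above): MSI-X is
 * tried first on capable ISPs, the driver falls back to a single MSI
 * vector if that fails, and finally to a shared legacy INTx line
 * (INTx is skipped on ISP82xx parts).
 */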
linux-master
drivers/scsi/qla2xxx/qla_isr.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_target.h" #include <linux/kthread.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/delay.h> static int qla24xx_vport_disable(struct fc_vport *, bool); /* SYSFS attributes --------------------------------------------------------- */ static ssize_t qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; int rval = 0; if (!(ha->fw_dump_reading || ha->mctp_dump_reading || ha->mpi_fw_dump_reading)) return 0; mutex_lock(&ha->optrom_mutex); if (IS_P3P_TYPE(ha)) { if (off < ha->md_template_size) { rval = memory_read_from_buffer(buf, count, &off, ha->md_tmplt_hdr, ha->md_template_size); } else { off -= ha->md_template_size; rval = memory_read_from_buffer(buf, count, &off, ha->md_dump, ha->md_dump_size); } } else if (ha->mctp_dumped && ha->mctp_dump_reading) { rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump, MCTP_DUMP_SIZE); } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) { rval = memory_read_from_buffer(buf, count, &off, ha->mpi_fw_dump, ha->mpi_fw_dump_len); } else if (ha->fw_dump_reading) { rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump, ha->fw_dump_len); } else { rval = 0; } mutex_unlock(&ha->optrom_mutex); return rval; } static ssize_t qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; int reading; if (off != 0) return (0); reading = simple_strtol(buf, NULL, 10); switch (reading) { case 0: if (!ha->fw_dump_reading) break; ql_log(ql_log_info, vha, 0x705d, "Firmware dump cleared on (%ld).\n", vha->host_no); if (IS_P3P_TYPE(ha)) { qla82xx_md_free(vha); qla82xx_md_prep(vha); } ha->fw_dump_reading = 0; ha->fw_dumped = false; break; case 1: if (ha->fw_dumped && !ha->fw_dump_reading) { ha->fw_dump_reading = 1; ql_log(ql_log_info, vha, 0x705e, "Raw firmware dump ready for read on (%ld).\n", vha->host_no); } break; case 2: qla2x00_alloc_fw_dump(vha); break; case 3: if (IS_QLA82XX(ha)) { qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); } else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); } else { qla2x00_system_error(vha); } break; case 4: if (IS_P3P_TYPE(ha)) { if (ha->md_tmplt_hdr) ql_dbg(ql_dbg_user, vha, 0x705b, "MiniDump supported with this firmware.\n"); else ql_dbg(ql_dbg_user, vha, 0x709d, "MiniDump not supported with this firmware.\n"); } break; case 5: if (IS_P3P_TYPE(ha)) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case 6: if (!ha->mctp_dump_reading) break; ql_log(ql_log_info, vha, 0x70c1, "MCTP dump cleared on (%ld).\n", vha->host_no); ha->mctp_dump_reading = 0; ha->mctp_dumped = 0; break; case 7: if (ha->mctp_dumped && !ha->mctp_dump_reading) { ha->mctp_dump_reading = 1; ql_log(ql_log_info, vha, 0x70c2, "Raw mctp dump ready for read on (%ld).\n", vha->host_no); } break; case 8: if (!ha->mpi_fw_dump_reading) break; ql_log(ql_log_info, vha, 0x70e7, "MPI firmware dump cleared on (%ld).\n", vha->host_no); ha->mpi_fw_dump_reading = 0; ha->mpi_fw_dumped = 0; break; case 9: if 
(ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) { ha->mpi_fw_dump_reading = 1; ql_log(ql_log_info, vha, 0x70e8, "Raw MPI firmware dump ready for read on (%ld).\n", vha->host_no); } break; case 10: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ql_log(ql_log_info, vha, 0x70e9, "Issuing MPI firmware dump on host#%ld.\n", vha->host_no); ha->isp_ops->mpi_fw_dump(vha, 0); } break; } return count; } static struct bin_attribute sysfs_fw_dump_attr = { .attr = { .name = "fw_dump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = qla2x00_sysfs_read_fw_dump, .write = qla2x00_sysfs_write_fw_dump, }; static ssize_t qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; uint32_t faddr; struct active_regions active_regions = { }; if (!capable(CAP_SYS_ADMIN)) return 0; mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } if (!IS_NOCACHE_VPD_TYPE(ha)) { mutex_unlock(&ha->optrom_mutex); goto skip; } faddr = ha->flt_region_nvram; if (IS_QLA28XX(ha)) { qla28xx_get_aux_images(vha, &active_regions); if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_nvram_sec; } ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); mutex_unlock(&ha->optrom_mutex); skip: return memory_read_from_buffer(buf, count, &off, ha->nvram, ha->nvram_size); } static ssize_t qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; uint16_t cnt; if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || !ha->isp_ops->write_nvram) return -EINVAL; /* Checksum NVRAM. */ if (IS_FWI2_CAPABLE(ha)) { __le32 *iter = (__force __le32 *)buf; uint32_t chksum; chksum = 0; for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) chksum += le32_to_cpu(*iter); chksum = ~chksum + 1; *iter = cpu_to_le32(chksum); } else { uint8_t *iter; uint8_t chksum; iter = (uint8_t *)buf; chksum = 0; for (cnt = 0; cnt < count - 1; cnt++) chksum += *iter++; chksum = ~chksum + 1; *iter = chksum; } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x705f, "HBA not online, failing NVRAM update.\n"); return -EAGAIN; } mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } /* Write NVRAM. */ ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count); ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base, count); mutex_unlock(&ha->optrom_mutex); ql_dbg(ql_dbg_user, vha, 0x7060, "Setting ISP_ABORT_NEEDED\n"); /* NVRAM settings take effect immediately. 
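	 * The write path schedules an ISP abort and waits for the chip
	 * reset to complete before returning.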
*/ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); return count; } static struct bin_attribute sysfs_nvram_attr = { .attr = { .name = "nvram", .mode = S_IRUSR | S_IWUSR, }, .size = 512, .read = qla2x00_sysfs_read_nvram, .write = qla2x00_sysfs_write_nvram, }; static ssize_t qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; ssize_t rval = 0; mutex_lock(&ha->optrom_mutex); if (ha->optrom_state != QLA_SREADING) goto out; rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, ha->optrom_region_size); out: mutex_unlock(&ha->optrom_mutex); return rval; } static ssize_t qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; mutex_lock(&ha->optrom_mutex); if (ha->optrom_state != QLA_SWRITING) { mutex_unlock(&ha->optrom_mutex); return -EINVAL; } if (off > ha->optrom_region_size) { mutex_unlock(&ha->optrom_mutex); return -ERANGE; } if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; memcpy(&ha->optrom_buffer[off], buf, count); mutex_unlock(&ha->optrom_mutex); return count; } static struct bin_attribute sysfs_optrom_attr = { .attr = { .name = "optrom", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = qla2x00_sysfs_read_optrom, .write = qla2x00_sysfs_write_optrom, }; static ssize_t qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; uint32_t start = 0; uint32_t size = ha->optrom_size; int val, valid; ssize_t rval = count; if (off) return -EINVAL; if (unlikely(pci_channel_offline(ha->pdev))) return -EAGAIN; if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) return -EINVAL; if (start > ha->optrom_size) return -EINVAL; if (size > ha->optrom_size - start) size = ha->optrom_size - start; mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } switch (val) { case 0: if (ha->optrom_state != QLA_SREADING && ha->optrom_state != QLA_SWRITING) { rval = -EINVAL; goto out; } ha->optrom_state = QLA_SWAITING; ql_dbg(ql_dbg_user, vha, 0x7061, "Freeing flash region allocation -- 0x%x bytes.\n", ha->optrom_region_size); vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; break; case 1: if (ha->optrom_state != QLA_SWAITING) { rval = -EINVAL; goto out; } ha->optrom_region_start = start; ha->optrom_region_size = size; ha->optrom_state = QLA_SREADING; ha->optrom_buffer = vzalloc(ha->optrom_region_size); if (ha->optrom_buffer == NULL) { ql_log(ql_log_warn, vha, 0x7062, "Unable to allocate memory for optrom retrieval " "(%x).\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; rval = -ENOMEM; goto out; } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7063, "HBA not online, failing NVRAM update.\n"); rval = -EAGAIN; goto out; } ql_dbg(ql_dbg_user, vha, 0x7064, "Reading flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); ha->isp_ops->read_optrom(vha, 
ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); break; case 2: if (ha->optrom_state != QLA_SWAITING) { rval = -EINVAL; goto out; } /* * We need to be more restrictive on which FLASH regions are * allowed to be updated via user-space. Regions accessible * via this method include: * * ISP21xx/ISP22xx/ISP23xx type boards: * * 0x000000 -> 0x020000 -- Boot code. * * ISP2322/ISP24xx type boards: * * 0x000000 -> 0x07ffff -- Boot code. * 0x080000 -> 0x0fffff -- Firmware. * * ISP25xx type boards: * * 0x000000 -> 0x07ffff -- Boot code. * 0x080000 -> 0x0fffff -- Firmware. * 0x120000 -> 0x12ffff -- VPD and HBA parameters. * * > ISP25xx type boards: * * None -- should go through BSG. */ valid = 0; if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) valid = 1; if (!valid) { ql_log(ql_log_warn, vha, 0x7065, "Invalid start region 0x%x/0x%x.\n", start, size); rval = -EINVAL; goto out; } ha->optrom_region_start = start; ha->optrom_region_size = size; ha->optrom_state = QLA_SWRITING; ha->optrom_buffer = vzalloc(ha->optrom_region_size); if (ha->optrom_buffer == NULL) { ql_log(ql_log_warn, vha, 0x7066, "Unable to allocate memory for optrom update " "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; rval = -ENOMEM; goto out; } ql_dbg(ql_dbg_user, vha, 0x7067, "Staging flash region write -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); break; case 3: if (ha->optrom_state != QLA_SWRITING) { rval = -EINVAL; goto out; } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7068, "HBA not online, failing flash update.\n"); rval = -EAGAIN; goto out; } ql_dbg(ql_dbg_user, vha, 0x7069, "Writing flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); if (rval) rval = -EIO; break; default: rval = -EINVAL; } out: mutex_unlock(&ha->optrom_mutex); return rval; } static struct bin_attribute sysfs_optrom_ctl_attr = { .attr = { .name = "optrom_ctl", .mode = S_IWUSR, }, .size = 0, .write = qla2x00_sysfs_write_optrom_ctl, }; static ssize_t qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; uint32_t faddr; struct active_regions active_regions = { }; if (unlikely(pci_channel_offline(ha->pdev))) return -EAGAIN; if (!capable(CAP_SYS_ADMIN)) return -EINVAL; if (!IS_NOCACHE_VPD_TYPE(ha)) goto skip; faddr = ha->flt_region_vpd << 2; if (IS_QLA28XX(ha)) { qla28xx_get_aux_images(vha, &active_regions); if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_vpd_sec << 2; ql_dbg(ql_dbg_init, vha, 0x7070, "Loading %s nvram image.\n", active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? 
"primary" : "secondary"); } mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); mutex_unlock(&ha->optrom_mutex); ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); skip: return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); } static ssize_t qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; uint8_t *tmp_data; if (unlikely(pci_channel_offline(ha->pdev))) return 0; if (qla2x00_chip_is_down(vha)) return 0; if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || !ha->isp_ops->write_nvram) return 0; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x706a, "HBA not online, failing VPD update.\n"); return -EAGAIN; } mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } /* Write NVRAM. */ ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count); ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count); /* Update flash version information for 4Gb & above. */ if (!IS_FWI2_CAPABLE(ha)) { mutex_unlock(&ha->optrom_mutex); return -EINVAL; } tmp_data = vmalloc(256); if (!tmp_data) { mutex_unlock(&ha->optrom_mutex); ql_log(ql_log_warn, vha, 0x706b, "Unable to allocate memory for VPD information update.\n"); return -ENOMEM; } ha->isp_ops->get_flash_version(vha, tmp_data); vfree(tmp_data); mutex_unlock(&ha->optrom_mutex); return count; } static struct bin_attribute sysfs_vpd_attr = { .attr = { .name = "vpd", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = qla2x00_sysfs_read_vpd, .write = qla2x00_sysfs_write_vpd, }; static ssize_t qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); int rval; if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) return 0; mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); return 0; } rval = qla2x00_read_sfp_dev(vha, buf, count); mutex_unlock(&vha->hw->optrom_mutex); if (rval) return -EIO; return count; } static struct bin_attribute sysfs_sfp_attr = { .attr = { .name = "sfp", .mode = S_IRUSR | S_IWUSR, }, .size = SFP_DEV_SIZE, .read = qla2x00_sysfs_read_sfp, }; static ssize_t qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int type; uint32_t idc_control; uint8_t *tmp_data = NULL; if (off != 0) return -EINVAL; type = simple_strtol(buf, NULL, 10); switch (type) { case 0x2025c: ql_log(ql_log_info, vha, 0x706e, "Issuing ISP reset.\n"); if (vha->hw->flags.port_isolated) { ql_log(ql_log_info, vha, 0x706e, "Port is isolated, returning.\n"); return -EINVAL; } scsi_block_requests(vha->host); if (IS_QLA82XX(ha)) { ha->flags.isp82xx_no_md_cap = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); } else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); idc_control = qla8044_rd_reg(ha, 
QLA8044_IDC_DRV_CTRL); qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, (idc_control | GRACEFUL_RESET_BIT1)); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); } else { set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); break; case 0x2025d: if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); if (IS_QLA83XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, &idc_control); idc_control |= QLA83XX_IDC_GRACEFUL_RESET; __qla83xx_set_idc_control(vha, idc_control); qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_NEED_RESET); qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); qla83xx_idc_unlock(vha, 0); break; } else { /* Make sure FC side is not in reset */ WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS); /* Issue MPI reset */ scsi_block_requests(vha->host); if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7070, "MPI reset failed.\n"); scsi_unblock_requests(vha->host); break; } break; case 0x2025e: if (!IS_P3P_TYPE(ha) || vha != base_vha) { ql_log(ql_log_info, vha, 0x7071, "FCoE ctx reset not supported.\n"); return -EPERM; } ql_log(ql_log_info, vha, 0x7072, "Issuing FCoE ctx reset.\n"); set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_fcoe_ctx_reset(vha); break; case 0x2025f: if (!IS_QLA8031(ha)) return -EPERM; ql_log(ql_log_info, vha, 0x70bc, "Disabling Reset by IDC control\n"); qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, &idc_control); idc_control |= QLA83XX_IDC_RESET_DISABLED; __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); break; case 0x20260: if (!IS_QLA8031(ha)) return -EPERM; ql_log(ql_log_info, vha, 0x70bd, "Enabling Reset by IDC control\n"); qla83xx_idc_lock(vha, 0); __qla83xx_get_idc_control(vha, &idc_control); idc_control &= ~QLA83XX_IDC_RESET_DISABLED; __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); break; case 0x20261: ql_dbg(ql_dbg_user, vha, 0x70e0, "Updating cache versions without reset "); tmp_data = vmalloc(256); if (!tmp_data) { ql_log(ql_log_warn, vha, 0x70e1, "Unable to allocate memory for VPD information update.\n"); return -ENOMEM; } ha->isp_ops->get_flash_version(vha, tmp_data); vfree(tmp_data); break; } return count; } static struct bin_attribute sysfs_reset_attr = { .attr = { .name = "reset", .mode = S_IWUSR, }, .size = 0, .write = qla2x00_sysfs_write_reset, }; static ssize_t qla2x00_issue_logo(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); int type; port_id_t did; if (!capable(CAP_SYS_ADMIN)) return 0; if (unlikely(pci_channel_offline(vha->hw->pdev))) return 0; if (qla2x00_chip_is_down(vha)) return 0; type = simple_strtol(buf, NULL, 10); did.b.domain = (type & 0x00ff0000) >> 16; did.b.area = (type & 0x0000ff00) >> 8; did.b.al_pa = (type & 0x000000ff); ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n", did.b.domain, did.b.area, did.b.al_pa); ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); return count; } static struct bin_attribute sysfs_issue_logo_attr = { .attr = { .name = "issue_logo", .mode = S_IWUSR, }, .size = 0, .write = qla2x00_issue_logo, }; static ssize_t 
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; int rval; uint16_t actual_size; if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) return 0; if (unlikely(pci_channel_offline(ha->pdev))) return 0; mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); return 0; } if (ha->xgmac_data) goto do_read; ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, &ha->xgmac_data_dma, GFP_KERNEL); if (!ha->xgmac_data) { mutex_unlock(&vha->hw->optrom_mutex); ql_log(ql_log_warn, vha, 0x7076, "Unable to allocate memory for XGMAC read-data.\n"); return 0; } do_read: actual_size = 0; memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, XGMAC_DATA_SIZE, &actual_size); mutex_unlock(&vha->hw->optrom_mutex); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7077, "Unable to read XGMAC data (%x).\n", rval); count = 0; } count = actual_size > count ? count : actual_size; memcpy(buf, ha->xgmac_data, count); return count; } static struct bin_attribute sysfs_xgmac_stats_attr = { .attr = { .name = "xgmac_stats", .mode = S_IRUSR, }, .size = 0, .read = qla2x00_sysfs_read_xgmac_stats, }; static ssize_t qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; int rval; if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) return 0; mutex_lock(&vha->hw->optrom_mutex); if (ha->dcbx_tlv) goto do_read; if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); return 0; } ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, &ha->dcbx_tlv_dma, GFP_KERNEL); if (!ha->dcbx_tlv) { mutex_unlock(&vha->hw->optrom_mutex); ql_log(ql_log_warn, vha, 0x7078, "Unable to allocate memory for DCBX TLV read-data.\n"); return -ENOMEM; } do_read: memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, DCBX_TLV_DATA_SIZE); mutex_unlock(&vha->hw->optrom_mutex); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7079, "Unable to read DCBX TLV (%x).\n", rval); return -EIO; } memcpy(buf, ha->dcbx_tlv, count); return count; } static struct bin_attribute sysfs_dcbx_tlv_attr = { .attr = { .name = "dcbx_tlv", .mode = S_IRUSR, }, .size = 0, .read = qla2x00_sysfs_read_dcbx_tlv, }; static struct sysfs_entry { char *name; struct bin_attribute *attr; int type; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr, }, { "nvram", &sysfs_nvram_attr, }, { "optrom", &sysfs_optrom_attr, }, { "optrom_ctl", &sysfs_optrom_ctl_attr, }, { "vpd", &sysfs_vpd_attr, 1 }, { "sfp", &sysfs_sfp_attr, 1 }, { "reset", &sysfs_reset_attr, }, { "issue_logo", &sysfs_issue_logo_attr, }, { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, { NULL }, }; void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) { struct Scsi_Host *host = vha->host; struct sysfs_entry *iter; int ret; for (iter = bin_file_entries; iter->name; iter++) { if (iter->type && !IS_FWI2_CAPABLE(vha->hw)) continue; if (iter->type == 2 && !IS_QLA25XX(vha->hw)) continue; if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw))) continue; ret = 
sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); if (ret) ql_log(ql_log_warn, vha, 0x00f3, "Unable to create sysfs %s binary attribute (%d).\n", iter->name, ret); else ql_dbg(ql_dbg_init, vha, 0x00f4, "Successfully created sysfs %s binary attribute.\n", iter->name); } } void qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) { struct Scsi_Host *host = vha->host; struct sysfs_entry *iter; struct qla_hw_data *ha = vha->hw; for (iter = bin_file_entries; iter->name; iter++) { if (iter->type && !IS_FWI2_CAPABLE(ha)) continue; if (iter->type == 2 && !IS_QLA25XX(ha)) continue; if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); } if (stop_beacon && ha->beacon_blink_led == 1) ha->isp_ops->beacon_off(vha); } /* Scsi_Host attributes. */ static ssize_t qla2x00_driver_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); } static ssize_t qla2x00_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; char fw_str[128]; return scnprintf(buf, PAGE_SIZE, "%s\n", ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str))); } static ssize_t qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; uint32_t sn; if (IS_QLAFX00(vha->hw)) { return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->mr.serial_num); } else if (IS_FWI2_CAPABLE(ha)) { qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1); return strlen(strcat(buf, "\n")); } sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, sn % 100000); } static ssize_t qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); } static ssize_t qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(vha->hw)) return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->mr.hw_version); return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", ha->product_id[0], ha->product_id[1], ha->product_id[2], ha->product_id[3]); } static ssize_t qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); } static ssize_t qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc); } static ssize_t qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); char pci_info[30]; return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->isp_ops->pci_info_str(vha, pci_info, sizeof(pci_info))); } static ssize_t qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; int len = 0; if (atomic_read(&vha->loop_state) == LOOP_DOWN || atomic_read(&vha->loop_state) == 
LOOP_DEAD || vha->device_flags & DFLG_NO_CABLE) len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); else if (atomic_read(&vha->loop_state) != LOOP_READY || qla2x00_chip_is_down(vha)) len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); else { len = scnprintf(buf, PAGE_SIZE, "Link Up - "); switch (ha->current_topology) { case ISP_CFG_NL: len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); break; case ISP_CFG_FL: len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); break; case ISP_CFG_N: len += scnprintf(buf + len, PAGE_SIZE-len, "N_Port to N_Port\n"); break; case ISP_CFG_F: len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); break; default: len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); break; } } return len; } static ssize_t qla2x00_zio_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int len = 0; switch (vha->hw->zio_mode) { case QLA_ZIO_MODE_6: len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); break; case QLA_ZIO_DISABLED: len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); break; } return len; } static ssize_t qla2x00_zio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; int val = 0; uint16_t zio_mode; if (!IS_ZIO_SUPPORTED(ha)) return -ENOTSUPP; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; if (val) zio_mode = QLA_ZIO_MODE_6; else zio_mode = QLA_ZIO_DISABLED; /* Update per-hba values and queue a reset. */ if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = zio_mode; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } return strlen(buf); } static ssize_t qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); } static ssize_t qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int val = 0; uint16_t zio_timer; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; if (val > 25500 || val < 100) return -ERANGE; zio_timer = (uint16_t)(val / 100); vha->hw->zio_timer = zio_timer; return strlen(buf); } static ssize_t qla_zio_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%d exchanges\n", vha->hw->last_zio_threshold); } static ssize_t qla_zio_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int val = 0; if (vha->hw->zio_mode != QLA_ZIO_MODE_6) return -EINVAL; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; if (val < 0 || val > 256) return -ERANGE; atomic_set(&vha->hw->zio_threshold, val); return strlen(buf); } static ssize_t qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int len = 0; if (vha->hw->beacon_blink_led) len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); else len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); return len; } static ssize_t qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; 
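	/*
	 * Beacon writes are rejected on ISP2100/ISP2200 and while the
	 * chip is down (ISP abort active).
	 */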
int val = 0; int rval; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return -EPERM; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); ql_log(ql_log_warn, vha, 0x707a, "Abort ISP active -- ignoring beacon request.\n"); return -EBUSY; } if (val) rval = ha->isp_ops->beacon_on(vha); else rval = ha->isp_ops->beacon_off(vha); if (rval != QLA_SUCCESS) count = 0; mutex_unlock(&vha->hw->optrom_mutex); return count; } static ssize_t qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; uint16_t led[3] = { 0 }; if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; if (ql26xx_led_config(vha, 0, led)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n", led[0], led[1], led[2]); } static ssize_t qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; uint16_t options = BIT_0; uint16_t led[3] = { 0 }; uint16_t word[4]; int n; if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3); if (n == 4) { if (word[0] == 3) { options |= BIT_3|BIT_2|BIT_1; led[0] = word[1]; led[1] = word[2]; led[2] = word[3]; goto write; } return -EINVAL; } if (n == 2) { /* check led index */ if (word[0] == 0) { options |= BIT_2; led[0] = word[1]; goto write; } if (word[0] == 1) { options |= BIT_3; led[1] = word[1]; goto write; } if (word[0] == 2) { options |= BIT_1; led[2] = word[1]; goto write; } return -EINVAL; } return -EINVAL; write: if (ql26xx_led_config(vha, options, led)) return -EFAULT; return count; } static ssize_t qla2x00_optrom_bios_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], ha->bios_revision[0]); } static ssize_t qla2x00_optrom_efi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], ha->efi_revision[0]); } static ssize_t qla2x00_optrom_fcode_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], ha->fcode_revision[0]); } static ssize_t qla2x00_optrom_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } static ssize_t qla2x00_optrom_gold_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", ha->gold_fw_version[0], 
ha->gold_fw_version[1], ha->gold_fw_version[2], ha->gold_fw_version[3]); } static ssize_t qla2x00_total_isp_aborts_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%d\n", vha->qla_stats.total_isp_aborts); } static ssize_t qla24xx_84xx_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { int rval = QLA_SUCCESS; uint16_t status[2] = { 0 }; scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA84XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); if (!ha->cs84xx->op_fw_version) { rval = qla84xx_verify_chip(vha, status); if (!rval && !status[0]) return scnprintf(buf, PAGE_SIZE, "%u\n", (uint32_t)ha->cs84xx->op_fw_version); } return scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", ha->serdes_version[0], ha->serdes_version[1], ha->serdes_version[2]); } static ssize_t qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2], ha->mpi_capabilities); } static ssize_t qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]); } static ssize_t qla2x00_flash_block_size_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); } static ssize_t qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_CNA_CAPABLE(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); } static ssize_t qla2x00_vn_port_mac_address_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_CNA_CAPABLE(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); } static ssize_t qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); } static ssize_t qla2x00_thermal_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); uint16_t temp = 0; int rc; mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); goto 
done; } if (vha->hw->flags.eeh_busy) { mutex_unlock(&vha->hw->optrom_mutex); ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); goto done; } rc = qla2x00_get_thermal_temp(vha, &temp); mutex_unlock(&vha->hw->optrom_mutex); if (rc == QLA_SUCCESS) return scnprintf(buf, PAGE_SIZE, "%d\n", temp); done: return scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int rval = QLA_FUNCTION_FAILED; uint16_t state[6]; uint32_t pstate; if (IS_QLAFX00(vha->hw)) { pstate = qlafx00_fw_state_show(dev, attr, buf); return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); } mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); ql_log(ql_log_warn, vha, 0x707c, "ISP reset active.\n"); goto out; } else if (vha->hw->flags.eeh_busy) { mutex_unlock(&vha->hw->optrom_mutex); goto out; } rval = qla2x00_get_firmware_state(vha, state); mutex_unlock(&vha->hw->optrom_mutex); out: if (rval != QLA_SUCCESS) { memset(state, -1, sizeof(state)); rval = qla2x00_get_firmware_state(vha, state); } return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], state[1], state[2], state[3], state[4], state[5]); } static ssize_t qla2x00_diag_requests_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_BIDI_CAPABLE(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); } static ssize_t qla2x00_diag_megabytes_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_BIDI_CAPABLE(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.transfer_bytes >> 20); } static ssize_t qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; uint32_t size; if (!ha->fw_dumped) size = 0; else if (IS_P3P_TYPE(ha)) size = ha->md_template_size + ha->md_dump_size; else size = ha->fw_dump_len; return scnprintf(buf, PAGE_SIZE, "%d\n", size); } static ssize_t qla2x00_allow_cna_fw_dump_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_P3P_TYPE(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); else return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->allow_cna_fw_dump ? 
"true" : "false"); } static ssize_t qla2x00_allow_cna_fw_dump_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int val = 0; if (!IS_P3P_TYPE(vha->hw)) return -EINVAL; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; vha->hw->allow_cna_fw_dump = val != 0; return strlen(buf); } static ssize_t qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); } static ssize_t qla2x00_min_supported_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%s\n", ha->min_supported_speed == 6 ? "64Gps" : ha->min_supported_speed == 5 ? "32Gps" : ha->min_supported_speed == 4 ? "16Gps" : ha->min_supported_speed == 3 ? "8Gps" : ha->min_supported_speed == 2 ? "4Gps" : ha->min_supported_speed != 0 ? "unknown" : ""); } static ssize_t qla2x00_max_supported_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%s\n", ha->max_supported_speed == 2 ? "64Gps" : ha->max_supported_speed == 1 ? "32Gps" : ha->max_supported_speed == 0 ? "16Gps" : "unknown"); } static ssize_t qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); ulong type, speed; int oldspeed, rval; int mode = QLA_SET_DATA_RATE_LR; struct qla_hw_data *ha = vha->hw; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { ql_log(ql_log_warn, vha, 0x70d8, "Speed setting not supported \n"); return -EINVAL; } rval = kstrtol(buf, 10, &type); if (rval) return rval; speed = type; if (type == 40 || type == 80 || type == 160 || type == 320) { ql_dbg(ql_dbg_user, vha, 0x70d9, "Setting will be affected after a loss of sync\n"); type = type/10; mode = QLA_SET_DATA_RATE_NOLR; } oldspeed = ha->set_data_rate; switch (type) { case 0: ha->set_data_rate = PORT_SPEED_AUTO; break; case 4: ha->set_data_rate = PORT_SPEED_4GB; break; case 8: ha->set_data_rate = PORT_SPEED_8GB; break; case 16: ha->set_data_rate = PORT_SPEED_16GB; break; case 32: ha->set_data_rate = PORT_SPEED_32GB; break; default: ql_log(ql_log_warn, vha, 0x1199, "Unrecognized speed setting:%lx. 
Setting Autoneg\n", speed); ha->set_data_rate = PORT_SPEED_AUTO; } if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate)) return -EINVAL; ql_log(ql_log_info, vha, 0x70da, "Setting speed to %lx Gbps \n", type); rval = qla2x00_set_data_rate(vha, mode); if (rval != QLA_SUCCESS) return -EIO; return strlen(buf); } static const struct { u16 rate; char *str; } port_speed_str[] = { { PORT_SPEED_4GB, "4" }, { PORT_SPEED_8GB, "8" }, { PORT_SPEED_16GB, "16" }, { PORT_SPEED_32GB, "32" }, { PORT_SPEED_64GB, "64" }, { PORT_SPEED_10GB, "10" }, }; static ssize_t qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); struct qla_hw_data *ha = vha->hw; ssize_t rval; u16 i; char *speed = "Unknown"; rval = qla2x00_get_data_rate(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x70db, "Unable to get port speed rval:%zd\n", rval); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) { if (port_speed_str[i].rate != ha->link_data_rate) continue; speed = port_speed_str[i].str; break; } return scnprintf(buf, PAGE_SIZE, "%s\n", speed); } static ssize_t qla2x00_mpi_pause_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int rval = 0; if (sscanf(buf, "%d", &rval) != 1) return -EINVAL; ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n"); rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n"); count = 0; } return count; } static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store); /* ----- */ static ssize_t qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int len = 0; len += scnprintf(buf + len, PAGE_SIZE-len, "Supported options: enabled | disabled | dual | exclusive\n"); /* --- */ len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: "); switch (vha->qlini_mode) { case QLA2XXX_INI_MODE_EXCLUSIVE: len += scnprintf(buf + len, PAGE_SIZE-len, QLA2XXX_INI_MODE_STR_EXCLUSIVE); break; case QLA2XXX_INI_MODE_DISABLED: len += scnprintf(buf + len, PAGE_SIZE-len, QLA2XXX_INI_MODE_STR_DISABLED); break; case QLA2XXX_INI_MODE_ENABLED: len += scnprintf(buf + len, PAGE_SIZE-len, QLA2XXX_INI_MODE_STR_ENABLED); break; case QLA2XXX_INI_MODE_DUAL: len += scnprintf(buf + len, PAGE_SIZE-len, QLA2XXX_INI_MODE_STR_DUAL); break; } len += scnprintf(buf + len, PAGE_SIZE-len, "\n"); return len; } static char *mode_to_str[] = { "exclusive", "disabled", "enabled", "dual", }; #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT) static void qla_set_ini_mode(scsi_qla_host_t *vha, int op) { enum { NO_ACTION, MODE_CHANGE_ACCEPT, MODE_CHANGE_NO_ACTION, TARGET_STILL_ACTIVE, }; int action = NO_ACTION; int set_mode = 0; u8 eo_toggle = 0; /* exchange offload flipped */ switch (vha->qlini_mode) { case QLA2XXX_INI_MODE_DISABLED: switch (op) { case QLA2XXX_INI_MODE_DISABLED: if (qla_tgt_mode_enabled(vha)) { if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if (((vha->ql2xexchoffld != vha->u_ql2xexchoffld) && NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || eo_toggle) { /* * The number of exchange to be offload * was tweaked or offload option was * flipped */ action = MODE_CHANGE_ACCEPT; } else { action = MODE_CHANGE_NO_ACTION; } } else { action = MODE_CHANGE_NO_ACTION; } break; case 
QLA2XXX_INI_MODE_EXCLUSIVE: if (qla_tgt_mode_enabled(vha)) { if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if (((vha->ql2xexchoffld != vha->u_ql2xexchoffld) && NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || eo_toggle) { /* * The number of exchange to be offload * was tweaked or offload option was * flipped */ action = MODE_CHANGE_ACCEPT; } else { action = MODE_CHANGE_NO_ACTION; } } else { action = MODE_CHANGE_ACCEPT; } break; case QLA2XXX_INI_MODE_DUAL: action = MODE_CHANGE_ACCEPT; /* active_mode is target only, reset it to dual */ if (qla_tgt_mode_enabled(vha)) { set_mode = 1; action = MODE_CHANGE_ACCEPT; } else { action = MODE_CHANGE_NO_ACTION; } break; case QLA2XXX_INI_MODE_ENABLED: if (qla_tgt_mode_enabled(vha)) action = TARGET_STILL_ACTIVE; else { action = MODE_CHANGE_ACCEPT; set_mode = 1; } break; } break; case QLA2XXX_INI_MODE_EXCLUSIVE: switch (op) { case QLA2XXX_INI_MODE_EXCLUSIVE: if (qla_tgt_mode_enabled(vha)) { if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if (((vha->ql2xexchoffld != vha->u_ql2xexchoffld) && NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || eo_toggle) /* * The number of exchange to be offload * was tweaked or offload option was * flipped */ action = MODE_CHANGE_ACCEPT; else action = NO_ACTION; } else action = NO_ACTION; break; case QLA2XXX_INI_MODE_DISABLED: if (qla_tgt_mode_enabled(vha)) { if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if (((vha->ql2xexchoffld != vha->u_ql2xexchoffld) && NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) || eo_toggle) action = MODE_CHANGE_ACCEPT; else action = MODE_CHANGE_NO_ACTION; } else action = MODE_CHANGE_NO_ACTION; break; case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */ if (qla_tgt_mode_enabled(vha)) { action = MODE_CHANGE_ACCEPT; set_mode = 1; } else action = MODE_CHANGE_ACCEPT; break; case QLA2XXX_INI_MODE_ENABLED: if (qla_tgt_mode_enabled(vha)) action = TARGET_STILL_ACTIVE; else { if (vha->hw->flags.fw_started) action = MODE_CHANGE_NO_ACTION; else action = MODE_CHANGE_ACCEPT; } break; } break; case QLA2XXX_INI_MODE_ENABLED: switch (op) { case QLA2XXX_INI_MODE_ENABLED: if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) && NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) || eo_toggle) action = MODE_CHANGE_ACCEPT; else action = NO_ACTION; break; case QLA2XXX_INI_MODE_DUAL: case QLA2XXX_INI_MODE_DISABLED: action = MODE_CHANGE_ACCEPT; break; default: action = MODE_CHANGE_NO_ACTION; break; } break; case QLA2XXX_INI_MODE_DUAL: switch (op) { case QLA2XXX_INI_MODE_DUAL: if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + vha->u_ql2xiniexchg) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if ((((vha->ql2xexchoffld + vha->ql2xiniexchg) != (vha->u_ql2xiniexchg + vha->u_ql2xexchoffld)) && NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + vha->u_ql2xexchoffld)) || eo_toggle) action = MODE_CHANGE_ACCEPT; else action = NO_ACTION; } else { if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld + vha->u_ql2xiniexchg) != vha->hw->flags.exchoffld_enabled) eo_toggle = 1; if ((((vha->ql2xexchoffld + vha->ql2xiniexchg) != (vha->u_ql2xiniexchg + vha->u_ql2xexchoffld)) && NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg + vha->u_ql2xexchoffld)) || eo_toggle) action = MODE_CHANGE_NO_ACTION; else action = NO_ACTION; } break; case QLA2XXX_INI_MODE_DISABLED: if (qla_tgt_mode_enabled(vha) || 
qla_dual_mode_enabled(vha)) { /* turning off initiator mode */ set_mode = 1; action = MODE_CHANGE_ACCEPT; } else { action = MODE_CHANGE_NO_ACTION; } break; case QLA2XXX_INI_MODE_EXCLUSIVE: if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { set_mode = 1; action = MODE_CHANGE_ACCEPT; } else { action = MODE_CHANGE_ACCEPT; } break; case QLA2XXX_INI_MODE_ENABLED: if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { action = TARGET_STILL_ACTIVE; } else { action = MODE_CHANGE_ACCEPT; } } break; } switch (action) { case MODE_CHANGE_ACCEPT: ql_log(ql_log_warn, vha, 0xffff, "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", mode_to_str[vha->qlini_mode], mode_to_str[op], vha->ql2xexchoffld, vha->u_ql2xexchoffld, vha->ql2xiniexchg, vha->u_ql2xiniexchg); vha->qlini_mode = op; vha->ql2xexchoffld = vha->u_ql2xexchoffld; vha->ql2xiniexchg = vha->u_ql2xiniexchg; if (set_mode) qlt_set_mode(vha); vha->flags.online = 1; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MODE_CHANGE_NO_ACTION: ql_log(ql_log_warn, vha, 0xffff, "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n", mode_to_str[vha->qlini_mode], mode_to_str[op], vha->ql2xexchoffld, vha->u_ql2xexchoffld, vha->ql2xiniexchg, vha->u_ql2xiniexchg); vha->qlini_mode = op; vha->ql2xexchoffld = vha->u_ql2xexchoffld; vha->ql2xiniexchg = vha->u_ql2xiniexchg; break; case TARGET_STILL_ACTIVE: ql_log(ql_log_warn, vha, 0xffff, "Target Mode is active. Unable to change Mode.\n"); break; case NO_ACTION: default: ql_log(ql_log_warn, vha, 0xffff, "Mode unchange. No action taken. %d|%d pct %d|%d.\n", vha->qlini_mode, op, vha->ql2xexchoffld, vha->u_ql2xexchoffld); break; } } static ssize_t qlini_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int ini; if (!buf) return -EINVAL; if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf, strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0) ini = QLA2XXX_INI_MODE_EXCLUSIVE; else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf, strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0) ini = QLA2XXX_INI_MODE_DISABLED; else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf, strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0) ini = QLA2XXX_INI_MODE_ENABLED; else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf, strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0) ini = QLA2XXX_INI_MODE_DUAL; else return -EINVAL; qla_set_ini_mode(vha, ini); return strlen(buf); } static ssize_t ql2xexchoffld_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int len = 0; len += scnprintf(buf + len, PAGE_SIZE-len, "target exchange: new %d : current: %d\n\n", vha->u_ql2xexchoffld, vha->ql2xexchoffld); len += scnprintf(buf + len, PAGE_SIZE-len, "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n", vha->host_no); return len; } static ssize_t ql2xexchoffld_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int val = 0; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; if (val > FW_MAX_EXCHANGES_CNT) val = FW_MAX_EXCHANGES_CNT; else if (val < 0) val = 0; vha->u_ql2xexchoffld = val; return strlen(buf); } static ssize_t ql2xiniexchg_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int len = 0; len += scnprintf(buf + len, 
PAGE_SIZE-len, "target exchange: new %d : current: %d\n\n", vha->u_ql2xiniexchg, vha->ql2xiniexchg); len += scnprintf(buf + len, PAGE_SIZE-len, "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n", vha->host_no); return len; } static ssize_t ql2xiniexchg_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int val = 0; if (sscanf(buf, "%d", &val) != 1) return -EINVAL; if (val > FW_MAX_EXCHANGES_CNT) val = FW_MAX_EXCHANGES_CNT; else if (val < 0) val = 0; vha->u_ql2xiniexchg = val; return strlen(buf); } static ssize_t qla2x00_dif_bundle_statistics_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; return scnprintf(buf, PAGE_SIZE, "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n", ha->dif_bundle_crossed_pages, ha->dif_bundle_reads, ha->dif_bundle_writes, ha->dif_bundle_kallocs, ha->dif_bundle_dma_allocs, ha->pool.unusable.count); } static ssize_t qla2x00_fw_attr_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%llx\n", (uint64_t)ha->fw_attributes_ext[1] << 48 | (uint64_t)ha->fw_attributes_ext[0] << 32 | (uint64_t)ha->fw_attributes_h << 16 | (uint64_t)ha->fw_attributes); } static ssize_t qla2x00_port_no_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no); } static ssize_t qla2x00_dport_diagnostics_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); if (!*vha->dport_data) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", vha->dport_data[0], vha->dport_data[1], vha->dport_data[2], vha->dport_data[3]); } static DEVICE_ATTR(dport_diagnostics, 0444, qla2x00_dport_diagnostics_show, NULL); static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL); static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL); static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL); static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL); static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL); static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL); static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store); static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, qla2x00_zio_timer_store); static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, qla2x00_beacon_store); static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show, qla2x00_beacon_config_store); static DEVICE_ATTR(optrom_bios_version, S_IRUGO, qla2x00_optrom_bios_version_show, NULL); static DEVICE_ATTR(optrom_efi_version, S_IRUGO, qla2x00_optrom_efi_version_show, 
NULL); static DEVICE_ATTR(optrom_fcode_version, S_IRUGO, qla2x00_optrom_fcode_version_show, NULL); static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, NULL); static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO, qla2x00_optrom_gold_fw_version_show, NULL); static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, NULL); static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, NULL); static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL); static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, NULL); static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, qla2x00_vn_port_mac_address_show, NULL); static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, qla2x00_allow_cna_fw_dump_show, qla2x00_allow_cna_fw_dump_store); static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); static DEVICE_ATTR(min_supported_speed, 0444, qla2x00_min_supported_speed_show, NULL); static DEVICE_ATTR(max_supported_speed, 0444, qla2x00_max_supported_speed_show, NULL); static DEVICE_ATTR(zio_threshold, 0644, qla_zio_threshold_show, qla_zio_threshold_store); static DEVICE_ATTR_RW(qlini_mode); static DEVICE_ATTR_RW(ql2xexchoffld); static DEVICE_ATTR_RW(ql2xiniexchg); static DEVICE_ATTR(dif_bundle_statistics, 0444, qla2x00_dif_bundle_statistics_show, NULL); static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, qla2x00_port_speed_store); static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); static struct attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version.attr, &dev_attr_fw_version.attr, &dev_attr_serial_num.attr, &dev_attr_isp_name.attr, &dev_attr_isp_id.attr, &dev_attr_model_name.attr, &dev_attr_model_desc.attr, &dev_attr_pci_info.attr, &dev_attr_link_state.attr, &dev_attr_zio.attr, &dev_attr_zio_timer.attr, &dev_attr_beacon.attr, &dev_attr_beacon_config.attr, &dev_attr_optrom_bios_version.attr, &dev_attr_optrom_efi_version.attr, &dev_attr_optrom_fcode_version.attr, &dev_attr_optrom_fw_version.attr, &dev_attr_84xx_fw_version.attr, &dev_attr_total_isp_aborts.attr, &dev_attr_serdes_version.attr, &dev_attr_mpi_version.attr, &dev_attr_phy_version.attr, &dev_attr_flash_block_size.attr, &dev_attr_vlan_id.attr, &dev_attr_vn_port_mac_address.attr, &dev_attr_fabric_param.attr, &dev_attr_fw_state.attr, &dev_attr_optrom_gold_fw_version.attr, &dev_attr_thermal_temp.attr, &dev_attr_diag_requests.attr, &dev_attr_diag_megabytes.attr, &dev_attr_fw_dump_size.attr, &dev_attr_allow_cna_fw_dump.attr, &dev_attr_pep_version.attr, &dev_attr_min_supported_speed.attr, &dev_attr_max_supported_speed.attr, &dev_attr_zio_threshold.attr, &dev_attr_dif_bundle_statistics.attr, &dev_attr_port_speed.attr, &dev_attr_port_no.attr, &dev_attr_fw_attr.attr, &dev_attr_dport_diagnostics.attr, 
&dev_attr_mpi_pause.attr, &dev_attr_qlini_mode.attr, &dev_attr_ql2xiniexchg.attr, &dev_attr_ql2xexchoffld.attr, NULL, }; static umode_t qla_host_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL && (attr == &dev_attr_qlini_mode.attr || attr == &dev_attr_ql2xiniexchg.attr || attr == &dev_attr_ql2xexchoffld.attr)) return 0; return attr->mode; } static const struct attribute_group qla2x00_host_attr_group = { .is_visible = qla_host_attr_is_visible, .attrs = qla2x00_host_attrs }; const struct attribute_group *qla2x00_host_groups[] = { &qla2x00_host_attr_group, NULL }; /* Host attributes. */ static void qla2x00_get_host_port_id(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); fc_host_port_id(shost) = vha->d_id.b.domain << 16 | vha->d_id.b.area << 8 | vha->d_id.b.al_pa; } static void qla2x00_get_host_speed(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); u32 speed; if (IS_QLAFX00(vha->hw)) { qlafx00_get_host_speed(shost); return; } switch (vha->hw->link_data_rate) { case PORT_SPEED_1GB: speed = FC_PORTSPEED_1GBIT; break; case PORT_SPEED_2GB: speed = FC_PORTSPEED_2GBIT; break; case PORT_SPEED_4GB: speed = FC_PORTSPEED_4GBIT; break; case PORT_SPEED_8GB: speed = FC_PORTSPEED_8GBIT; break; case PORT_SPEED_10GB: speed = FC_PORTSPEED_10GBIT; break; case PORT_SPEED_16GB: speed = FC_PORTSPEED_16GBIT; break; case PORT_SPEED_32GB: speed = FC_PORTSPEED_32GBIT; break; case PORT_SPEED_64GB: speed = FC_PORTSPEED_64GBIT; break; default: speed = FC_PORTSPEED_UNKNOWN; break; } fc_host_speed(shost) = speed; } static void qla2x00_get_host_port_type(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); uint32_t port_type; if (vha->vp_idx) { fc_host_port_type(shost) = FC_PORTTYPE_NPIV; return; } switch (vha->hw->current_topology) { case ISP_CFG_NL: port_type = FC_PORTTYPE_LPORT; break; case ISP_CFG_FL: port_type = FC_PORTTYPE_NLPORT; break; case ISP_CFG_N: port_type = FC_PORTTYPE_PTP; break; case ISP_CFG_F: port_type = FC_PORTTYPE_NPORT; break; default: port_type = FC_PORTTYPE_UNKNOWN; break; } fc_host_port_type(shost) = port_type; } static void qla2x00_get_starget_node_name(struct scsi_target *starget) { struct Scsi_Host *host = dev_to_shost(starget->dev.parent); scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport; u64 node_name = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->rport && starget->id == fcport->rport->scsi_target_id) { node_name = wwn_to_u64(fcport->node_name); break; } } fc_starget_node_name(starget) = node_name; } static void qla2x00_get_starget_port_name(struct scsi_target *starget) { struct Scsi_Host *host = dev_to_shost(starget->dev.parent); scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport; u64 port_name = 0; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->rport && starget->id == fcport->rport->scsi_target_id) { port_name = wwn_to_u64(fcport->port_name); break; } } fc_starget_port_name(starget) = port_name; } static void qla2x00_get_starget_port_id(struct scsi_target *starget) { struct Scsi_Host *host = dev_to_shost(starget->dev.parent); scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport; uint32_t port_id = ~0U; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->rport && starget->id == fcport->rport->scsi_target_id) { port_id = fcport->d_id.b.domain << 16 | fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; break; } } fc_starget_port_id(starget) = port_id; } static inline void 
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { fc_port_t *fcport = *(fc_port_t **)rport->dd_data; rport->dev_loss_tmo = timeout ? timeout : 1; if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port) nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, rport->dev_loss_tmo); } static void qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) { struct Scsi_Host *host = rport_to_shost(rport); fc_port_t *fcport = *(fc_port_t **)rport->dd_data; unsigned long flags; if (!fcport) return; ql_dbg(ql_dbg_async, fcport->vha, 0x5101, DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d", rport->port_state)); /* * Now that the rport has been deleted, set the fcport state to * FCS_DEVICE_DEAD, if the fcport is still lost. */ if (fcport->scan_state != QLA_FCPORT_FOUND) qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); /* * Transport has effectively 'deleted' the rport, clear * all local references. */ spin_lock_irqsave(host->host_lock, flags); /* Confirm port has not reappeared before clearing pointers. */ if (rport->port_state != FC_PORTSTATE_ONLINE) { fcport->rport = NULL; *((fc_port_t **)rport->dd_data) = NULL; } spin_unlock_irqrestore(host->host_lock, flags); if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); return; } } static void qla2x00_terminate_rport_io(struct fc_rport *rport) { fc_port_t *fcport = *(fc_port_t **)rport->dd_data; scsi_qla_host_t *vha; if (!fcport) return; if (test_bit(UNLOADING, &fcport->vha->dpc_flags)) return; if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; vha = fcport->vha; if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET); return; } /* * At this point all fcport's software-states are cleared. Perform any * final cleanup of firmware resources (PCBs and XCBs). * * Attempt to cleanup only lost devices. */ if (fcport->loop_id != FC_NO_LOOP_ID) { if (IS_FWI2_CAPABLE(fcport->vha->hw) && fcport->scan_state != QLA_FCPORT_FOUND) { if (fcport->loop_id != FC_NO_LOOP_ID) fcport->logout_on_delete = 1; if (!EDIF_NEGOTIATION_PENDING(fcport)) { ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, "%s %d schedule session deletion\n", __func__, __LINE__); qlt_schedule_sess_for_deletion(fcport); } } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) { qla2x00_port_logout(fcport->vha, fcport); } } /* check for any straggling io left behind */ if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) { ql_log(ql_log_warn, vha, 0x300b, "IO not return. Resetting. 
\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); } } static int qla2x00_issue_lip(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); if (IS_QLAFX00(vha->hw)) return 0; if (vha->hw->flags.port_isolated) return 0; qla2x00_loop_reset(vha); return 0; } static struct fc_host_statistics * qla2x00_get_fc_host_stats(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int rval; struct link_statistics *stats; dma_addr_t stats_dma; struct fc_host_statistics *p = &vha->fc_host_stat; struct qla_qpair *qpair; int i; u64 ib = 0, ob = 0, ir = 0, or = 0; memset(p, -1, sizeof(*p)); if (IS_QLAFX00(vha->hw)) goto done; if (test_bit(UNLOADING, &vha->dpc_flags)) goto done; if (unlikely(pci_channel_offline(ha->pdev))) goto done; if (qla2x00_chip_is_down(vha)) goto done; stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x707d, "Failed to allocate memory for stats.\n"); goto done; } rval = QLA_FUNCTION_FAILED; if (IS_FWI2_CAPABLE(ha)) { rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0); } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && !ha->dpc_active) { /* Must be in a 'READY' state for statistics retrieval. */ rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, stats, stats_dma); } if (rval != QLA_SUCCESS) goto done_free; /* --- */ for (i = 0; i < vha->hw->max_qpairs; i++) { qpair = vha->hw->queue_pair_map[i]; if (!qpair) continue; ir += qpair->counters.input_requests; or += qpair->counters.output_requests; ib += qpair->counters.input_bytes; ob += qpair->counters.output_bytes; } ir += ha->base_qpair->counters.input_requests; or += ha->base_qpair->counters.output_requests; ib += ha->base_qpair->counters.input_bytes; ob += ha->base_qpair->counters.output_bytes; ir += vha->qla_stats.input_requests; or += vha->qla_stats.output_requests; ib += vha->qla_stats.input_bytes; ob += vha->qla_stats.output_bytes; /* --- */ p->link_failure_count = le32_to_cpu(stats->link_fail_cnt); p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt); p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt); p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt); p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt); p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt); if (IS_FWI2_CAPABLE(ha)) { p->lip_count = le32_to_cpu(stats->lip_cnt); p->tx_frames = le32_to_cpu(stats->tx_frames); p->rx_frames = le32_to_cpu(stats->rx_frames); p->dumped_frames = le32_to_cpu(stats->discarded_frames); p->nos_count = le32_to_cpu(stats->nos_rcvd); p->error_frames = le32_to_cpu(stats->dropped_frames) + le32_to_cpu(stats->discarded_frames); if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt); p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt); } else { p->rx_words = ib >> 2; p->tx_words = ob >> 2; } } p->fcp_control_requests = vha->qla_stats.control_requests; p->fcp_input_requests = ir; p->fcp_output_requests = or; p->fcp_input_megabytes = ib >> 20; p->fcp_output_megabytes = ob >> 20; p->seconds_since_last_reset = get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; do_div(p->seconds_since_last_reset, HZ); done_free: dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), stats, stats_dma); done: return p; } static void qla2x00_reset_host_stats(struct Scsi_Host 
*shost) { scsi_qla_host_t *vha = shost_priv(shost); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct link_statistics *stats; dma_addr_t stats_dma; int i; struct qla_qpair *qpair; memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); for (i = 0; i < vha->hw->max_qpairs; i++) { qpair = vha->hw->queue_pair_map[i]; if (!qpair) continue; memset(&qpair->counters, 0, sizeof(qpair->counters)); } memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters)); vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); if (IS_FWI2_CAPABLE(ha)) { int rval; stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x70d7, "Failed to allocate memory for stats.\n"); return; } /* reset firmware statistics */ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); if (rval != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x70de, "Resetting ISP statistics failed: rval = %d\n", rval); dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma); } } static void qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost), sizeof(fc_host_symbolic_name(shost))); } static void qla2x00_set_host_system_hostname(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); } static void qla2x00_get_host_fabric_name(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); static const uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u64 fabric_name = wwn_to_u64(node_name); if (vha->device_flags & SWITCH_FOUND) fabric_name = wwn_to_u64(vha->fabric_node_name); fc_host_fabric_name(shost) = fabric_name; } static void qla2x00_get_host_port_state(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); if (!base_vha->flags.online) { fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; return; } switch (atomic_read(&base_vha->loop_state)) { case LOOP_UPDATE: fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; break; case LOOP_DOWN: if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS; else fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case LOOP_DEAD: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case LOOP_READY: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } static int qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) { int ret = 0; uint8_t qos = 0; scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); scsi_qla_host_t *vha = NULL; struct qla_hw_data *ha = base_vha->hw; int cnt; struct req_que *req = ha->req_q_map[0]; struct qla_qpair *qpair; ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { ql_log(ql_log_warn, vha, 0x707e, "Vport sanity check failed, status %x\n", ret); return (ret); } vha = qla24xx_create_vhost(fc_vport); if (vha == NULL) { ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n"); return FC_VPORT_FAILED; } if (disable) { atomic_set(&vha->vp_state, VP_OFFLINE); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } else atomic_set(&vha->vp_state, VP_FAILED); /* ready to create vport */ ql_log(ql_log_info, vha, 0x7080, "VP entry id %d assigned.\n", vha->vp_idx); /* 
initialized vport states */ atomic_set(&vha->loop_state, LOOP_DOWN); vha->vp_err_state = VP_ERR_PORTDWN; vha->vp_prev_err_state = VP_ERR_UNKWN; /* Check if physical ha port is Up */ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { /* Don't retry or attempt login of this virtual port */ ql_dbg(ql_dbg_user, vha, 0x7081, "Vport loop state is not UP.\n"); atomic_set(&vha->loop_state, LOOP_DEAD); if (!disable) fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); } if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { int prot = 0, guard; vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_user, vha, 0x7082, "Registered for DIF/DIX type 1 and 3 protection.\n"); scsi_host_set_prot(vha->host, prot | SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION); guard = SHOST_DIX_GUARD_CRC; if (IS_PI_IPGUARD_CAPABLE(ha) && (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) guard |= SHOST_DIX_GUARD_IP; scsi_host_set_guard(vha->host, guard); } else vha->flags.difdix_supported = 0; } if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, &ha->pdev->dev)) { ql_dbg(ql_dbg_user, vha, 0x7083, "scsi_add_host failure for VP[%d].\n", vha->vp_idx); goto vport_create_failed_2; } /* initialize attributes */ fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); fc_host_supported_classes(vha->host) = fc_host_supported_classes(base_vha->host); fc_host_supported_speeds(vha->host) = fc_host_supported_speeds(base_vha->host); qlt_vport_create(vha, ha); qla24xx_vport_disable(fc_vport, disable); if (!ql2xmqsupport || !ha->npiv_info) goto vport_queue; /* Create a request queue in QoS mode for the vport */ for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, 8) == 0) { qos = ha->npiv_info[cnt].q_qos; break; } } if (qos) { qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true); if (!qpair) ql_log(ql_log_warn, vha, 0x7084, "Can't create qpair for VP[%d]\n", vha->vp_idx); else { ql_dbg(ql_dbg_multiq, vha, 0xc001, "Queue pair: %d Qos: %d) created for VP[%d]\n", qpair->id, qos, vha->vp_idx); ql_dbg(ql_dbg_user, vha, 0x7085, "Queue Pair: %d Qos: %d) created for VP[%d]\n", qpair->id, qos, vha->vp_idx); req = qpair->req; vha->qpair = qpair; } } vport_queue: vha->req = req; return 0; vport_create_failed_2: qla24xx_disable_vp(vha); qla24xx_deallocate_vp_id(vha); scsi_host_put(vha->host); return FC_VPORT_FAILED; } static int qla24xx_vport_delete(struct fc_vport *fc_vport) { scsi_qla_host_t *vha = fc_vport->dd_data; struct qla_hw_data *ha = vha->hw; uint16_t id = vha->vp_idx; set_bit(VPORT_DELETE, &vha->dpc_flags); while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) msleep(1000); qla24xx_disable_vp(vha); qla2x00_wait_for_sess_deletion(vha); qla_nvme_delete(vha); qla_enode_stop(vha); qla_edb_stop(vha); vha->flags.delete_progress = 1; qlt_remove_target(ha, vha); fc_remove_host(vha->host); scsi_remove_host(vha->host); /* Allow timer to run to drain queued items, when removing vp */ qla24xx_deallocate_vp_id(vha); if (vha->timer_active) { qla2x00_vp_stop_timer(vha); ql_dbg(ql_dbg_user, vha, 0x7086, "Timer for the VP[%d] has stopped\n", vha->vp_idx); } qla2x00_free_fcports(vha); mutex_lock(&ha->vport_lock); 
ha->cur_vport_count--; clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); vha->gnl.l = NULL; vfree(vha->scan.l); if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x7087, "Queue Pair delete failed.\n"); } ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id); scsi_host_put(vha->host); return 0; } static int qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) { scsi_qla_host_t *vha = fc_vport->dd_data; if (disable) qla24xx_disable_vp(vha); else qla24xx_enable_vp(vha); return 0; } struct fc_function_template qla2xxx_transport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_speeds = 1, .get_host_port_id = qla2x00_get_host_port_id, .show_host_port_id = 1, .get_host_speed = qla2x00_get_host_speed, .show_host_speed = 1, .get_host_port_type = qla2x00_get_host_port_type, .show_host_port_type = 1, .get_host_symbolic_name = qla2x00_get_host_symbolic_name, .show_host_symbolic_name = 1, .set_host_system_hostname = qla2x00_set_host_system_hostname, .show_host_system_hostname = 1, .get_host_fabric_name = qla2x00_get_host_fabric_name, .show_host_fabric_name = 1, .get_host_port_state = qla2x00_get_host_port_state, .show_host_port_state = 1, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, .get_starget_node_name = qla2x00_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = qla2x00_get_starget_port_name, .show_starget_port_name = 1, .get_starget_port_id = qla2x00_get_starget_port_id, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .issue_fc_host_lip = qla2x00_issue_lip, .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, .terminate_rport_io = qla2x00_terminate_rport_io, .get_fc_host_stats = qla2x00_get_fc_host_stats, .reset_fc_host_stats = qla2x00_reset_host_stats, .vport_create = qla24xx_vport_create, .vport_disable = qla24xx_vport_disable, .vport_delete = qla24xx_vport_delete, .bsg_request = qla24xx_bsg_request, .bsg_timeout = qla24xx_bsg_timeout, }; struct fc_function_template qla2xxx_transport_vport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .get_host_port_id = qla2x00_get_host_port_id, .show_host_port_id = 1, .get_host_speed = qla2x00_get_host_speed, .show_host_speed = 1, .get_host_port_type = qla2x00_get_host_port_type, .show_host_port_type = 1, .get_host_symbolic_name = qla2x00_get_host_symbolic_name, .show_host_symbolic_name = 1, .set_host_system_hostname = qla2x00_set_host_system_hostname, .show_host_system_hostname = 1, .get_host_fabric_name = qla2x00_get_host_fabric_name, .show_host_fabric_name = 1, .get_host_port_state = qla2x00_get_host_port_state, .show_host_port_state = 1, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, .get_starget_node_name = qla2x00_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = qla2x00_get_starget_port_name, .show_starget_port_name = 1, .get_starget_port_id = qla2x00_get_starget_port_id, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .issue_fc_host_lip = qla2x00_issue_lip, .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, .terminate_rport_io = qla2x00_terminate_rport_io, .get_fc_host_stats = 
qla2x00_get_fc_host_stats, .reset_fc_host_stats = qla2x00_reset_host_stats, .bsg_request = qla24xx_bsg_request, .bsg_timeout = qla24xx_bsg_timeout, }; static uint qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds) { uint supported_speeds = FC_PORTSPEED_UNKNOWN; if (speeds & FDMI_PORT_SPEED_64GB) supported_speeds |= FC_PORTSPEED_64GBIT; if (speeds & FDMI_PORT_SPEED_32GB) supported_speeds |= FC_PORTSPEED_32GBIT; if (speeds & FDMI_PORT_SPEED_16GB) supported_speeds |= FC_PORTSPEED_16GBIT; if (speeds & FDMI_PORT_SPEED_8GB) supported_speeds |= FC_PORTSPEED_8GBIT; if (speeds & FDMI_PORT_SPEED_4GB) supported_speeds |= FC_PORTSPEED_4GBIT; if (speeds & FDMI_PORT_SPEED_2GB) supported_speeds |= FC_PORTSPEED_2GBIT; if (speeds & FDMI_PORT_SPEED_1GB) supported_speeds |= FC_PORTSPEED_1GBIT; return supported_speeds; } void qla2x00_init_host_attr(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; u32 speeds = 0, fdmi_speed = 0; fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ? (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3; fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; fdmi_speed = qla25xx_fdmi_port_speed_capability(ha); speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed); fc_host_supported_speeds(vha->host) = speeds; }
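/*
 * Editor's illustrative sketch (not part of the driver): the DEVICE_ATTR
 * table defined above is exported by the SCSI midlayer under
 * /sys/class/scsi_host/host<N>/ (the driver's own log text references
 * "/sys/class/scsi_host/host%ld/qlini_mode"). This minimal userspace
 * program just reads one of those attributes. The host number and the
 * attribute name used below are assumptions for the example; which
 * attributes report a value (rather than a bare newline) depends on the
 * HBA model, as the *_show handlers above gate output with
 * IS_QLA27XX()/IS_QLA28XX() and similar checks.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* Defaults of host0/fw_version are assumptions for the demo. */
	const char *host = (argc > 1) ? argv[1] : "host0";
	const char *attr = (argc > 2) ? argv[2] : "fw_version";
	char path[256];
	char buf[4096];	/* sysfs show() output is bounded by PAGE_SIZE */
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/scsi_host/%s/%s", host, attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
	return EXIT_SUCCESS;
}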
linux-master
drivers/scsi/qla2xxx/qla_attr.c
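/*
 * Editor's illustrative sketch (not part of either source file): the
 * tcm_qla2xxx code in the next record round-trips WWNs between a u64 and
 * the colon-separated "xx:xx:xx:xx:xx:xx:xx:xx" form
 * (tcm_qla2xxx_format_wwn / tcm_qla2xxx_parse_wwn), and the NPIV path
 * expects "<16 hex digits>:<16 hex digits>" for WWPN:WWNN. This standalone
 * userspace program only demonstrates the big-endian byte layout of that
 * formatting; the sample WWN value is made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

/* Format a 64-bit WWN big-endian, one colon-separated byte at a time. */
static void format_wwn(char *buf, size_t len, uint64_t wwn)
{
	uint8_t b[8];
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (uint8_t)(wwn >> (8 * (7 - i)));	/* most significant byte first */
	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}

int main(void)
{
	uint64_t wwpn = 0x21000024ff010203ULL;	/* arbitrary example value */
	char str[24];	/* 8 bytes * "xx" + 7 colons + NUL */

	format_wwn(str, sizeof(str), wwpn);
	printf("wwpn 0x%016llx -> \"%s\"\n", (unsigned long long)wwpn, str);
	return 0;
}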
// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * This file contains tcm implementation using v4 configfs fabric infrastructure * for QLogic target mode HBAs * * (c) Copyright 2010-2013 Datera, Inc. * * Author: Nicholas A. Bellinger <[email protected]> * * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from * the TCM_FC / Open-FCoE.org fabric module. * * Copyright (c) 2010 Cisco Systems, Inc * ****************************************************************************/ #include <linux/module.h> #include <linux/utsname.h> #include <linux/vmalloc.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/configfs.h> #include <linux/ctype.h> #include <asm/unaligned.h> #include <scsi/scsi_host.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include "qla_def.h" #include "qla_target.h" #include "tcm_qla2xxx.h" static struct workqueue_struct *tcm_qla2xxx_free_wq; /* * Parse WWN. * If strict, we require lower-case hex and colon separators to be sure * the name is the same as what would be generated by ft_format_wwn() * so the name and wwn are mapped one-to-one. */ static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict) { const char *cp; char c; u32 nibble; u32 byte = 0; u32 pos = 0; u32 err; *wwn = 0; for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) { c = *cp; if (c == '\n' && cp[1] == '\0') continue; if (strict && pos++ == 2 && byte++ < 7) { pos = 0; if (c == ':') continue; err = 1; goto fail; } if (c == '\0') { err = 2; if (strict && byte != 8) goto fail; return cp - name; } err = 3; if (isdigit(c)) nibble = c - '0'; else if (isxdigit(c) && (islower(c) || !strict)) nibble = tolower(c) - 'a' + 10; else goto fail; *wwn = (*wwn << 4) | nibble; } err = 4; fail: pr_debug("err %u len %zu pos %u byte %u\n", err, cp - name, pos, byte); return -1; } static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn) { u8 b[8]; put_unaligned_be64(wwn, b); return snprintf(buf, len, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); } /* * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn */ static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) { unsigned int i, j; u8 wwn[8]; memset(wwn, 0, sizeof(wwn)); /* Validate and store the new name */ for (i = 0, j = 0; i < 16; i++) { int value; value = hex_to_bin(*ns++); if (value >= 0) j = (j << 4) | value; else return -EINVAL; if (i % 2) { wwn[i/2] = j & 0xff; j = 0; } } *nm = wwn_to_u64(wwn); return 0; } /* * This parsing logic follows drivers/scsi/scsi_transport_fc.c: * store_fc_host_vport_create() */ static int tcm_qla2xxx_npiv_parse_wwn( const char *name, size_t count, u64 *wwpn, u64 *wwnn) { unsigned int cnt = count; int rc; *wwpn = 0; *wwnn = 0; /* count may include a LF at end of string */ if (name[cnt-1] == '\n' || name[cnt-1] == 0) cnt--; /* validate we have enough characters for WWPN */ if ((cnt != (16+1+16)) || (name[16] != ':')) return -EINVAL; rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn); if (rc != 0) return rc; rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn); if (rc != 0) return rc; return 0; } static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); struct tcm_qla2xxx_lport *lport = tpg->lport; return lport->lport_naa_name; } static u16 tcm_qla2xxx_get_tag(struct 
se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->lport_tpgt; } static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->tpg_attrib.generate_node_acls; } static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->tpg_attrib.cache_dynamic_acls; } static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->tpg_attrib.demo_mode_write_protect; } static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->tpg_attrib.prod_mode_write_protect; } static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->tpg_attrib.demo_mode_login_only; } static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->tpg_attrib.fabric_prot_type; } static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return tpg->lport_tpgt; } static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) { struct qla_tgt_mgmt_cmd *mcmd = container_of(work, struct qla_tgt_mgmt_cmd, free_work); transport_generic_free_cmd(&mcmd->se_cmd, 0); } /* * Called from qla_target_template->free_mcmd(), and will call * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops * release callback. qla_hw_data->hardware_lock is expected to be held */ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) { if (!mcmd) return; INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); } static void tcm_qla2xxx_complete_free(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); unsigned long flags; cmd->cmd_in_wq = 0; WARN_ON(cmd->trc_flags & TRC_CMD_FREE); /* To do: protect all tgt_counters manipulations with proper locking. */ cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++; cmd->trc_flags |= TRC_CMD_FREE; cmd->cmd_sent_to_fw = 0; spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags); list_del_init(&cmd->sess_cmd_list); spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags); transport_generic_free_cmd(&cmd->se_cmd, 0); } static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess) { struct se_session *se_sess = sess->se_sess; struct qla_tgt_cmd *cmd; int tag, cpu; tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); if (tag < 0) return NULL; cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; memset(cmd, 0, sizeof(struct qla_tgt_cmd)); cmd->se_cmd.map_tag = tag; cmd->se_cmd.map_cpu = cpu; return cmd; } static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd) { target_free_tag(cmd->sess->se_sess, &cmd->se_cmd); } /* * Called from qla_target_template->free_cmd(), and will call * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops * release callback. 
qla_hw_data->hardware_lock is expected to be held */ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) { cmd->qpair->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; WARN_ON(cmd->trc_flags & TRC_CMD_DONE); cmd->trc_flags |= TRC_CMD_DONE; INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); queue_work(tcm_qla2xxx_free_wq, &cmd->work); } /* * Called from struct target_core_fabric_ops->check_stop_free() context */ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd; if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); cmd->trc_flags |= TRC_CMD_CHK_STOP; } return target_put_sess_cmd(se_cmd); } /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying * fabric descriptor @se_cmd command to release */ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd; if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, struct qla_tgt_mgmt_cmd, se_cmd); qlt_free_mcmd(mcmd); return; } cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); if (WARN_ON(cmd->cmd_sent_to_fw)) return; qlt_free_cmd(cmd); } static void tcm_qla2xxx_release_session(struct kref *kref) { struct fc_port *sess = container_of(kref, struct fc_port, sess_kref); qlt_unreg_sess(sess); } static void tcm_qla2xxx_put_sess(struct fc_port *sess) { if (!sess) return; kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); } static void tcm_qla2xxx_close_session(struct se_session *se_sess) { struct fc_port *sess = se_sess->fabric_sess_ptr; BUG_ON(!sess); target_stop_session(se_sess); sess->explicit_logout = 1; tcm_qla2xxx_put_sess(sess); } static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); if (cmd->aborted) { /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task * can get ahead of this cmd. tcm_qla2xxx_aborted_task * already kick start the free. */ pr_debug("write_pending aborted cmd[%p] refcount %d " "transport_state %x, t_state %x, se_cmd_flags %x\n", cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); transport_generic_request_failure(&cmd->se_cmd, TCM_CHECK_CONDITION_ABORT_CMD); return 0; } cmd->trc_flags |= TRC_XFR_RDY; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->prot_sg_cnt = se_cmd->t_prot_nents; cmd->prot_sg = se_cmd->t_prot_sg; cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; se_cmd->pi_err = 0; /* * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup * the SGL mappings into PCIe memory for incoming FCP WRITE data. 
*/ return qlt_rdy_to_xfer(cmd); } static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) { if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); return cmd->state; } return 0; } /* * Called from process context in qla_target.c:qlt_do_work() code */ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, unsigned char *cdb, uint32_t data_length, int fcp_task_attr, int data_dir, int bidi) { struct se_cmd *se_cmd = &cmd->se_cmd; struct se_session *se_sess; struct fc_port *sess; #ifdef CONFIG_TCM_QLA2XXX_DEBUG struct se_portal_group *se_tpg; struct tcm_qla2xxx_tpg *tpg; #endif int rc, target_flags = TARGET_SCF_ACK_KREF; unsigned long flags; if (bidi) target_flags |= TARGET_SCF_BIDI_OP; if (se_cmd->cpuid != WORK_CPU_UNBOUND) target_flags |= TARGET_SCF_USE_CPUID; sess = cmd->sess; if (!sess) { pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n"); return -EINVAL; } se_sess = sess->se_sess; if (!se_sess) { pr_err("Unable to locate active struct se_session\n"); return -EINVAL; } #ifdef CONFIG_TCM_QLA2XXX_DEBUG se_tpg = se_sess->se_tpg; tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); if (unlikely(tpg->tpg_attrib.jam_host)) { /* return, and dont run target_submit_cmd,discarding command */ return 0; } #endif cmd->qpair->tgt_counters.qla_core_sbt_cmd++; spin_lock_irqsave(&sess->sess_cmd_lock, flags); list_add_tail(&cmd->sess_cmd_list, &sess->sess_cmd_list); spin_unlock_irqrestore(&sess->sess_cmd_lock, flags); rc = target_init_cmd(se_cmd, se_sess, &cmd->sense_buffer[0], cmd->unpacked_lun, data_length, fcp_task_attr, data_dir, target_flags); if (rc) return rc; if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, GFP_KERNEL)) return 0; target_submit(se_cmd); return 0; } static void tcm_qla2xxx_handle_data_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); /* * Ensure that the complete FCP WRITE payload has been received. * Otherwise return an exception via CHECK_CONDITION status. 
*/ cmd->cmd_in_wq = 0; cmd->cmd_sent_to_fw = 0; if (cmd->aborted) { transport_generic_request_failure(&cmd->se_cmd, TCM_CHECK_CONDITION_ABORT_CMD); return; } cmd->qpair->tgt_counters.qla_core_ret_ctio++; if (!cmd->write_data_transferred) { switch (cmd->dif_err_code) { case DIF_ERR_GRD: cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; break; case DIF_ERR_REF: cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; break; case DIF_ERR_APP: cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; break; case DIF_ERR_NONE: default: break; } if (cmd->se_cmd.pi_err) transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); else transport_generic_request_failure(&cmd->se_cmd, TCM_CHECK_CONDITION_ABORT_CMD); return; } return target_execute_cmd(&cmd->se_cmd); } /* * Called from qla_target.c:qlt_do_ctio_completion() */ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) { cmd->trc_flags |= TRC_DATA_IN; cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); queue_work(tcm_qla2xxx_free_wq, &cmd->work); } static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) { return 0; } static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts) { struct se_cmd *se_cmd = &cmd->se_cmd; if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; return 0; } /* * Called from qla_target.c:qlt_issue_task_mgmt() */ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, uint16_t tmr_func, uint32_t tag) { struct fc_port *sess = mcmd->sess; struct se_cmd *se_cmd = &mcmd->se_cmd; int transl_tmr_func = 0; switch (tmr_func) { case QLA_TGT_ABTS: pr_debug("%ld: ABTS received\n", sess->vha->host_no); transl_tmr_func = TMR_ABORT_TASK; break; case QLA_TGT_2G_ABORT_TASK: pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); transl_tmr_func = TMR_ABORT_TASK; break; case QLA_TGT_CLEAR_ACA: pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no); transl_tmr_func = TMR_CLEAR_ACA; break; case QLA_TGT_TARGET_RESET: pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no); transl_tmr_func = TMR_TARGET_WARM_RESET; break; case QLA_TGT_LUN_RESET: pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no); transl_tmr_func = TMR_LUN_RESET; break; case QLA_TGT_CLEAR_TS: pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no); transl_tmr_func = TMR_CLEAR_TASK_SET; break; case QLA_TGT_ABORT_TS: pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no); transl_tmr_func = TMR_ABORT_TASK_SET; break; default: pr_debug("%ld: Unknown task mgmt fn 0x%x\n", sess->vha->host_no, tmr_func); return -ENOSYS; } return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); } static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess, uint64_t tag) { struct qla_tgt_cmd *cmd; unsigned long flags; if (!sess->se_sess) return NULL; spin_lock_irqsave(&sess->sess_cmd_lock, flags); list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list) { if (cmd->se_cmd.tag == tag) goto done; } cmd = NULL; done: spin_unlock_irqrestore(&sess->sess_cmd_lock, flags); return cmd; } static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); if (cmd->aborted) { /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task * can get ahead of this cmd. tcm_qla2xxx_aborted_task * already kick start the free. 
*/ pr_debug("queue_data_in aborted cmd[%p] refcount %d " "transport_state %x, t_state %x, se_cmd_flags %x\n", cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); return 0; } cmd->trc_flags |= TRC_XMIT_DATA; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; cmd->prot_sg_cnt = se_cmd->t_prot_nents; cmd->prot_sg = se_cmd->t_prot_sg; cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; se_cmd->pi_err = 0; /* * Now queue completed DATA_IN the qla2xxx LLD and response ring */ return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS, se_cmd->scsi_status); } static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); int xmit_type = QLA_TGT_XMIT_STATUS; if (cmd->aborted) { /* * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task * can get ahead of this cmd. tcm_qla2xxx_aborted_task * already kick start the free. */ pr_debug( "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n", cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); return 0; } cmd->bufflen = se_cmd->data_length; cmd->sg = NULL; cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->trc_flags |= TRC_XMIT_STATUS; if (se_cmd->data_direction == DMA_FROM_DEVICE) { /* * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen * for qla_tgt_xmit_response LLD code */ if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; se_cmd->residual_count = 0; } se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; se_cmd->residual_count += se_cmd->data_length; cmd->bufflen = 0; } /* * Now queue status response to qla2xxx LLD code and response ring */ return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); } static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) { struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, struct qla_tgt_mgmt_cmd, se_cmd); pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n", mcmd, se_tmr->function, se_tmr->response); /* * Do translation between TCM TM response codes and * QLA2xxx FC TM response codes. */ switch (se_tmr->response) { case TMR_FUNCTION_COMPLETE: mcmd->fc_tm_rsp = FC_TM_SUCCESS; break; case TMR_TASK_DOES_NOT_EXIST: mcmd->fc_tm_rsp = FC_TM_BAD_CMD; break; case TMR_FUNCTION_REJECTED: mcmd->fc_tm_rsp = FC_TM_REJECT; break; case TMR_LUN_DOES_NOT_EXIST: default: mcmd->fc_tm_rsp = FC_TM_FAILED; break; } /* * Queue the TM response to QLA2xxx LLD to build a * CTIO response packet. 
*/ qlt_xmit_tm_rsp(mcmd); } static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd; unsigned long flags; if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) return; cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); spin_lock_irqsave(&cmd->sess->sess_cmd_lock, flags); list_del_init(&cmd->sess_cmd_list); spin_unlock_irqrestore(&cmd->sess->sess_cmd_lock, flags); qlt_abort_cmd(cmd); } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, struct tcm_qla2xxx_nacl *, struct fc_port *); /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) { struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct se_portal_group *se_tpg = se_nacl->se_tpg; struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; struct tcm_qla2xxx_lport *lport = container_of(se_wwn, struct tcm_qla2xxx_lport, lport_wwn); struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); void *node; pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); if (WARN_ON(node && (node != se_nacl))) { /* * The nacl no longer matches what we think it should be. * Most likely a new dynamic acl has been added while * someone dropped the hardware lock. It clearly is a * bug elsewhere, but this bit can't make things worse. */ btree_insert32(&lport->lport_fcport_map, nacl->nport_id, node, GFP_ATOMIC); } pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", se_nacl, nacl->nport_wwnn, nacl->nport_id); /* * Now clear the se_nacl and session pointers from our HW lport lookup * table mapping for this initiator's fabric S_ID and LOOP_ID entries. * * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> * target_wait_for_sess_cmds() before the session waits for outstanding * I/O to complete, to avoid a race between session shutdown execution * and incoming ATIOs or TMRs picking up a stale se_node_act reference. 
*/ tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); } static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) { target_stop_session(sess->se_sess); } static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl, const char *name) { struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); u64 wwnn; if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) return -EINVAL; nacl->nport_wwnn = wwnn; tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); return 0; } /* Start items for tcm_qla2xxx_tpg_attrib_cit */ #define DEF_QLA_TPG_ATTRIB(name) \ \ static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \ struct config_item *item, char *page) \ { \ struct se_portal_group *se_tpg = attrib_to_tpg(item); \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ \ return sprintf(page, "%d\n", tpg->tpg_attrib.name); \ } \ \ static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \ struct config_item *item, const char *page, size_t count) \ { \ struct se_portal_group *se_tpg = attrib_to_tpg(item); \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ unsigned long val; \ int ret; \ \ ret = kstrtoul(page, 0, &val); \ if (ret < 0) { \ pr_err("kstrtoul() failed with" \ " ret: %d\n", ret); \ return -EINVAL; \ } \ \ if ((val != 0) && (val != 1)) { \ pr_err("Illegal boolean value %lu\n", val); \ return -EINVAL; \ } \ \ a->name = val; \ \ return count; \ } \ CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name) DEF_QLA_TPG_ATTRIB(generate_node_acls); DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); DEF_QLA_TPG_ATTRIB(demo_mode_login_only); #ifdef CONFIG_TCM_QLA2XXX_DEBUG DEF_QLA_TPG_ATTRIB(jam_host); #endif static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, &tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls, &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, #ifdef CONFIG_TCM_QLA2XXX_DEBUG &tcm_qla2xxx_tpg_attrib_attr_jam_host, #endif NULL, }; /* End items for tcm_qla2xxx_tpg_attrib_cit */ static int tcm_qla2xxx_enable_tpg(struct se_portal_group *se_tpg, bool enable) { struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; struct tcm_qla2xxx_lport *lport = container_of(se_wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *vha = lport->qla_vha; struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); if (enable) { if (atomic_read(&tpg->lport_tpg_enabled)) return -EEXIST; atomic_set(&tpg->lport_tpg_enabled, 1); qlt_enable_vha(vha); } else { if (!atomic_read(&tpg->lport_tpg_enabled)) return 0; atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return 0; } static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item, char *page) { return target_show_dynamic_sessions(to_tpg(item), page); } static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item, const char *page, size_t count) { struct se_portal_group *se_tpg = to_tpg(item); struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); unsigned long val; int ret = kstrtoul(page, 0, &val); if (ret) { pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); return ret; } if (val != 0 && val != 1 
&& val != 3) { pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); return -EINVAL; } tpg->tpg_attrib.fabric_prot_type = val; return count; } static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, char *page) { struct se_portal_group *se_tpg = to_tpg(item); struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); } CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { &tcm_qla2xxx_tpg_attr_dynamic_sessions, &tcm_qla2xxx_tpg_attr_fabric_prot_type, NULL, }; static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn, const char *name) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); struct tcm_qla2xxx_tpg *tpg; unsigned long tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) return ERR_PTR(-EINVAL); if ((tpgt != 1)) { pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); return ERR_PTR(-ENOSYS); } tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); if (!tpg) { pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); return ERR_PTR(-ENOMEM); } tpg->lport = lport; tpg->lport_tpgt = tpgt; /* * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic * NodeACLs */ tpg->tpg_attrib.generate_node_acls = 1; tpg->tpg_attrib.demo_mode_write_protect = 1; tpg->tpg_attrib.cache_dynamic_acls = 1; tpg->tpg_attrib.demo_mode_login_only = 1; tpg->tpg_attrib.jam_host = 0; ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); if (ret < 0) { kfree(tpg); return NULL; } lport->tpg_1 = tpg; return &tpg->se_tpg; } static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); struct tcm_qla2xxx_lport *lport = tpg->lport; struct scsi_qla_host *vha = lport->qla_vha; /* * Call into qla2x_target.c LLD logic to shutdown the active * FC Nexuses and disable target mode operation for this qla_hw_data */ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) qlt_stop_phase1(vha->vha_tgt.qla_tgt); core_tpg_deregister(se_tpg); /* * Clear local TPG=1 pointer for non NPIV mode. 
*/ lport->tpg_1 = NULL; kfree(tpg); } static int tcm_qla2xxx_npiv_enable_tpg(struct se_portal_group *se_tpg, bool enable) { struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; struct tcm_qla2xxx_lport *lport = container_of(se_wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *vha = lport->qla_vha; struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); if (enable) { if (atomic_read(&tpg->lport_tpg_enabled)) return -EEXIST; atomic_set(&tpg->lport_tpg_enabled, 1); qlt_enable_vha(vha); } else { if (!atomic_read(&tpg->lport_tpg_enabled)) return 0; atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return 0; } static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, const char *name) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); struct tcm_qla2xxx_tpg *tpg; unsigned long tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) return ERR_PTR(-EINVAL); tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); if (!tpg) { pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); return ERR_PTR(-ENOMEM); } tpg->lport = lport; tpg->lport_tpgt = tpgt; /* * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic * NodeACLs */ tpg->tpg_attrib.generate_node_acls = 1; tpg->tpg_attrib.demo_mode_write_protect = 1; tpg->tpg_attrib.cache_dynamic_acls = 1; tpg->tpg_attrib.demo_mode_login_only = 1; ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); if (ret < 0) { kfree(tpg); return NULL; } lport->tpg_1 = tpg; return &tpg->se_tpg; } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha, const be_id_t s_id) { struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; struct tcm_qla2xxx_nacl *nacl; u32 key; lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); return NULL; } key = sid_to_key(s_id); pr_debug("find_sess_by_s_id: 0x%06x\n", key); se_nacl = btree_lookup32(&lport->lport_fcport_map, key); if (!se_nacl) { pr_debug("Unable to locate s_id: 0x%06x\n", key); return NULL; } pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n", se_nacl, se_nacl->initiatorname); nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); if (!nacl->fc_port) { pr_err("Unable to locate struct fc_port\n"); return NULL; } return nacl->fc_port; } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static void tcm_qla2xxx_set_sess_by_s_id( struct tcm_qla2xxx_lport *lport, struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, struct fc_port *fc_port, be_id_t s_id) { u32 key; void *slot; int rc; key = sid_to_key(s_id); pr_debug("set_sess_by_s_id: %06x\n", key); slot = btree_lookup32(&lport->lport_fcport_map, key); if (!slot) { if (new_se_nacl) { pr_debug("Setting up new fc_port entry to new_se_nacl\n"); nacl->nport_id = key; rc = btree_insert32(&lport->lport_fcport_map, key, new_se_nacl, GFP_ATOMIC); if (rc) printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n", (int)key); } else { pr_debug("Wiping nonexisting fc_port entry\n"); } fc_port->se_sess = se_sess; nacl->fc_port = fc_port; return; } if (nacl->fc_port) { if (new_se_nacl == NULL) { pr_debug("Clearing existing nacl->fc_port and fc_port entry\n"); 
btree_remove32(&lport->lport_fcport_map, key); nacl->fc_port = NULL; return; } pr_debug("Replacing existing nacl->fc_port and fc_port entry\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); fc_port->se_sess = se_sess; nacl->fc_port = fc_port; return; } if (new_se_nacl == NULL) { pr_debug("Clearing existing fc_port entry\n"); btree_remove32(&lport->lport_fcport_map, key); return; } pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n"); btree_update32(&lport->lport_fcport_map, key, new_se_nacl); fc_port->se_sess = se_sess; nacl->fc_port = fc_port; pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n", nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, const uint16_t loop_id) { struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; struct tcm_qla2xxx_nacl *nacl; struct tcm_qla2xxx_fc_loopid *fc_loopid; lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); return NULL; } pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); fc_loopid = lport->lport_loopid_map + loop_id; se_nacl = fc_loopid->se_nacl; if (!se_nacl) { pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n", loop_id); return NULL; } nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); if (!nacl->fc_port) { pr_err("Unable to locate struct fc_port\n"); return NULL; } return nacl->fc_port; } /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static void tcm_qla2xxx_set_sess_by_loop_id( struct tcm_qla2xxx_lport *lport, struct se_node_acl *new_se_nacl, struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, struct fc_port *fc_port, uint16_t loop_id) { struct se_node_acl *saved_nacl; struct tcm_qla2xxx_fc_loopid *fc_loopid; pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); fc_loopid = &((struct tcm_qla2xxx_fc_loopid *) lport->lport_loopid_map)[loop_id]; saved_nacl = fc_loopid->se_nacl; if (!saved_nacl) { pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; if (fc_port->se_sess != se_sess) fc_port->se_sess = se_sess; if (nacl->fc_port != fc_port) nacl->fc_port = fc_port; return; } if (nacl->fc_port) { if (new_se_nacl == NULL) { pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = NULL; nacl->fc_port = NULL; return; } pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n"); fc_loopid->se_nacl = new_se_nacl; if (fc_port->se_sess != se_sess) fc_port->se_sess = se_sess; if (nacl->fc_port != fc_port) nacl->fc_port = fc_port; return; } if (new_se_nacl == NULL) { pr_debug("Clearing fc_loopid->se_nacl\n"); fc_loopid->se_nacl = NULL; return; } pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n"); fc_loopid->se_nacl = new_se_nacl; if (fc_port->se_sess != se_sess) fc_port->se_sess = se_sess; if (nacl->fc_port != fc_port) nacl->fc_port = fc_port; pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n", nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); } /* * Should always be called with qla_hw_data->tgt.sess_lock held. 
*/ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) { struct se_session *se_sess = sess->se_sess; tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, sess, port_id_to_be_id(sess->d_id)); tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, sess, sess->loop_id); } static void tcm_qla2xxx_free_session(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); struct se_session *se_sess; struct tcm_qla2xxx_lport *lport; se_sess = sess->se_sess; if (!se_sess) { pr_err("struct fc_port->se_sess is NULL\n"); dump_stack(); return; } lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); return; } target_wait_for_sess_cmds(se_sess); target_remove_session(se_sess); } static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, struct se_session *se_sess, void *p) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); struct tcm_qla2xxx_lport *lport = tpg->lport; struct qla_hw_data *ha = lport->qla_vha->hw; struct se_node_acl *se_nacl = se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); struct fc_port *qlat_sess = p; uint16_t loop_id = qlat_sess->loop_id; unsigned long flags; /* * And now setup se_nacl and session pointers into HW lport internal * mappings for fabric S_ID and LOOP_ID. */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess, port_id_to_be_id(qlat_sess->d_id)); tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, qlat_sess, loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return 0; } /* * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() * to locate struct se_node_acl */ static int tcm_qla2xxx_check_initiator_node_acl( scsi_qla_host_t *vha, unsigned char *fc_wwpn, struct fc_port *qlat_sess) { struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; struct tcm_qla2xxx_tpg *tpg; struct se_session *se_sess; unsigned char port_name[36]; int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count : TCM_QLA2XXX_DEFAULT_TAGS; lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); return -EINVAL; } /* * Locate the TPG=1 reference.. */ tpg = lport->tpg_1; if (!tpg) { pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n"); return -EINVAL; } /* * Format the FCP Initiator port_name into colon separated values to * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs. */ memset(&port_name, 0, 36); snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn); /* * Locate our struct se_node_acl either from an explicit NodeACL created * via ConfigFS, or via running in TPG demo mode.
*/ se_sess = target_setup_session(&tpg->se_tpg, num_tags, sizeof(struct qla_tgt_cmd), TARGET_PROT_ALL, port_name, qlat_sess, tcm_qla2xxx_session_cb); if (IS_ERR(se_sess)) return PTR_ERR(se_sess); return 0; } static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, uint16_t loop_id, bool conf_compl_supported) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr; struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); u32 key; if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24) pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", sess, sess->port_name, sess->loop_id, loop_id, sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain, s_id.b.area, s_id.b.al_pa); if (sess->loop_id != loop_id) { /* * Because we can shuffle loop IDs around and we * update different sessions non-atomically, we might * have overwritten this session's old loop ID * already, and we might end up overwriting some other * session that will be updated later. So we have to * be extra careful and we can't warn about those things... */ if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl) lport->lport_loopid_map[sess->loop_id].se_nacl = NULL; lport->lport_loopid_map[loop_id].se_nacl = se_nacl; sess->loop_id = loop_id; } if (sess->d_id.b24 != s_id.b24) { key = (((u32) sess->d_id.b.domain << 16) | ((u32) sess->d_id.b.area << 8) | ((u32) sess->d_id.b.al_pa)); if (btree_lookup32(&lport->lport_fcport_map, key)) WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n", sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa); else WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa); key = (((u32) s_id.b.domain << 16) | ((u32) s_id.b.area << 8) | ((u32) s_id.b.al_pa)); if (btree_lookup32(&lport->lport_fcport_map, key)) { WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n", s_id.b.domain, s_id.b.area, s_id.b.al_pa); btree_update32(&lport->lport_fcport_map, key, se_nacl); } else { btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC); } sess->d_id = s_id; nacl->nport_id = key; } sess->conf_compl_supported = conf_compl_supported; } /* * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. 
*/ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag, .handle_cmd = tcm_qla2xxx_handle_cmd, .handle_data = tcm_qla2xxx_handle_data, .handle_tmr = tcm_qla2xxx_handle_tmr, .get_cmd = tcm_qla2xxx_get_cmd, .rel_cmd = tcm_qla2xxx_rel_cmd, .free_cmd = tcm_qla2xxx_free_cmd, .free_mcmd = tcm_qla2xxx_free_mcmd, .free_session = tcm_qla2xxx_free_session, .update_sess = tcm_qla2xxx_update_sess, .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, .get_dif_tags = tcm_qla2xxx_dif_tags, .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, }; static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) { int rc; size_t map_sz; rc = btree_init32(&lport->lport_fcport_map); if (rc) { pr_err("Unable to initialize lport->lport_fcport_map btree\n"); return rc; } map_sz = array_size(65536, sizeof(struct tcm_qla2xxx_fc_loopid)); lport->lport_loopid_map = vzalloc(map_sz); if (!lport->lport_loopid_map) { pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", map_sz); btree_destroy32(&lport->lport_fcport_map); return -ENOMEM; } pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", map_sz); return 0; } static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, void *target_lport_ptr, u64 npiv_wwpn, u64 npiv_wwnn) { struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport = (struct tcm_qla2xxx_lport *)target_lport_ptr; /* * Setup tgt_ops, local pointer to vha and target_lport_ptr */ ha->tgt.tgt_ops = &tcm_qla2xxx_template; vha->vha_tgt.target_lport_ptr = target_lport_ptr; lport->qla_vha = vha; return 0; } static struct se_wwn *tcm_qla2xxx_make_lport( struct target_fabric_configfs *tf, struct config_group *group, const char *name) { struct tcm_qla2xxx_lport *lport; u64 wwpn; int ret = -ENODEV; if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); if (!lport) { pr_err("Unable to allocate struct tcm_qla2xxx_lport\n"); return ERR_PTR(-ENOMEM); } lport->lport_wwpn = wwpn; tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, wwpn); sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn); ret = tcm_qla2xxx_init_lport(lport); if (ret != 0) goto out; ret = qlt_lport_register(lport, wwpn, 0, 0, tcm_qla2xxx_lport_register_cb); if (ret != 0) goto out_lport; return &lport->lport_wwn; out_lport: vfree(lport->lport_loopid_map); btree_destroy32(&lport->lport_fcport_map); out: kfree(lport); return ERR_PTR(ret); } static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *vha = lport->qla_vha; struct se_node_acl *node; u32 key = 0; /* * Call into qla2x_target.c LLD logic to complete the * shutdown of struct qla_tgt after the call to * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. 
*/ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_stop_phase2(vha->vha_tgt.qla_tgt); qlt_lport_deregister(vha); vfree(lport->lport_loopid_map); btree_for_each_safe32(&lport->lport_fcport_map, key, node) btree_remove32(&lport->lport_fcport_map, key); btree_destroy32(&lport->lport_fcport_map); kfree(lport); } static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, void *target_lport_ptr, u64 npiv_wwpn, u64 npiv_wwnn) { struct fc_vport *vport; struct Scsi_Host *sh = base_vha->host; struct scsi_qla_host *npiv_vha; struct tcm_qla2xxx_lport *lport = (struct tcm_qla2xxx_lport *)target_lport_ptr; struct tcm_qla2xxx_lport *base_lport = (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; struct fc_vport_identifiers vport_id; if (qla_ini_mode_enabled(base_vha)) { pr_err("qla2xxx base_vha not enabled for target mode\n"); return -EPERM; } if (!base_lport || !base_lport->tpg_1 || !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) { pr_err("qla2xxx base_lport or tpg_1 not available\n"); return -EPERM; } memset(&vport_id, 0, sizeof(vport_id)); vport_id.port_name = npiv_wwpn; vport_id.node_name = npiv_wwnn; vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; vport_id.vport_type = FC_PORTTYPE_NPIV; vport_id.disable = false; vport = fc_vport_create(sh, 0, &vport_id); if (!vport) { pr_err("fc_vport_create failed for qla2xxx_npiv\n"); return -ENODEV; } /* * Setup local pointer to NPIV vhba + target_lport_ptr */ npiv_vha = (struct scsi_qla_host *)vport->dd_data; npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; lport->qla_vha = npiv_vha; scsi_host_get(npiv_vha->host); return 0; } static struct se_wwn *tcm_qla2xxx_npiv_make_lport( struct target_fabric_configfs *tf, struct config_group *group, const char *name) { struct tcm_qla2xxx_lport *lport; u64 phys_wwpn, npiv_wwpn, npiv_wwnn; char *p, tmp[128]; int ret; snprintf(tmp, 128, "%s", name); p = strchr(tmp, '@'); if (!p) { pr_err("Unable to locate NPIV '@' separator\n"); return ERR_PTR(-EINVAL); } *p++ = '\0'; if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) return ERR_PTR(-EINVAL); if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, &npiv_wwpn, &npiv_wwnn) < 0) return ERR_PTR(-EINVAL); lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); if (!lport) { pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n"); return ERR_PTR(-ENOMEM); } lport->lport_npiv_wwpn = npiv_wwpn; lport->lport_npiv_wwnn = npiv_wwnn; sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); ret = tcm_qla2xxx_init_lport(lport); if (ret != 0) goto out; ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, tcm_qla2xxx_lport_register_npiv_cb); if (ret != 0) goto out_lport; return &lport->lport_wwn; out_lport: vfree(lport->lport_loopid_map); btree_destroy32(&lport->lport_fcport_map); out: kfree(lport); return ERR_PTR(ret); } static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *npiv_vha = lport->qla_vha; struct qla_hw_data *ha = npiv_vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); scsi_host_put(npiv_vha->host); /* * Notify libfc that we want to release the vha->fc_vport */ fc_vport_terminate(npiv_vha->fc_vport); scsi_host_put(base_vha->host); kfree(lport); } static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, char *page) { return sprintf(page, "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n", QLA2XXX_VERSION, 
utsname()->sysname, utsname()->machine, utsname()->release); } CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version); static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { &tcm_qla2xxx_wwn_attr_version, NULL, }; static const struct target_core_fabric_ops tcm_qla2xxx_ops = { .module = THIS_MODULE, .fabric_name = "qla2xxx", .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), /* * XXX: Limit assumes single page per scatter-gather-list entry. * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096 */ .max_data_sg_nents = 1200, .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, .tpg_get_tag = tcm_qla2xxx_get_tag, .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_write_protect, .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_prod_write_protect, .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, .close_session = tcm_qla2xxx_close_session, .sess_get_initiator_sid = NULL, .write_pending = tcm_qla2xxx_write_pending, .get_cmd_state = tcm_qla2xxx_get_cmd_state, .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, .aborted_task = tcm_qla2xxx_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c */ .fabric_make_wwn = tcm_qla2xxx_make_lport, .fabric_drop_wwn = tcm_qla2xxx_drop_lport, .fabric_make_tpg = tcm_qla2xxx_make_tpg, .fabric_enable_tpg = tcm_qla2xxx_enable_tpg, .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, }; static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .module = THIS_MODULE, .fabric_name = "qla2xxx_npiv", .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, .tpg_get_tag = tcm_qla2xxx_get_tag, .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_prod_write_protect, .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, .close_session = tcm_qla2xxx_close_session, .sess_get_initiator_sid = NULL, .write_pending = tcm_qla2xxx_write_pending, .get_cmd_state = tcm_qla2xxx_get_cmd_state, .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, .aborted_task = tcm_qla2xxx_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c */ .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, .fabric_enable_tpg = tcm_qla2xxx_npiv_enable_tpg, .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, }; static int tcm_qla2xxx_register_configfs(void) { int ret; pr_debug("TCM QLOGIC QLA2XXX fabric 
module %s on %s/%s on %s\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine, utsname()->release); ret = target_register_template(&tcm_qla2xxx_ops); if (ret) return ret; ret = target_register_template(&tcm_qla2xxx_npiv_ops); if (ret) goto out_fabric; tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", WQ_MEM_RECLAIM, 0); if (!tcm_qla2xxx_free_wq) { ret = -ENOMEM; goto out_fabric_npiv; } return 0; out_fabric_npiv: target_unregister_template(&tcm_qla2xxx_npiv_ops); out_fabric: target_unregister_template(&tcm_qla2xxx_ops); return ret; } static void tcm_qla2xxx_deregister_configfs(void) { destroy_workqueue(tcm_qla2xxx_free_wq); target_unregister_template(&tcm_qla2xxx_ops); target_unregister_template(&tcm_qla2xxx_npiv_ops); } static int __init tcm_qla2xxx_init(void) { int ret; BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64); BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64); BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32); BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64); BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12); BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4); BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64); BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64); BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24); BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24); BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64); ret = tcm_qla2xxx_register_configfs(); if (ret < 0) return ret; return 0; } static void __exit tcm_qla2xxx_exit(void) { tcm_qla2xxx_deregister_configfs(); } MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver"); MODULE_LICENSE("GPL"); module_init(tcm_qla2xxx_init); module_exit(tcm_qla2xxx_exit);
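/*
 * Illustrative sketch (not part of tcm_qla2xxx.c): a minimal, self-contained
 * user-space model of the 24-bit S_ID key packing that tcm_qla2xxx_update_sess()
 * and tcm_qla2xxx_set_sess_by_s_id() above rely on before indexing the per-lport
 * fcport map. The demo_* names are hypothetical stand-ins; the real driver stores
 * the resulting key in a btree via btree_insert32()/btree_lookup32(), which is
 * not reproduced here.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's port_id_t domain/area/al_pa bytes. */
struct demo_port_id {
	uint8_t domain;
	uint8_t area;
	uint8_t al_pa;
};

/* Same fold as the key computation in tcm_qla2xxx_update_sess(): one 24-bit key. */
static uint32_t demo_sid_to_key(struct demo_port_id id)
{
	return ((uint32_t)id.domain << 16) |
	       ((uint32_t)id.area << 8) |
	       (uint32_t)id.al_pa;
}

int main(void)
{
	struct demo_port_id sid = { .domain = 0x01, .area = 0x02, .al_pa = 0x03 };

	/* Prints "s_id 01:02:03 -> key 0x010203". */
	printf("s_id %02x:%02x:%02x -> key 0x%06x\n",
	       sid.domain, sid.area, sid.al_pa, demo_sid_to_key(sid));
	return 0;
}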
linux-master
drivers/scsi/qla2xxx/tcm_qla2xxx.c
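/*
 * Illustrative sketch (not part of the driver sources): the tcm_qla2xxx callbacks
 * above (tcm_qla2xxx_release_cmd(), tcm_qla2xxx_get_cmd_state(), the nacl/tpg/lport
 * lookups) recover their private command and port structures from an embedded TCM
 * object with container_of(). This is a minimal user-space model of that pattern;
 * the demo_* types are hypothetical stand-ins, not the real structures.
 */
#include <stddef.h>
#include <stdio.h>

/* Same offsetof() trick as the kernel's container_of(). */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_se_cmd {
	int scsi_status;
};

struct demo_qla_tgt_cmd {
	unsigned int state;
	struct demo_se_cmd se_cmd;	/* embedded, as se_cmd is inside qla_tgt_cmd */
};

/* Mirrors the shape of tcm_qla2xxx_get_cmd_state(): step back to the outer command. */
static unsigned int demo_get_cmd_state(struct demo_se_cmd *se_cmd)
{
	struct demo_qla_tgt_cmd *cmd =
		demo_container_of(se_cmd, struct demo_qla_tgt_cmd, se_cmd);

	return cmd->state;
}

int main(void)
{
	struct demo_qla_tgt_cmd cmd = { .state = 7 };

	/* TCM core only hands back &cmd.se_cmd; the fabric driver recovers the rest. */
	printf("state via embedded se_cmd: %u\n", demo_get_cmd_state(&cmd.se_cmd));
	return 0;
}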
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell Fibre Channel HBA Driver * Copyright (c) 2021 Marvell */ #include "qla_def.h" #include "qla_edif.h" #include <linux/kthread.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <scsi/scsi_tcq.h> static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, struct list_head *sa_list); static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame); static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, uint16_t sa_index); static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *); struct edb_node { struct list_head list; uint32_t ntype; union { port_id_t plogi_did; uint32_t async; port_id_t els_sid; struct edif_sa_update_aen sa_aen; } u; }; static struct els_sub_cmd { uint16_t cmd; const char *str; } sc_str[] = { {SEND_ELS, "send ELS"}, {SEND_ELS_REPLY, "send ELS Reply"}, {PULL_ELS, "retrieve ELS"}, }; const char *sc_to_str(uint16_t cmd) { int i; struct els_sub_cmd *e; for (i = 0; i < ARRAY_SIZE(sc_str); i++) { e = sc_str + i; if (cmd == e->cmd) return e->str; } return "unknown"; } static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha) { unsigned long flags; struct edb_node *edbnode = NULL; spin_lock_irqsave(&vha->e_dbell.db_lock, flags); /* db nodes are fifo - no qualifications done */ if (!list_empty(&vha->e_dbell.head)) { edbnode = list_first_entry(&vha->e_dbell.head, struct edb_node, list); list_del_init(&edbnode->list); } spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); return edbnode; } static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node) { list_del_init(&node->list); kfree(node); } static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport, uint16_t handle) { struct edif_list_entry *entry; struct edif_list_entry *tentry; struct list_head *indx_list = &fcport->edif.edif_indx_list; list_for_each_entry_safe(entry, tentry, indx_list, next) { if (entry->handle == handle) return entry; } return NULL; } /* timeout called when no traffic and delayed rx sa_index delete */ static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t) { struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer); fc_port_t *fcport = edif_entry->fcport; struct scsi_qla_host *vha = fcport->vha; struct edif_sa_ctl *sa_ctl; uint16_t nport_handle; unsigned long flags = 0; ql_dbg(ql_dbg_edif, vha, 0x3069, "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n", __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24); /* * if delete_sa_index is valid then no one has serviced this * delayed delete */ spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); /* * delete_sa_index is invalidated when we find the new sa_index in * the incoming data stream. If it is not invalidated then we are * still looking for the new sa_index because there is no I/O and we * need to just force the rx delete and move on. Otherwise * we could get another rekey which will result in an error 66. 
*/ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { uint16_t delete_sa_index = edif_entry->delete_sa_index; edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; nport_handle = edif_entry->handle; spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0); if (sa_ctl) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n", __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index, nport_handle); sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); qla_post_sa_replace_work(fcport->vha, fcport, nport_handle, sa_ctl); } else { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sa_ctl not found for delete_sa_index: %d\n", __func__, edif_entry->delete_sa_index); } } else { spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); } } /* * create a new list entry for this nport handle and * add an sa_update index to the list - called for sa_update */ static int qla_edif_list_add_sa_update_index(fc_port_t *fcport, uint16_t sa_index, uint16_t handle) { struct edif_list_entry *entry; unsigned long flags = 0; /* if the entry exists, then just update the sa_index */ entry = qla_edif_list_find_sa_index(fcport, handle); if (entry) { entry->update_sa_index = sa_index; entry->count = 0; return 0; } /* * This is the normal path - there should be no existing entry * when update is called. The exception is at startup * when update is called for the first two sa_indexes * followed by a delete of the first sa_index */ entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC); if (!entry) return -ENOMEM; INIT_LIST_HEAD(&entry->next); entry->handle = handle; entry->update_sa_index = sa_index; entry->delete_sa_index = INVALID_EDIF_SA_INDEX; entry->count = 0; entry->flags = 0; timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0); spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); list_add_tail(&entry->next, &fcport->edif.edif_indx_list); spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); return 0; } /* remove an entry from the list */ static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry) { unsigned long flags = 0; spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); list_del(&entry->next); spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); } int qla_post_sa_replace_work(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE); if (!e) return QLA_FUNCTION_FAILED; e->u.sa_update.fcport = fcport; e->u.sa_update.sa_ctl = sa_ctl; e->u.sa_update.nport_handle = nport_handle; fcport->flags |= FCF_ASYNC_ACTIVE; return qla2x00_post_work(vha, e); } static void qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport) { ql_dbg(ql_dbg_edif, vha, 0x2058, "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n", fcport->node_name, fcport->port_name, fcport->d_id.b24); fcport->edif.tx_rekey_cnt = 0; fcport->edif.rx_rekey_cnt = 0; fcport->edif.tx_bytes = 0; fcport->edif.rx_bytes = 0; } static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job, fc_port_t *fcport) { struct extra_auth_els *p; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct qla_bsg_auth_els_request *req = (struct qla_bsg_auth_els_request *)bsg_job->request; if (!vha->hw->flags.edif_enabled) { ql_dbg(ql_dbg_edif, vha, 0x9105, "%s edif not enabled\n", __func__); goto done; } if 
(DBELL_INACTIVE(vha)) { ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); goto done; } p = &req->e; /* Get response */ if (p->sub_cmd == PULL_ELS) { struct qla_bsg_auth_els_reply *rpl = (struct qla_bsg_auth_els_reply *)bsg_job->reply; qla_pur_get_pending(vha, fcport, bsg_job); ql_dbg(ql_dbg_edif, vha, 0x911d, "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n", __func__, sc_to_str(p->sub_cmd), fcport->port_name, fcport->d_id.b24, rpl->rx_xchg_address, rpl->r.reply_payload_rcv_len, bsg_job); goto done; } return 0; done: bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return -EIO; } fc_port_t * qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id) { fc_port_t *f, *tf; f = NULL; list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { if (f->d_id.b24 == id->b24) return f; } return NULL; } /** * qla_edif_app_check(): check for valid application id. * @vha: host adapter pointer * @appid: application id * Return: false = fail, true = pass */ static bool qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid) { /* check that the app is allow/known to the driver */ if (appid.app_vid != EDIF_APP_ID) { ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)", __func__, appid.app_vid); return false; } if (appid.version != EDIF_VERSION1) { ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)", __func__, appid.version); return false; } return true; } static void qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl, int index) { unsigned long flags = 0; spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); list_del(&sa_ctl->next); spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); if (index >= 512) fcport->edif.tx_rekey_cnt--; else fcport->edif.rx_rekey_cnt--; kfree(sa_ctl); } /* return an index to the freepool */ static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir, uint16_t sa_index) { void *sa_id_map; struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; u16 lsa_index = sa_index; ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, "%s: entry\n", __func__); if (dir) { sa_id_map = ha->edif_tx_sa_id_map; lsa_index -= EDIF_TX_SA_INDEX_BASE; } else { sa_id_map = ha->edif_rx_sa_id_map; } spin_lock_irqsave(&ha->sadb_fp_lock, flags); clear_bit(lsa_index, sa_id_map); spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: index %d added to free pool\n", __func__, sa_index); } static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport, struct edif_sa_index_entry *entry, int pdir) { struct edif_list_entry *edif_entry; struct edif_sa_ctl *sa_ctl; int i, dir; int key_cnt = 0; for (i = 0; i < 2; i++) { if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX) continue; if (fcport->loop_id != entry->handle) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n", __func__, i, entry->handle, fcport->loop_id, entry->sa_pair[i].sa_index); } /* release the sa_ctl */ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, entry->sa_pair[i].sa_index, pdir); if (sa_ctl && qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index); qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); } else { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl); } /* Release the index */ ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_index %d, 
nph: 0x%x\n", __func__, entry->sa_pair[i].sa_index, entry->handle); dir = (entry->sa_pair[i].sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1; qla_edif_add_sa_index_to_freepool(fcport, dir, entry->sa_pair[i].sa_index); /* Delete timer on RX */ if (pdir != SAU_FLG_TX) { edif_entry = qla_edif_list_find_sa_index(fcport, entry->handle); if (edif_entry) { ql_dbg(ql_dbg_edif, vha, 0x5033, "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n", __func__, edif_entry, edif_entry->update_sa_index, edif_entry->delete_sa_index); qla_edif_list_delete_sa_index(fcport, edif_entry); /* * valid delete_sa_index indicates there is a rx * delayed delete queued */ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { timer_shutdown(&edif_entry->timer); /* build and send the aen */ fcport->edif.rx_sa_set = 1; fcport->edif.rx_sa_pending = 0; qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, QL_VND_SA_STAT_SUCCESS, QL_VND_RX_SA_KEY, fcport); } ql_dbg(ql_dbg_edif, vha, 0x5033, "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n", __func__, edif_entry, edif_entry->update_sa_index, edif_entry->delete_sa_index); kfree(edif_entry); } } key_cnt++; } ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: %d %s keys released\n", __func__, key_cnt, pdir ? "tx" : "rx"); } /* find and release all outstanding sadb sa_indices */ void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport) { struct edif_sa_index_entry *entry, *tmp; struct qla_hw_data *ha = vha->hw; unsigned long flags; ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, "%s: Starting...\n", __func__); spin_lock_irqsave(&ha->sadb_lock, flags); list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) { if (entry->fcport == fcport) { list_del(&entry->next); spin_unlock_irqrestore(&ha->sadb_lock, flags); __qla2x00_release_all_sadb(vha, fcport, entry, 0); kfree(entry); spin_lock_irqsave(&ha->sadb_lock, flags); break; } } list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) { if (entry->fcport == fcport) { list_del(&entry->next); spin_unlock_irqrestore(&ha->sadb_lock, flags); __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX); kfree(entry); spin_lock_irqsave(&ha->sadb_lock, flags); break; } } spin_unlock_irqrestore(&ha->sadb_lock, flags); } /** * qla_delete_n2n_sess_and_wait: search for the N2N session, tear it down and * wait for the tear down to complete. In N2N topology, there is only one * active session tracking the remote device. * @vha: host adapter pointer * return code: 0 - found the session and completed the tear down. * non-zero - timeout occurred. Caller to use link bounce to reset. */ static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha) { struct fc_port *fcport; int rc = -EIO; ulong expire = jiffies + 23 * HZ; if (!N2N_TOPO(vha->hw)) return 0; fcport = NULL; list_for_each_entry(fcport, &vha->vp_fcports, list) { if (!fcport->n2n_flag) continue; ql_dbg(ql_dbg_disc, fcport->vha, 0x2016, "%s reset sess at app start \n", __func__); qla_edif_sa_ctl_init(vha, fcport); qlt_schedule_sess_for_deletion(fcport); while (time_before_eq(jiffies, expire)) { if (fcport->disc_state != DSC_DELETE_PEND) { rc = 0; break; } msleep(1); } set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; } return rc; } /** * qla_edif_app_start: application has announced its presence * @vha: host adapter pointer * @bsg_job: user request * * Set/activate doorbell. Reset current sessions and re-login with * secure flag.
*/ static int qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { int32_t rval = 0; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct app_start appstart; struct app_start_reply appreply; struct fc_port *fcport, *tf; ql_log(ql_log_info, vha, 0x1313, "EDIF application registration with driver, FC device connections will be re-established.\n"); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &appstart, sizeof(struct app_start)); ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n", __func__, appstart.app_info.app_vid, appstart.app_start_flags); if (DBELL_INACTIVE(vha)) { /* mark doorbell as active since an app is now present */ vha->e_dbell.db_flags |= EDB_ACTIVE; } else { goto out; } if (N2N_TOPO(vha->hw)) { list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) fcport->n2n_link_reset_cnt = 0; if (vha->hw->flags.n2n_fw_acc_sec) { bool link_bounce = false; /* * While authentication app was not running, remote device * could still try to login with this local port. Let's * reset the session, reconnect and re-authenticate. */ if (qla_delete_n2n_sess_and_wait(vha)) link_bounce = true; /* bounce the link to start login */ if (!vha->hw->flags.n2n_bigger || link_bounce) { set_bit(N2N_LINK_RESET, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else { qla2x00_wait_for_hba_online(vha); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); qla2x00_wait_for_hba_online(vha); } } else { list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { ql_dbg(ql_dbg_edif, vha, 0x2058, "FCSP - nn %8phN pn %8phN portid=%06x.\n", fcport->node_name, fcport->port_name, fcport->d_id.b24); ql_dbg(ql_dbg_edif, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC " "loop_id %#04x s_id %06x logout %d " "keep %d els_logo %d disc state %d auth state %d" "stop state %d\n", __func__, fcport->se_sess, fcport, fcport->port_name, fcport->loop_id, fcport->d_id.b24, fcport->logout_on_delete, fcport->keep_nport_handle, fcport->send_els_logo, fcport->disc_state, fcport->edif.auth_state, fcport->edif.app_stop); if (atomic_read(&vha->loop_state) == LOOP_DOWN) break; fcport->login_retry = vha->hw->login_retry_count; fcport->edif.app_stop = 0; fcport->edif.app_sess_online = 0; if (fcport->scan_state != QLA_FCPORT_FOUND) continue; if (fcport->port_type == FCT_UNKNOWN && !fcport->fc4_features) rval = qla24xx_async_gffid(vha, fcport, true); if (!rval && !(fcport->fc4_features & FC4_FF_TARGET || fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET))) continue; rval = 0; ql_dbg(ql_dbg_edif, vha, 0x911e, "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", __func__, fcport->port_name); qlt_schedule_sess_for_deletion(fcport); qla_edif_sa_ctl_init(vha, fcport); } set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { /* mark as active since an app is now present */ vha->pur_cinfo.enode_flags = ENODE_ACTIVE; } else { ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n", __func__); } out: appreply.host_support_edif = vha->hw->flags.edif_enabled; appreply.edif_enode_active = vha->pur_cinfo.enode_flags; appreply.edif_edb_active = vha->e_dbell.db_flags; appreply.version = EDIF_VERSION1; bsg_job->reply_len = sizeof(struct fc_bsg_reply); SET_DID_STATUS(bsg_reply->result, DID_OK); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &appreply, sizeof(struct app_start_reply)); ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start completed with 0x%x\n", 
        __func__, rval);

        return rval;
}

/**
 * qla_edif_app_stop - app has announced it's exiting.
 * @vha: host adapter pointer
 * @bsg_job: user space command pointer
 *
 * Free any in flight messages, clear all doorbell events
 * to application. Reject any message related to security.
 */
static int
qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
        struct app_stop appstop;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct fc_port *fcport, *tf;

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &appstop,
            sizeof(struct app_stop));

        ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
            __func__, appstop.app_info.app_vid);

        /* Call db stop and enode stop functions */

        /* if we leave this running short waits are operational < 16 secs */
        qla_enode_stop(vha);        /* stop enode */
        qla_edb_stop(vha);          /* stop db */

        list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
                if (!(fcport->flags & FCF_FCSP_DEVICE))
                        continue;

                if (fcport->flags & FCF_FCSP_DEVICE) {
                        ql_dbg(ql_dbg_edif, vha, 0xf084,
                            "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
                            __func__, fcport,
                            fcport->port_name, fcport->loop_id,
                            fcport->d_id.b24, fcport->logout_on_delete,
                            fcport->keep_nport_handle, fcport->send_els_logo);

                        if (atomic_read(&vha->loop_state) == LOOP_DOWN)
                                break;

                        fcport->edif.app_stop = 1;
                        ql_dbg(ql_dbg_edif, vha, 0x911e,
                            "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
                            __func__, fcport->port_name);

                        fcport->send_els_logo = 1;
                        qlt_schedule_sess_for_deletion(fcport);
                }
        }

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        SET_DID_STATUS(bsg_reply->result, DID_OK);

        /* no return interface to app - it assumes we cleaned up ok */
        return 0;
}

static int
qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
                struct app_plogi_reply *appplogireply)
{
        int ret = 0;

        if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s: wwpn %8phC Both SA indexes have not been set: TX %d, RX %d.\n",
                    __func__, fcport->port_name, fcport->edif.tx_sa_set,
                    fcport->edif.rx_sa_set);
                appplogireply->prli_status = 0;
                ret = 1;
        } else {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
                    fcport->port_name);
                fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
                fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
                appplogireply->prli_status = 1;
        }
        return ret;
}

/**
 * qla_edif_app_authok - authentication by app succeeded. Driver can proceed
 * with prli
 * @vha: host adapter pointer
 * @bsg_job: user request
 */
static int
qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
        struct auth_complete_cmd appplogiok;
        struct app_plogi_reply appplogireply = {0};
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        fc_port_t *fcport = NULL;
        port_id_t portid = {0};

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &appplogiok,
            sizeof(struct auth_complete_cmd));

        /* silent unaligned access warning */
        portid.b.domain = appplogiok.u.d_id.b.domain;
        portid.b.area   = appplogiok.u.d_id.b.area;
        portid.b.al_pa  = appplogiok.u.d_id.b.al_pa;

        appplogireply.version = EDIF_VERSION1;

        switch (appplogiok.type) {
        case PL_TYPE_WWPN:
                fcport = qla2x00_find_fcport_by_wwpn(vha,
                    appplogiok.u.wwpn, 0);
                if (!fcport)
                        ql_dbg(ql_dbg_edif, vha, 0x911d,
                            "%s wwpn lookup failed: %8phC\n",
                            __func__, appplogiok.u.wwpn);
                break;
        case PL_TYPE_DID:
                fcport = qla2x00_find_fcport_by_pid(vha, &portid);
                if (!fcport)
                        ql_dbg(ql_dbg_edif, vha, 0x911d,
                            "%s d_id lookup failed: %x\n", __func__,
                            portid.b24);
                break;
        default:
                ql_dbg(ql_dbg_edif, vha, 0x911d,
                    "%s undefined type: %x\n", __func__,
                    appplogiok.type);
                break;
        }

        if (!fcport) {
                SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                goto errstate_exit;
        }

        /*
         * if port is online then this is a REKEY operation
         * Only do sa update checking
         */
        if (atomic_read(&fcport->state) == FCS_ONLINE) {
                ql_dbg(ql_dbg_edif, vha, 0x911d,
                    "%s Skipping PRLI complete based on rekey\n", __func__);
                appplogireply.prli_status = 1;
                SET_DID_STATUS(bsg_reply->result, DID_OK);
                qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
                goto errstate_exit;
        }

        /* make sure in AUTH_PENDING or else reject */
        if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s wwpn %8phC is not in auth pending state (%x)\n",
                    __func__, fcport->port_name, fcport->disc_state);
                SET_DID_STATUS(bsg_reply->result, DID_OK);
                appplogireply.prli_status = 0;
                goto errstate_exit;
        }

        SET_DID_STATUS(bsg_reply->result, DID_OK);
        appplogireply.prli_status = 1;
        fcport->edif.authok = 1;
        if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s: wwpn %8phC Both SA indexes have not been set: TX %d, RX %d.\n",
                    __func__, fcport->port_name, fcport->edif.tx_sa_set,
                    fcport->edif.rx_sa_set);
                SET_DID_STATUS(bsg_reply->result, DID_OK);
                appplogireply.prli_status = 0;
                goto errstate_exit;
        } else {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s wwpn %8phC Both SA(s) updated.\n", __func__,
                    fcport->port_name);
                fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
                fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
        }

        if (qla_ini_mode_enabled(vha)) {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
                    __func__, fcport->port_name);
                qla24xx_post_prli_work(vha, fcport);
        }

errstate_exit:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, &appplogireply,
                sizeof(struct app_plogi_reply));

        return 0;
}

/**
 * qla_edif_app_authfail - authentication by app has failed. Driver is given
 * notice to tear down current session.
 * @vha: host adapter pointer
 * @bsg_job: user request
 */
static int
qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
        int32_t rval = 0;
        struct auth_complete_cmd appplogifail;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        fc_port_t *fcport = NULL;
        port_id_t portid = {0};

        ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &appplogifail,
            sizeof(struct auth_complete_cmd));

        /* silent unaligned access warning */
        portid.b.domain = appplogifail.u.d_id.b.domain;
        portid.b.area   = appplogifail.u.d_id.b.area;
        portid.b.al_pa  = appplogifail.u.d_id.b.al_pa;

        /*
         * TODO: edif: app has failed this plogi. Inform driver to
         * take any action (if any).
         */
        switch (appplogifail.type) {
        case PL_TYPE_WWPN:
                fcport = qla2x00_find_fcport_by_wwpn(vha,
                    appplogifail.u.wwpn, 0);
                SET_DID_STATUS(bsg_reply->result, DID_OK);
                break;
        case PL_TYPE_DID:
                fcport = qla2x00_find_fcport_by_pid(vha, &portid);
                if (!fcport)
                        ql_dbg(ql_dbg_edif, vha, 0x911d,
                            "%s d_id lookup failed: %x\n", __func__,
                            portid.b24);
                SET_DID_STATUS(bsg_reply->result, DID_OK);
                break;
        default:
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s undefined type: %x\n", __func__,
                    appplogifail.type);
                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
                SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                rval = -1;
                break;
        }

        ql_dbg(ql_dbg_edif, vha, 0x911d,
            "%s fcport is 0x%p\n", __func__, fcport);

        if (fcport) {
                /* set/reset edif values and flags */
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
                    __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);

                if (qla_ini_mode_enabled(fcport->vha)) {
                        fcport->send_els_logo = 1;
                        qlt_schedule_sess_for_deletion(fcport);
                }
        }

        return rval;
}

/**
 * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
 * [initiator|target] mode). It can specify a session with a specific nport id
 * or all sessions.
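 * The reply is an app_pinfo_reply holding up to num_ports app_pinfo entries;
 * for fabric-attached ports the handler may first issue GFF_ID so the remote
 * type (target/initiator) and auth state it reports back are current.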
* @vha: host adapter pointer * @bsg_job: user request pointer */ static int qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { int32_t rval = 0; int32_t pcnt = 0; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct app_pinfo_req app_req; struct app_pinfo_reply *app_reply; port_id_t tdid; ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &app_req, sizeof(struct app_pinfo_req)); app_reply = kzalloc((sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL); if (!app_reply) { SET_DID_STATUS(bsg_reply->result, DID_ERROR); rval = -1; } else { struct fc_port *fcport = NULL, *tf; app_reply->version = EDIF_VERSION1; list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { if (!(fcport->flags & FCF_FCSP_DEVICE)) continue; tdid.b.domain = app_req.remote_pid.domain; tdid.b.area = app_req.remote_pid.area; tdid.b.al_pa = app_req.remote_pid.al_pa; ql_dbg(ql_dbg_edif, vha, 0x2058, "APP request entry - portid=%06x.\n", tdid.b24); /* Ran out of space */ if (pcnt >= app_req.num_ports) break; if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) continue; if (!N2N_TOPO(vha->hw)) { if (fcport->scan_state != QLA_FCPORT_FOUND) continue; if (fcport->port_type == FCT_UNKNOWN && !fcport->fc4_features) rval = qla24xx_async_gffid(vha, fcport, true); if (!rval && !(fcport->fc4_features & FC4_FF_TARGET || fcport->port_type & (FCT_TARGET | FCT_NVME_TARGET))) continue; } rval = 0; app_reply->ports[pcnt].version = EDIF_VERSION1; app_reply->ports[pcnt].remote_type = VND_CMD_RTYPE_UNKNOWN; if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET)) app_reply->ports[pcnt].remote_type |= VND_CMD_RTYPE_TARGET; if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR)) app_reply->ports[pcnt].remote_type |= VND_CMD_RTYPE_INITIATOR; app_reply->ports[pcnt].remote_pid = fcport->d_id; ql_dbg(ql_dbg_edif, vha, 0x2058, "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n", fcport->node_name, fcport->port_name, pcnt, fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE); switch (fcport->edif.auth_state) { case VND_CMD_AUTH_STATE_ELS_RCVD: if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) { fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED; app_reply->ports[pcnt].auth_state = VND_CMD_AUTH_STATE_NEEDED; } else { app_reply->ports[pcnt].auth_state = VND_CMD_AUTH_STATE_ELS_RCVD; } break; default: app_reply->ports[pcnt].auth_state = fcport->edif.auth_state; break; } memcpy(app_reply->ports[pcnt].remote_wwpn, fcport->port_name, 8); app_reply->ports[pcnt].remote_state = (atomic_read(&fcport->state) == FCS_ONLINE ? 
1 : 0); pcnt++; if (tdid.b24 != 0) break; } app_reply->port_count = pcnt; SET_DID_STATUS(bsg_reply->result, DID_OK); } bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, app_reply, sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt); kfree(app_reply); return rval; } /** * qla_edif_app_getstats - app would like to read various statistics info * @vha: host adapter pointer * @bsg_job: user request */ static int32_t qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { int32_t rval = 0; struct fc_bsg_reply *bsg_reply = bsg_job->reply; uint32_t size; struct app_sinfo_req app_req; struct app_stats_reply *app_reply; uint32_t pcnt = 0; sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &app_req, sizeof(struct app_sinfo_req)); if (app_req.num_ports == 0) { ql_dbg(ql_dbg_async, vha, 0x911d, "%s app did not indicate number of ports to return\n", __func__); SET_DID_STATUS(bsg_reply->result, DID_ERROR); rval = -1; } size = sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * app_req.num_ports); app_reply = kzalloc(size, GFP_KERNEL); if (!app_reply) { SET_DID_STATUS(bsg_reply->result, DID_ERROR); rval = -1; } else { struct fc_port *fcport = NULL, *tf; app_reply->version = EDIF_VERSION1; list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { if (fcport->edif.enable) { if (pcnt > app_req.num_ports) break; app_reply->elem[pcnt].rekey_count = fcport->edif.rekey_cnt; app_reply->elem[pcnt].tx_bytes = fcport->edif.tx_bytes; app_reply->elem[pcnt].rx_bytes = fcport->edif.rx_bytes; memcpy(app_reply->elem[pcnt].remote_wwpn, fcport->port_name, 8); pcnt++; } } app_reply->elem_count = pcnt; SET_DID_STATUS(bsg_reply->result, DID_OK); } bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, app_reply, sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt)); kfree(app_reply); return rval; } static int32_t qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { struct fc_port *fcport; struct aen_complete_cmd ack; struct fc_bsg_reply *bsg_reply = bsg_job->reply; sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &ack, sizeof(ack)); ql_dbg(ql_dbg_edif, vha, 0x70cf, "%s: %06x event_code %x\n", __func__, ack.port_id.b24, ack.event_code); fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id); SET_DID_STATUS(bsg_reply->result, DID_OK); if (!fcport) { ql_dbg(ql_dbg_edif, vha, 0x70cf, "%s: unable to find fcport %06x \n", __func__, ack.port_id.b24); return 0; } switch (ack.event_code) { case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: fcport->edif.sess_down_acked = 1; break; default: break; } return 0; } static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; u32 sg_skip, reply_payload_len; bool keep; struct edb_node *dbnode = NULL; struct edif_app_dbell ap; int dat_size = 0; sg_skip = 0; reply_payload_len = bsg_job->reply_payload.payload_len; while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) { dbnode = qla_edb_getnext(vha); if (dbnode) { keep = true; dat_size = 0; ap.event_code = dbnode->ntype; switch (dbnode->ntype) { case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: case VND_CMD_AUTH_STATE_NEEDED: ap.port_id = dbnode->u.plogi_did; dat_size += sizeof(ap.port_id); break; case 
VND_CMD_AUTH_STATE_ELS_RCVD: ap.port_id = dbnode->u.els_sid; dat_size += sizeof(ap.port_id); break; case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: ap.port_id = dbnode->u.sa_aen.port_id; memcpy(&ap.event_data, &dbnode->u, sizeof(struct edif_sa_update_aen)); dat_size += sizeof(struct edif_sa_update_aen); break; default: keep = false; ql_log(ql_log_warn, vha, 0x09102, "%s unknown DB type=%d %p\n", __func__, dbnode->ntype, dbnode); break; } ap.event_data_size = dat_size; /* 8 = sizeof(ap.event_code + ap.event_data_size) */ dat_size += 8; if (keep) sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, &ap, dat_size, sg_skip, false); ql_dbg(ql_dbg_edif, vha, 0x09102, "%s Doorbell consumed : type=%d %p\n", __func__, dbnode->ntype, dbnode); kfree(dbnode); } else { break; } } SET_DID_STATUS(bsg_reply->result, DID_OK); bsg_reply->reply_payload_rcv_len = sg_skip; bsg_job->reply_len = sizeof(struct fc_bsg_reply); return 0; } static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job, u32 delay) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; /* small sleep for doorbell events to accumulate */ if (delay) msleep(delay); qla_edif_consume_dbell(vha, bsg_job); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha) { unsigned long flags; struct bsg_job *prev_bsg_job = NULL; spin_lock_irqsave(&vha->e_dbell.db_lock, flags); if (vha->e_dbell.dbell_bsg_job) { prev_bsg_job = vha->e_dbell.dbell_bsg_job; vha->e_dbell.dbell_bsg_job = NULL; } spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); if (prev_bsg_job) __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0); } static int qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { unsigned long flags; bool return_bsg = false; /* flush previous dbell bsg */ qla_edif_dbell_bsg_done(vha); spin_lock_irqsave(&vha->e_dbell.db_lock, flags); if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) { /* * when the next db event happens, bsg_job will return. * Otherwise, timer will return it. */ vha->e_dbell.dbell_bsg_job = bsg_job; vha->e_dbell.bsg_expire = jiffies + 10 * HZ; } else { return_bsg = true; } spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); if (return_bsg) __qla_edif_dbell_bsg_done(vha, bsg_job, 1); return 0; } int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct app_id appcheck; bool done = true; int32_t rval = 0; uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; u32 level = ql_dbg_edif; /* doorbell is high traffic */ if (vnd_sc == QL_VND_SC_READ_DBELL) level = 0; ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n", __func__, vnd_sc); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &appcheck, sizeof(struct app_id)); if (!vha->hw->flags.edif_enabled || test_bit(VPORT_DELETE, &vha->dpc_flags)) { ql_dbg(level, vha, 0x911d, "%s edif not enabled or vp delete. bsg ptr done %p. 
dpc_flags %lx\n", __func__, bsg_job, vha->dpc_flags); SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done; } if (!qla_edif_app_check(vha, appcheck)) { ql_dbg(level, vha, 0x911d, "%s app checked failed.\n", __func__); bsg_job->reply_len = sizeof(struct fc_bsg_reply); SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done; } switch (vnd_sc) { case QL_VND_SC_SA_UPDATE: done = false; rval = qla24xx_sadb_update(bsg_job); break; case QL_VND_SC_APP_START: rval = qla_edif_app_start(vha, bsg_job); break; case QL_VND_SC_APP_STOP: rval = qla_edif_app_stop(vha, bsg_job); break; case QL_VND_SC_AUTH_OK: rval = qla_edif_app_authok(vha, bsg_job); break; case QL_VND_SC_AUTH_FAIL: rval = qla_edif_app_authfail(vha, bsg_job); break; case QL_VND_SC_GET_FCINFO: rval = qla_edif_app_getfcinfo(vha, bsg_job); break; case QL_VND_SC_GET_STATS: rval = qla_edif_app_getstats(vha, bsg_job); break; case QL_VND_SC_AEN_COMPLETE: rval = qla_edif_ack(vha, bsg_job); break; case QL_VND_SC_READ_DBELL: rval = qla_edif_dbell_bsg(vha, bsg_job); done = false; break; default: ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n", __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[1]); rval = EXT_STATUS_INVALID_PARAM; done = false; break; } done: if (done) { ql_dbg(level, vha, 0x7009, "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return rval; } static struct edif_sa_ctl * qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame, int dir) { struct edif_sa_ctl *sa_ctl; struct qla_sa_update_frame *sap; int index = sa_frame->fast_sa_index; unsigned long flags = 0; sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL); if (!sa_ctl) { /* couldn't get space */ ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "unable to allocate SA CTL\n"); return NULL; } /* * need to allocate sa_index here and save it * in both sa_ctl->index and sa_frame->fast_sa_index; * If alloc fails then delete sa_ctl and return NULL */ INIT_LIST_HEAD(&sa_ctl->next); sap = &sa_ctl->sa_frame; *sap = *sa_frame; sa_ctl->index = index; sa_ctl->fcport = fcport; sa_ctl->flags = 0; sa_ctl->state = 0L; ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "%s: Added sa_ctl %p, index %d, state 0x%lx\n", __func__, sa_ctl, sa_ctl->index, sa_ctl->state); spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); if (dir == SAU_FLG_TX) list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list); else list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list); spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); return sa_ctl; } void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport) { struct edif_sa_ctl *sa_ctl, *tsa_ctl; unsigned long flags = 0; spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list, next) { list_del(&sa_ctl->next); kfree(sa_ctl); } list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list, next) { list_del(&sa_ctl->next); kfree(sa_ctl); } spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); } struct edif_sa_ctl * qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir) { struct edif_sa_ctl *sa_ctl, *tsa_ctl; struct list_head *sa_list; if (dir == SAU_FLG_TX) sa_list = &fcport->edif.tx_sa_list; else sa_list = &fcport->edif.rx_sa_list; list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) { if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) && sa_ctl->index == index) return sa_ctl; } return NULL; } /* add the sa to the correct list */ static int qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, 
fc_port_t *fcport, struct qla_sa_update_frame *sa_frame) { struct edif_sa_ctl *sa_ctl = NULL; int dir; uint16_t sa_index; dir = (sa_frame->flags & SAU_FLG_TX); /* map the spi to an sa_index */ sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame); if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) { /* process rx delete */ ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n", __func__, fcport->loop_id, sa_frame->spi); /* build and send the aen */ fcport->edif.rx_sa_set = 1; fcport->edif.rx_sa_pending = 0; qla_edb_eventcreate(fcport->vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, QL_VND_SA_STAT_SUCCESS, QL_VND_RX_SA_KEY, fcport); /* force a return of good bsg status; */ return RX_DELETE_NO_EDIF_SA_INDEX; } else if (sa_index == INVALID_EDIF_SA_INDEX) { ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "%s: Failed to get sa_index for spi 0x%x, dir: %d\n", __func__, sa_frame->spi, dir); return INVALID_EDIF_SA_INDEX; } ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n", __func__, sa_index, sa_frame->spi, dir, fcport->loop_id); /* This is a local copy of sa_frame. */ sa_frame->fast_sa_index = sa_index; /* create the sa_ctl */ sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir); if (!sa_ctl) { ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n", __func__, sa_frame->spi, dir, sa_index); return -1; } set_bit(EDIF_SA_CTL_USED, &sa_ctl->state); if (dir == SAU_FLG_TX) fcport->edif.tx_rekey_cnt++; else fcport->edif.rx_rekey_cnt++; ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n", __func__, sa_ctl, sa_ctl->index, sa_ctl->state, fcport->edif.tx_rekey_cnt, fcport->edif.rx_rekey_cnt, fcport->loop_id); return 0; } #define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0 #define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2 #define EDIF_MSLEEP_INTERVAL 100 #define EDIF_RETRY_COUNT 50 int qla24xx_sadb_update(struct bsg_job *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport = NULL; srb_t *sp = NULL; struct edif_list_entry *edif_entry = NULL; int found = 0; int rval = 0; int result = 0, cnt; struct qla_sa_update_frame sa_frame; struct srb_iocb *iocb_cmd; port_id_t portid; ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s entered, vha: 0x%p\n", __func__, vha); sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, &sa_frame, sizeof(struct qla_sa_update_frame)); /* Check if host is online */ if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n"); rval = -EIO; SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done; } if (DBELL_INACTIVE(vha)) { ql_log(ql_log_warn, vha, 0x70a1, "App not started\n"); rval = -EIO; SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done; } /* silent unaligned access warning */ portid.b.domain = sa_frame.port_id.b.domain; portid.b.area = sa_frame.port_id.b.area; portid.b.al_pa = sa_frame.port_id.b.al_pa; fcport = qla2x00_find_fcport_by_pid(vha, &portid); if (fcport) { found = 1; if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY) fcport->edif.tx_bytes = 0; if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY) fcport->edif.rx_bytes = 0; } if (!found) { ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n", sa_frame.port_id.b24); rval = -EINVAL; SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT); goto done; } /* make sure the 
nport_handle is valid */ if (fcport->loop_id == FC_NO_LOOP_ID) { ql_dbg(ql_dbg_edif, vha, 0x70e1, "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n", __func__, fcport->port_name, sa_frame.spi, fcport->disc_state); rval = -EINVAL; SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT); goto done; } /* allocate and queue an sa_ctl */ result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame); /* failure of bsg */ if (result == INVALID_EDIF_SA_INDEX) { ql_dbg(ql_dbg_edif, vha, 0x70e1, "%s: %8phN, skipping update.\n", __func__, fcport->port_name); rval = -EINVAL; SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done; /* rx delete failure */ } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) { ql_dbg(ql_dbg_edif, vha, 0x70e1, "%s: %8phN, skipping rx delete.\n", __func__, fcport->port_name); SET_DID_STATUS(bsg_reply->result, DID_OK); goto done; } ql_dbg(ql_dbg_edif, vha, 0x70e1, "%s: %8phN, sa_index in sa_frame: %d flags %xh\n", __func__, fcport->port_name, sa_frame.fast_sa_index, sa_frame.flags); /* looking for rx index and delete */ if (((sa_frame.flags & SAU_FLG_TX) == 0) && (sa_frame.flags & SAU_FLG_INV)) { uint16_t nport_handle = fcport->loop_id; uint16_t sa_index = sa_frame.fast_sa_index; /* * make sure we have an existing rx key, otherwise just process * this as a straight delete just like TX * This is NOT a normal case, it indicates an error recovery or key cleanup * by the ipsec code above us. */ edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id); if (!edif_entry) { ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n", __func__, fcport->loop_id, sa_index); goto force_rx_delete; } /* * if we have a forced delete for rx, remove the sa_index from the edif list * and proceed with normal delete. The rx delay timer should not be running */ if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) { qla_edif_list_delete_sa_index(fcport, edif_entry); ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n", __func__, fcport->loop_id, sa_index); kfree(edif_entry); goto force_rx_delete; } /* * delayed rx delete * * if delete_sa_index is not invalid then there is already * a delayed index in progress, return bsg bad status */ if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { struct edif_sa_ctl *sa_ctl; ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: delete for lid 0x%x, delete_sa_index %d is pending\n", __func__, edif_entry->handle, edif_entry->delete_sa_index); /* free up the sa_ctl that was allocated with the sa_index */ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index, (sa_frame.flags & SAU_FLG_TX)); if (sa_ctl) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index); qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); } /* release the sa_index */ ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_index %d, nph: 0x%x\n", __func__, sa_index, nport_handle); qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index); rval = -EINVAL; SET_DID_STATUS(bsg_reply->result, DID_ERROR); goto done; } fcport->edif.rekey_cnt++; /* configure and start the rx delay timer */ edif_entry->fcport = fcport; edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ; ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n", __func__, edif_entry, sa_index, nport_handle); /* * Start the timer when we queue the delayed rx delete. 
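 * (The old rx sa_index recorded below in delete_sa_index is torn down later,
 * once the response path sees traffic on the new sa_index or this timer
 * expires, so both keys stay programmed across the changeover.)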
* This is an activity timer that goes off if we have not * received packets with the new sa_index */ add_timer(&edif_entry->timer); /* * sa_delete for rx key with an active rx key including this one * add the delete rx sa index to the hash so we can look for it * in the rsp queue. Do this after making any changes to the * edif_entry as part of the rx delete. */ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n", __func__, sa_index, nport_handle, bsg_job); edif_entry->delete_sa_index = sa_index; bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_reply->result = DID_OK << 16; goto done; /* * rx index and update * add the index to the list and continue with normal update */ } else if (((sa_frame.flags & SAU_FLG_TX) == 0) && ((sa_frame.flags & SAU_FLG_INV) == 0)) { /* sa_update for rx key */ uint32_t nport_handle = fcport->loop_id; uint16_t sa_index = sa_frame.fast_sa_index; int result; /* * add the update rx sa index to the hash so we can look for it * in the rsp queue and continue normally */ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: adding update sa_index %d, lid 0x%x to edif_list\n", __func__, sa_index, nport_handle); result = qla_edif_list_add_sa_update_index(fcport, sa_index, nport_handle); if (result) { ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n", __func__, sa_index, nport_handle); } } if (sa_frame.flags & SAU_FLG_GMAC_MODE) fcport->edif.aes_gmac = 1; else fcport->edif.aes_gmac = 0; force_rx_delete: /* * sa_update for both rx and tx keys, sa_delete for tx key * immediately process the request */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { rval = -ENOMEM; SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); goto done; } sp->type = SRB_SA_UPDATE; sp->name = "bsg_sa_update"; sp->u.bsg_job = bsg_job; /* sp->free = qla2x00_bsg_sp_free; */ sp->free = qla2x00_rel_sp; sp->done = qla2x00_bsg_job_done; iocb_cmd = &sp->u.iocb_cmd; iocb_cmd->u.sa_update.sa_frame = sa_frame; cnt = 0; retry: rval = qla2x00_start_sp(sp); switch (rval) { case QLA_SUCCESS: break; case EAGAIN: msleep(EDIF_MSLEEP_INTERVAL); cnt++; if (cnt < EDIF_RETRY_COUNT) goto retry; fallthrough; default: ql_log(ql_dbg_edif, vha, 0x70e3, "%s qla2x00_start_sp failed=%d.\n", __func__, rval); qla2x00_rel_sp(sp); rval = -EIO; SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); goto done; } ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: %s sent, hdl=%x, portid=%06x.\n", __func__, sp->name, sp->handle, fcport->d_id.b24); fcport->edif.rekey_cnt++; bsg_job->reply_len = sizeof(struct fc_bsg_reply); SET_DID_STATUS(bsg_reply->result, DID_OK); return 0; /* * send back error status */ done: bsg_job->reply_len = sizeof(struct fc_bsg_reply); ql_dbg(ql_dbg_edif, vha, 0x911d, "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n", __func__, bsg_reply->result, bsg_job); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; } static void qla_enode_free(scsi_qla_host_t *vha, struct enode *node) { node->ntype = N_UNDEF; kfree(node); } /** * qla_enode_init - initialize enode structs & lock * @vha: host adapter pointer * * should only be called when driver attaching */ void qla_enode_init(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; char name[32]; if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) { /* list still active - error */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n", __func__); return; } /* initialize lock which protects pur_core & init list */ spin_lock_init(&vha->pur_cinfo.pur_lock); 
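        /*
         * pur_cinfo.head queues unsolicited PUREX/ELS authentication frames
         * (see qla24xx_auth_els()) until the application retrieves them via
         * qla_pur_get_pending().
         */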
INIT_LIST_HEAD(&vha->pur_cinfo.head); snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME, ha->pdev->device); } /** * qla_enode_stop - stop and clear and enode data * @vha: host adapter pointer * * called when app notified it is exiting */ void qla_enode_stop(scsi_qla_host_t *vha) { unsigned long flags; struct enode *node, *q; if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode not active\n", __func__); return; } /* grab lock so list doesn't move */ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */ /* hopefully this is a null list at this point */ list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) { ql_dbg(ql_dbg_edif, vha, 0x910f, "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype, node->dinfo.nodecnt); list_del_init(&node->list); qla_enode_free(vha, node); } spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); } static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid) { unsigned long flags; struct enode *e, *tmp; struct purexevent *purex; LIST_HEAD(enode_list); if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode not active\n", __func__); return; } spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) { purex = &e->u.purexinfo; if (purex->pur_info.pur_sid.b24 == portid.b24) { ql_dbg(ql_dbg_edif, vha, 0x911d, "%s free ELS sid=%06x. xchg %x, nb=%xh\n", __func__, portid.b24, purex->pur_info.pur_rx_xchg_address, purex->pur_info.pur_bytes_rcvd); list_del_init(&e->list); list_add_tail(&e->list, &enode_list); } } spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); list_for_each_entry_safe(e, tmp, &enode_list, list) { list_del_init(&e->list); qla_enode_free(vha, e); } } /* * allocate enode struct and populate buffer * returns: enode pointer with buffers * NULL on error */ static struct enode * qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype) { struct enode *node; struct purexevent *purex; node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC); if (!node) return NULL; purex = &node->u.purexinfo; purex->msgp = (u8 *)(node + 1); purex->msgp_len = ELS_MAX_PAYLOAD; node->ntype = ntype; INIT_LIST_HEAD(&node->list); return node; } static void qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr) { unsigned long flags; ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109, "%s add enode for type=%x, cnt=%x\n", __func__, ptr->ntype, ptr->dinfo.nodecnt); spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); list_add_tail(&ptr->list, &vha->pur_cinfo.head); spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); return; } static struct enode * qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2) { struct enode *node_rtn = NULL; struct enode *list_node, *q; unsigned long flags; uint32_t sid; struct purexevent *purex; /* secure the list from moving under us */ spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) { /* node type determines what p1 and p2 are */ purex = &list_node->u.purexinfo; sid = p1; if (purex->pur_info.pur_sid.b24 == sid) { /* found it and its complete */ node_rtn = list_node; list_del(&list_node->list); break; } } spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); return node_rtn; } /** * qla_pur_get_pending - read/return authentication message sent * from remote port * @vha: host adapter pointer * @fcport: session 
pointer * @bsg_job: user request where the message is copy to. */ static int qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport, struct bsg_job *bsg_job) { struct enode *ptr; struct purexevent *purex; struct qla_bsg_auth_els_reply *rpl = (struct qla_bsg_auth_els_reply *)bsg_job->reply; bsg_job->reply_len = sizeof(*rpl); ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET); if (!ptr) { ql_dbg(ql_dbg_edif, vha, 0x9111, "%s no enode data found for %8phN sid=%06x\n", __func__, fcport->port_name, fcport->d_id.b24); SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY); return -EIO; } /* * enode is now off the linked list and is ours to deal with */ purex = &ptr->u.purexinfo; /* Copy info back to caller */ rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address; SET_DID_STATUS(rpl->r.result, DID_OK); rpl->r.reply_payload_rcv_len = sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, purex->msgp, purex->pur_info.pur_bytes_rcvd, 0); /* data copy / passback completed - destroy enode */ qla_enode_free(vha, ptr); return 0; } /* it is assume qpair lock is held */ static int qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp, struct qla_els_pt_arg *a) { struct els_entry_24xx *els_iocb; els_iocb = __qla2x00_alloc_iocbs(qp, NULL); if (!els_iocb) { ql_log(ql_log_warn, vha, 0x700c, "qla2x00_alloc_iocbs failed.\n"); return QLA_FUNCTION_FAILED; } qla_els_pt_iocb(vha, els_iocb, a); ql_dbg(ql_dbg_edif, vha, 0x0183, "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n", a->ox_id, a->sid.b24, a->did.b24); ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185, vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c)); /* flush iocb to mem before notifying hw doorbell */ wmb(); qla2x00_start_iocbs(vha, qp->req); return 0; } void qla_edb_init(scsi_qla_host_t *vha) { if (DBELL_ACTIVE(vha)) { /* list already init'd - error */ ql_dbg(ql_dbg_edif, vha, 0x09102, "edif db already initialized, cannot reinit\n"); return; } /* initialize lock which protects doorbell & init list */ spin_lock_init(&vha->e_dbell.db_lock); INIT_LIST_HEAD(&vha->e_dbell.head); } static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid) { unsigned long flags; struct edb_node *e, *tmp; port_id_t sid; LIST_HEAD(edb_list); if (DBELL_INACTIVE(vha)) { /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); return; } /* grab lock so list doesn't move */ spin_lock_irqsave(&vha->e_dbell.db_lock, flags); list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) { switch (e->ntype) { case VND_CMD_AUTH_STATE_NEEDED: case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: sid = e->u.plogi_did; break; case VND_CMD_AUTH_STATE_ELS_RCVD: sid = e->u.els_sid; break; case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: /* app wants to see this */ continue; default: ql_log(ql_log_warn, vha, 0x09102, "%s unknown node type: %x\n", __func__, e->ntype); sid.b24 = 0; break; } if (sid.b24 == portid.b24) { ql_dbg(ql_dbg_edif, vha, 0x910f, "%s free doorbell event : node type = %x %p\n", __func__, e->ntype, e); list_del_init(&e->list); list_add_tail(&e->list, &edb_list); } } spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); list_for_each_entry_safe(e, tmp, &edb_list, list) qla_edb_node_free(vha, e); } /* function called when app is stopping */ void qla_edb_stop(scsi_qla_host_t *vha) { unsigned long flags; struct edb_node *node, *q; if (DBELL_INACTIVE(vha)) { /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); return; } /* grab lock so list 
doesn't move */ spin_lock_irqsave(&vha->e_dbell.db_lock, flags); vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */ /* hopefully this is a null list at this point */ list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) { ql_dbg(ql_dbg_edif, vha, 0x910f, "%s freeing edb_node type=%x\n", __func__, node->ntype); qla_edb_node_free(vha, node); } spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); qla_edif_dbell_bsg_done(vha); } static struct edb_node * qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype) { struct edb_node *node; node = kzalloc(sizeof(*node), GFP_ATOMIC); if (!node) { /* couldn't get space */ ql_dbg(ql_dbg_edif, vha, 0x9100, "edb node unable to be allocated\n"); return NULL; } node->ntype = ntype; INIT_LIST_HEAD(&node->list); return node; } /* adds a already allocated enode to the linked list */ static bool qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr) { unsigned long flags; if (DBELL_INACTIVE(vha)) { /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled\n", __func__); return false; } spin_lock_irqsave(&vha->e_dbell.db_lock, flags); list_add_tail(&ptr->list, &vha->e_dbell.head); spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); return true; } /* adds event to doorbell list */ void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2, fc_port_t *sfcport) { struct edb_node *edbnode; fc_port_t *fcport = sfcport; port_id_t id; if (!vha->hw->flags.edif_enabled) { /* edif not enabled */ return; } if (DBELL_INACTIVE(vha)) { if (fcport) fcport->edif.auth_state = dbtype; /* doorbell list not enabled */ ql_dbg(ql_dbg_edif, vha, 0x09102, "%s doorbell not enabled (type=%d\n", __func__, dbtype); return; } edbnode = qla_edb_node_alloc(vha, dbtype); if (!edbnode) { ql_dbg(ql_dbg_edif, vha, 0x09102, "%s unable to alloc db node\n", __func__); return; } if (!fcport) { id.b.domain = (data >> 16) & 0xff; id.b.area = (data >> 8) & 0xff; id.b.al_pa = data & 0xff; ql_dbg(ql_dbg_edif, vha, 0x09222, "%s: Arrived s_id: %06x\n", __func__, id.b24); fcport = qla2x00_find_fcport_by_pid(vha, &id); if (!fcport) { ql_dbg(ql_dbg_edif, vha, 0x09102, "%s can't find fcport for sid= 0x%x - ignoring\n", __func__, id.b24); kfree(edbnode); return; } } /* populate the edb node */ switch (dbtype) { case VND_CMD_AUTH_STATE_NEEDED: case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: edbnode->u.plogi_did.b24 = fcport->d_id.b24; break; case VND_CMD_AUTH_STATE_ELS_RCVD: edbnode->u.els_sid.b24 = fcport->d_id.b24; break; case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: edbnode->u.sa_aen.port_id = fcport->d_id; edbnode->u.sa_aen.status = data; edbnode->u.sa_aen.key_type = data2; edbnode->u.sa_aen.version = EDIF_VERSION1; break; default: ql_dbg(ql_dbg_edif, vha, 0x09102, "%s unknown type: %x\n", __func__, dbtype); kfree(edbnode); edbnode = NULL; break; } if (edbnode) { if (!qla_edb_node_add(vha, edbnode)) { ql_dbg(ql_dbg_edif, vha, 0x09102, "%s unable to add dbnode\n", __func__); kfree(edbnode); return; } ql_dbg(ql_dbg_edif, vha, 0x09102, "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode); qla_edif_dbell_bsg_done(vha); if (fcport) fcport->edif.auth_state = dbtype; } } void qla_edif_timer(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) { if (DBELL_INACTIVE(vha) && ha->edif_post_stop_cnt_down) { ha->edif_post_stop_cnt_down--; /* * turn off auto 'Plogi Acc + secure=1' feature * Set Add FW option[3] * BIT_15, if. 
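 * When the countdown reaches zero without the authentication app coming
 * back (doorbell still inactive), an ISP abort is scheduled so the firmware
 * stops auto-accepting secure PLOGIs on the app's behalf.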
*/ if (ha->edif_post_stop_cnt_down == 0) { ql_dbg(ql_dbg_async, vha, 0x911d, "%s chip reset to turn off PLOGI ACC + secure\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } else { ha->edif_post_stop_cnt_down = 60; } } if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire)) qla_edif_dbell_bsg_done(vha); } static void qla_noop_sp_done(srb_t *sp, int res) { sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); } /* * Called from work queue * build and send the sa_update iocb to delete an rx sa_index */ int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e) { srb_t *sp; fc_port_t *fcport = NULL; struct srb_iocb *iocb_cmd = NULL; int rval = QLA_SUCCESS; struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl; uint16_t nport_handle = e->u.sa_update.nport_handle; ql_dbg(ql_dbg_edif, vha, 0x70e6, "%s: starting, sa_ctl: %p\n", __func__, sa_ctl); if (!sa_ctl) { ql_dbg(ql_dbg_edif, vha, 0x70e6, "sa_ctl allocation failed\n"); rval = -ENOMEM; return rval; } fcport = sa_ctl->fcport; /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { ql_dbg(ql_dbg_edif, vha, 0x70e6, "SRB allocation failed\n"); rval = -ENOMEM; goto done; } fcport->flags |= FCF_ASYNC_SENT; iocb_cmd = &sp->u.iocb_cmd; iocb_cmd->u.sa_update.sa_ctl = sa_ctl; ql_dbg(ql_dbg_edif, vha, 0x3073, "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n", fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle); /* * if this is a sadb cleanup delete, mark it so the isr can * take the correct action */ if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) { /* mark this srb as a cleanup delete */ sp->flags |= SRB_EDIF_CLEANUP_DELETE; ql_dbg(ql_dbg_edif, vha, 0x70e6, "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp); } sp->type = SRB_SA_REPLACE; sp->name = "SA_REPLACE"; sp->fcport = fcport; sp->free = qla2x00_rel_sp; sp->done = qla_noop_sp_done; rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { goto done_free_sp; } return rval; done_free_sp: kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) { int itr = 0; struct scsi_qla_host *vha = sp->vha; struct qla_sa_update_frame *sa_frame = &sp->u.iocb_cmd.u.sa_update.sa_frame; u8 flags = 0; switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) { case 0: ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n", __func__, vha, sa_frame->fast_sa_index); break; case 1: ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n", __func__, vha, sa_frame->fast_sa_index); flags |= SA_FLAG_INVALIDATE; break; case 2: ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n", __func__, vha, sa_frame->fast_sa_index); flags |= SA_FLAG_TX; break; case 3: ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n", __func__, vha, sa_frame->fast_sa_index); flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE; break; } sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; sa_update_iocb->entry_count = 1; sa_update_iocb->sys_define = 0; sa_update_iocb->entry_status = 0; sa_update_iocb->handle = sp->handle; sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id); sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; sa_update_iocb->port_id[1] = 
sp->fcport->d_id.b.area; sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; sa_update_iocb->flags = flags; sa_update_iocb->salt = cpu_to_le32(sa_frame->salt); sa_update_iocb->spi = cpu_to_le32(sa_frame->spi); sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index); sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP; if (sp->fcport->edif.aes_gmac) sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC; if (sa_frame->flags & SAU_FLG_KEY256) { sa_update_iocb->sa_control |= SA_CNTL_KEY256; for (itr = 0; itr < 32; itr++) sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; } else { sa_update_iocb->sa_control |= SA_CNTL_KEY128; for (itr = 0; itr < 16; itr++) sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; } ql_dbg(ql_dbg_edif, vha, 0x921d, "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n", __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index, sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle, sp->fcport->edif.aes_gmac); if (sa_frame->flags & SAU_FLG_TX) sp->fcport->edif.tx_sa_pending = 1; else sp->fcport->edif.rx_sa_pending = 1; sp->fcport->vha->qla_stats.control_requests++; } void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) { struct scsi_qla_host *vha = sp->vha; struct srb_iocb *srb_iocb = &sp->u.iocb_cmd; struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl; uint16_t nport_handle = sp->fcport->loop_id; sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; sa_update_iocb->entry_count = 1; sa_update_iocb->sys_define = 0; sa_update_iocb->entry_status = 0; sa_update_iocb->handle = sp->handle; sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle); sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; /* Invalidate the index. 
salt, spi, control & key are ignore */ sa_update_iocb->flags = SA_FLAG_INVALIDATE; sa_update_iocb->salt = 0; sa_update_iocb->spi = 0; sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index); sa_update_iocb->sa_control = 0; ql_dbg(ql_dbg_edif, vha, 0x921d, "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n", __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags, sa_update_iocb->sa_index, sp->handle); sp->fcport->vha->qla_stats.control_requests++; } void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) { struct purex_entry_24xx *p = *pkt; struct enode *ptr; int sid; u16 totlen; struct purexevent *purex; struct scsi_qla_host *host = NULL; int rc; struct fc_port *fcport; struct qla_els_pt_arg a; be_id_t beid; memset(&a, 0, sizeof(a)); a.els_opcode = ELS_AUTH_ELS; a.nport_handle = p->nport_handle; a.rx_xchg_address = p->rx_xchg_addr; a.did.b.domain = p->s_id[2]; a.did.b.area = p->s_id[1]; a.did.b.al_pa = p->s_id[0]; a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt); a.tx_addr = vha->hw->elsrej.cdma; a.vp_idx = vha->vp_idx; a.control_flags = EPD_ELS_RJT; a.ox_id = le16_to_cpu(p->ox_id); sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16); totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE; if (le16_to_cpu(p->status_flags) & 0x8000) { totlen = le16_to_cpu(p->trunc_frame_size); qla_els_reject_iocb(vha, (*rsp)->qpair, &a); __qla_consume_iocb(vha, pkt, rsp); return; } if (totlen > ELS_MAX_PAYLOAD) { ql_dbg(ql_dbg_edif, vha, 0x0910d, "%s WARNING: verbose ELS frame received (totlen=%x)\n", __func__, totlen); qla_els_reject_iocb(vha, (*rsp)->qpair, &a); __qla_consume_iocb(vha, pkt, rsp); return; } if (!vha->hw->flags.edif_enabled) { /* edif support not enabled */ ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n", __func__); qla_els_reject_iocb(vha, (*rsp)->qpair, &a); __qla_consume_iocb(vha, pkt, rsp); return; } ptr = qla_enode_alloc(vha, N_PUREX); if (!ptr) { ql_dbg(ql_dbg_edif, vha, 0x09109, "WARNING: enode alloc failed for sid=%x\n", sid); qla_els_reject_iocb(vha, (*rsp)->qpair, &a); __qla_consume_iocb(vha, pkt, rsp); return; } purex = &ptr->u.purexinfo; purex->pur_info.pur_sid = a.did; purex->pur_info.pur_bytes_rcvd = totlen; purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr); purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle); purex->pur_info.pur_did.b.domain = p->d_id[2]; purex->pur_info.pur_did.b.area = p->d_id[1]; purex->pur_info.pur_did.b.al_pa = p->d_id[0]; purex->pur_info.vp_idx = p->vp_idx; a.sid = purex->pur_info.pur_did; rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp, purex->msgp_len); if (rc) { qla_els_reject_iocb(vha, (*rsp)->qpair, &a); qla_enode_free(vha, ptr); return; } beid.al_pa = purex->pur_info.pur_did.b.al_pa; beid.area = purex->pur_info.pur_did.b.area; beid.domain = purex->pur_info.pur_did.b.domain; host = qla_find_host_by_d_id(vha, beid); if (!host) { ql_log(ql_log_fatal, vha, 0x508b, "%s Drop ELS due to unable to find host %06x\n", __func__, purex->pur_info.pur_did.b24); qla_els_reject_iocb(vha, (*rsp)->qpair, &a); qla_enode_free(vha, ptr); return; } fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid); if (DBELL_INACTIVE(vha)) { ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n", __func__, host->e_dbell.db_flags, fcport ? 
fcport->d_id.b24 : 0); qla_els_reject_iocb(host, (*rsp)->qpair, &a); qla_enode_free(host, ptr); return; } if (fcport && EDIF_SESSION_DOWN(fcport)) { ql_dbg(ql_dbg_edif, host, 0x13b6, "%s terminate exchange. Send logo to 0x%x\n", __func__, a.did.b24); a.tx_byte_count = a.tx_len = 0; a.tx_addr = 0; a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */ qla_els_reject_iocb(host, (*rsp)->qpair, &a); qla_enode_free(host, ptr); /* send logo to let remote port knows to tear down session */ fcport->send_els_logo = 1; qlt_schedule_sess_for_deletion(fcport); return; } /* add the local enode to the list */ qla_enode_add(host, ptr); ql_dbg(ql_dbg_edif, host, 0x0910c, "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n", __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24, purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address); qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL); } static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir) { struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; void *sa_id_map; unsigned long flags = 0; u16 sa_index; ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, "%s: entry\n", __func__); if (dir) sa_id_map = ha->edif_tx_sa_id_map; else sa_id_map = ha->edif_rx_sa_id_map; spin_lock_irqsave(&ha->sadb_fp_lock, flags); sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX); if (sa_index >= EDIF_NUM_SA_INDEX) { spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); return INVALID_EDIF_SA_INDEX; } set_bit(sa_index, sa_id_map); spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); if (dir) sa_index += EDIF_TX_SA_INDEX_BASE; ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: index retrieved from free pool %d\n", __func__, sa_index); return sa_index; } /* find an sadb entry for an nport_handle */ static struct edif_sa_index_entry * qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, struct list_head *sa_list) { struct edif_sa_index_entry *entry; struct edif_sa_index_entry *tentry; struct list_head *indx_list = sa_list; list_for_each_entry_safe(entry, tentry, indx_list, next) { if (entry->handle == nport_handle) return entry; } return NULL; } /* remove an sa_index from the nport_handle and return it to the free pool */ static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, uint16_t sa_index) { struct edif_sa_index_entry *entry; struct list_head *sa_list; int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1; int slot = 0; int free_slot_count = 0; scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: entry\n", __func__); if (dir) sa_list = &ha->sadb_tx_index_list; else sa_list = &ha->sadb_rx_index_list; entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); if (!entry) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: no entry found for nport_handle 0x%x\n", __func__, nport_handle); return -1; } spin_lock_irqsave(&ha->sadb_lock, flags); /* * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic * the other is use at re-key time. 
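 * A rekey therefore walks the pair roughly as follows (illustrative): the
 * active key keeps its sa_index in one slot, the new key takes an sa_index
 * from the free pool into the other slot, and when the old key is
 * invalidated its slot is cleared and the sa_index returned to the pool.
 * Once both slots are free the whole nport_handle entry is removed (see
 * free_slot_count below).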
*/ for (slot = 0; slot < 2; slot++) { if (entry->sa_pair[slot].sa_index == sa_index) { entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX; entry->sa_pair[slot].spi = 0; free_slot_count++; qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index); } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { free_slot_count++; } } if (free_slot_count == 2) { list_del(&entry->next); kfree(entry); } spin_unlock_irqrestore(&ha->sadb_lock, flags); ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sa_index %d removed, free_slot_count: %d\n", __func__, sa_index, free_slot_count); return 0; } void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req, struct sa_update_28xx *pkt) { const char *func = "SA_UPDATE_RESPONSE_IOCB"; srb_t *sp; struct edif_sa_ctl *sa_ctl; int old_sa_deleted = 1; uint16_t nport_handle; struct scsi_qla_host *vha; sp = qla2x00_get_sp_from_handle(v, func, req, pkt); if (!sp) { ql_dbg(ql_dbg_edif, v, 0x3063, "%s: no sp found for pkt\n", __func__); return; } /* use sp->vha due to npiv */ vha = sp->vha; switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) { case 0: ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n", __func__, vha, pkt->sa_index); break; case 1: ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n", __func__, vha, pkt->sa_index); break; case 2: ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n", __func__, vha, pkt->sa_index); break; case 3: ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n", __func__, vha, pkt->sa_index); break; } /* * dig the nport handle out of the iocb, fcport->loop_id can not be trusted * to be correct during cleanup sa_update iocbs. */ nport_handle = sp->fcport->loop_id; ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n", __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info, nport_handle, pkt->sa_index, pkt->flags, sp->handle); /* if rx delete, remove the timer */ if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) { struct edif_list_entry *edif_entry; sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle); if (edif_entry) { ql_dbg(ql_dbg_edif, vha, 0x5033, "%s: removing edif_entry %p, new sa_index: 0x%x\n", __func__, edif_entry, pkt->sa_index); qla_edif_list_delete_sa_index(sp->fcport, edif_entry); timer_shutdown(&edif_entry->timer); ql_dbg(ql_dbg_edif, vha, 0x5033, "%s: releasing edif_entry %p, new sa_index: 0x%x\n", __func__, edif_entry, pkt->sa_index); kfree(edif_entry); } } /* * if this is a delete for either tx or rx, make sure it succeeded. * The new_sa_info field should be 0xffff on success */ if (pkt->flags & SA_FLAG_INVALIDATE) old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0; /* Process update and delete the same way */ /* If this is an sadb cleanup delete, bypass sending events to IPSEC */ if (sp->flags & SRB_EDIF_CLEANUP_DELETE) { sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: nph 0x%x, sa_index %d removed from fw\n", __func__, sp->fcport->loop_id, pkt->sa_index); } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) && old_sa_deleted) { /* * Note: Wa are only keeping track of latest SA, * so we know when we can start enableing encryption per I/O. * If all SA's get deleted, let FW reject the IOCB. 
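 * Either way the authentication app is told the outcome through a
 * VND_CMD_AUTH_STATE_SAUPDATE_COMPL doorbell event: QL_VND_SA_STAT_SUCCESS
 * on success, or QL_VND_SA_STAT_FAILED with the completion status folded
 * into the upper 16 bits on failure.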
* TODO: edif: don't set enabled here I think * TODO: edif: prli complete is where it should be set */ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, "SA(%x)updated for s_id %02x%02x%02x\n", pkt->new_sa_info, pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); sp->fcport->edif.enable = 1; if (pkt->flags & SA_FLAG_TX) { sp->fcport->edif.tx_sa_set = 1; sp->fcport->edif.tx_sa_pending = 0; qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, QL_VND_SA_STAT_SUCCESS, QL_VND_TX_SA_KEY, sp->fcport); } else { sp->fcport->edif.rx_sa_set = 1; sp->fcport->edif.rx_sa_pending = 0; qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, QL_VND_SA_STAT_SUCCESS, QL_VND_RX_SA_KEY, sp->fcport); } } else { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n", __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info, pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); if (pkt->flags & SA_FLAG_TX) qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, QL_VND_TX_SA_KEY, sp->fcport); else qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, QL_VND_RX_SA_KEY, sp->fcport); } /* for delete, release sa_ctl, sa_index */ if (pkt->flags & SA_FLAG_INVALIDATE) { /* release the sa_ctl */ sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport, le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX)); if (sa_ctl && qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index, (pkt->flags & SA_FLAG_TX)) != NULL) { ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index); qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index); } else { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl); } ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_index %d, nph: 0x%x\n", __func__, le16_to_cpu(pkt->sa_index), nport_handle); qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, le16_to_cpu(pkt->sa_index)); /* * check for a failed sa_update and remove * the sadb entry. */ } else if (pkt->u.comp_sts) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: freeing sa_index %d, nph: 0x%x\n", __func__, pkt->sa_index, nport_handle); qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, le16_to_cpu(pkt->sa_index)); switch (le16_to_cpu(pkt->u.comp_sts)) { case CS_PORT_EDIF_UNAVAIL: case CS_PORT_EDIF_LOGOUT: qlt_schedule_sess_for_deletion(sp->fcport); break; default: break; } } sp->done(sp, 0); } /** * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP * @sp: command to send to the ISP * * Return: non-zero if a failure occurred, else zero. */ int qla28xx_start_scsi_edif(srb_t *sp) { int nseg; unsigned long flags; struct scsi_cmnd *cmd; uint32_t *clr_ptr; uint32_t index, i; uint32_t handle; uint16_t cnt; int16_t req_cnt; uint16_t tot_dsds; __be32 *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct cmd_type_6 *cmd_pkt; struct dsd64 *cur_dsd; uint8_t avail_dsds = 0; struct scatterlist *sg; struct req_que *req = sp->qpair->req; spinlock_t *lock = sp->qpair->qp_lock_ptr; /* Setup device pointers. 
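 * Overall flow below: send a marker if one is pending, take the qpair ring
 * lock, find a free outstanding-command handle, DMA-map the scatterlist,
 * reserve IOCB/exchange resources, build the FCP_CMND IU in a driver
 * buffer, fill in the Command Type 6 IOCB with CF_EN_EDIF set, and finally
 * advance the request ring.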
*/ cmd = GET_CMD_SP(sp); /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x300c, "qla2x00_marker failed for cmd=%p.\n", cmd); return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; } /* Acquire ring specific lock */ spin_lock_irqsave(lock, flags); /* Check for room in outstanding command list. */ handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { handle++; if (handle == req->num_outstanding_cmds) handle = 1; if (!req->outstanding_cmds[handle]) break; } if (index == req->num_outstanding_cmds) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ if (scsi_sg_count(cmd)) { nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), scsi_sg_count(cmd), cmd->sc_data_direction); if (unlikely(!nseg)) goto queuing_error; } else { nseg = 0; } tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : rd_reg_dword(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)) goto queuing_error; } if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) { ql_log(ql_log_fatal, vha, 0x3011, "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error; } sp->flags |= SRB_GOT_BUF; ctx = &sp->u.scmd.ct6_ctx; ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf; ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma; if (cmd->cmd_len > 16) { additional_cdb_len = cmd->cmd_len - 16; if ((cmd->cmd_len % 4) != 0) { /* * SCSI command bigger than 16 bytes must be * multiple of 4 */ ql_log(ql_log_warn, vha, 0x3012, "scsi cmd len %d not multiple of 4 for cmd=%p.\n", cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; } else { additional_cdb_len = 0; ctx->fcp_cmnd_len = 12 + 16 + 4; } cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* * Zero out remaining portion of packet. * tagged queuing modifier -- default is TSK_SIMPLE (0). 
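	 * Only the first two dwords (the entry header and the handle written
	 * just above) are skipped; the rest of the request entry is cleared
	 * before the command fields are filled in.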
*/ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { cmd_pkt->byte_count = cpu_to_le32(0); goto no_dsds; } /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); vha->qla_stats.output_requests++; sp->fcport->edif.tx_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); vha->qla_stats.input_requests++; sp->fcport->edif.rx_bytes += scsi_bufflen(cmd); } cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA)); /* One DSD is available in the Command Type 6 IOCB */ avail_dsds = 1; cur_dsd = &cmd_pkt->fcp_dsd; /* Load data segments */ scsi_for_each_sg(cmd, sg, tot_dsds, i) { dma_addr_t sle_dma; cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Continuation * Type 1 IOCB. */ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); cur_dsd = cont_pkt->dsd; avail_dsds = 5; } sle_dma = sg_dma_address(sg); put_unaligned_le64(sle_dma, &cur_dsd->address); cur_dsd->length = cpu_to_le32(sg_dma_len(sg)); cur_dsd++; avail_dsds--; } no_dsds: /* Set NPORT-ID and LUN number*/ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->vha->vp_idx; cmd_pkt->entry_type = COMMAND_TYPE_6; /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* build FCP_CMND IU */ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; if (cmd->sc_data_direction == DMA_TO_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 1; else if (cmd->sc_data_direction == DMA_FROM_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 2; /* Populate the FCP_PRIO. */ if (ha->flags.fcp_prio_enabled) ctx->fcp_cmnd->task_attribute |= sp->fcport->fcp_prio << 3; memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + additional_cdb_len); *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address); cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; cmd_pkt->entry_status = 0; /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; /* Adjust ring index. */ wmb(); req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } sp->qpair->cmd_cnt++; /* Set chip new ring index. 
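	 * Writing the request-queue in pointer is the doorbell that hands the
	 * new IOCB(s) to the firmware; the wmb() above orders the IOCB memory
	 * writes ahead of the index update.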
*/ wrt_reg_dword(req->req_q_in, req->ring_index); spin_unlock_irqrestore(lock, flags); return QLA_SUCCESS; queuing_error_fcp_cmnd: queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(lock, flags); return QLA_FUNCTION_FAILED; } /********************************************** * edif update/delete sa_index list functions * **********************************************/ /* clear the edif_indx_list for this port */ void qla_edif_list_del(fc_port_t *fcport) { struct edif_list_entry *indx_lst; struct edif_list_entry *tindx_lst; struct list_head *indx_list = &fcport->edif.edif_indx_list; unsigned long flags = 0; spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) { list_del(&indx_lst->next); kfree(indx_lst); } spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); } /****************** * SADB functions * ******************/ /* allocate/retrieve an sa_index for a given spi */ static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame) { struct edif_sa_index_entry *entry; struct list_head *sa_list; uint16_t sa_index; int dir = sa_frame->flags & SAU_FLG_TX; int slot = 0; int free_slot = -1; scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; uint16_t nport_handle = fcport->loop_id; ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: entry fc_port: %p, nport_handle: 0x%x\n", __func__, fcport, nport_handle); if (dir) sa_list = &ha->sadb_tx_index_list; else sa_list = &ha->sadb_rx_index_list; entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); if (!entry) { if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: rx delete request with no entry\n", __func__); return RX_DELETE_NO_EDIF_SA_INDEX; } /* if there is no entry for this nport, add one */ entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC); if (!entry) return INVALID_EDIF_SA_INDEX; sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir); if (sa_index == INVALID_EDIF_SA_INDEX) { kfree(entry); return INVALID_EDIF_SA_INDEX; } INIT_LIST_HEAD(&entry->next); entry->handle = nport_handle; entry->fcport = fcport; entry->sa_pair[0].spi = sa_frame->spi; entry->sa_pair[0].sa_index = sa_index; entry->sa_pair[1].spi = 0; entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX; spin_lock_irqsave(&ha->sadb_lock, flags); list_add_tail(&entry->next, sa_list); spin_unlock_irqrestore(&ha->sadb_lock, flags); ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n", __func__, nport_handle, sa_frame->spi, sa_index); return sa_index; } spin_lock_irqsave(&ha->sadb_lock, flags); /* see if we already have an entry for this spi */ for (slot = 0; slot < 2; slot++) { if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { free_slot = slot; } else { if (entry->sa_pair[slot].spi == sa_frame->spi) { spin_unlock_irqrestore(&ha->sadb_lock, flags); ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n", __func__, slot, entry->handle, sa_frame->spi, entry->sa_pair[slot].sa_index); return entry->sa_pair[slot].sa_index; } } } spin_unlock_irqrestore(&ha->sadb_lock, flags); /* both slots are used */ if (free_slot == -1) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n", __func__, 
entry->handle, sa_frame->spi); ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n", __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index, entry->sa_pair[1].spi, entry->sa_pair[1].sa_index); return INVALID_EDIF_SA_INDEX; } /* there is at least one free slot, use it */ sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir); if (sa_index == INVALID_EDIF_SA_INDEX) { ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, "%s: empty freepool!!\n", __func__); return INVALID_EDIF_SA_INDEX; } spin_lock_irqsave(&ha->sadb_lock, flags); entry->sa_pair[free_slot].spi = sa_frame->spi; entry->sa_pair[free_slot].sa_index = sa_index; spin_unlock_irqrestore(&ha->sadb_lock, flags); ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n", __func__, free_slot, entry->handle, sa_frame->spi, sa_index); return sa_index; } /* release any sadb entries -- only done at teardown */ void qla_edif_sadb_release(struct qla_hw_data *ha) { struct edif_sa_index_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) { list_del(&entry->next); kfree(entry); } list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) { list_del(&entry->next); kfree(entry); } } /************************** * sadb freepool functions **************************/ /* build the rx and tx sa_index free pools -- only done at fcport init */ int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha) { ha->edif_tx_sa_id_map = kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL); if (!ha->edif_tx_sa_id_map) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0009, "Unable to allocate memory for sadb tx.\n"); return -ENOMEM; } ha->edif_rx_sa_id_map = kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL); if (!ha->edif_rx_sa_id_map) { kfree(ha->edif_tx_sa_id_map); ha->edif_tx_sa_id_map = NULL; ql_log_pci(ql_log_fatal, ha->pdev, 0x0009, "Unable to allocate memory for sadb rx.\n"); return -ENOMEM; } return 0; } /* release the free pool - only done during fcport teardown */ void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha) { kfree(ha->edif_tx_sa_id_map); ha->edif_tx_sa_id_map = NULL; kfree(ha->edif_rx_sa_id_map); ha->edif_rx_sa_id_map = NULL; } static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, uint32_t handle, uint16_t sa_index) { struct edif_list_entry *edif_entry; struct edif_sa_ctl *sa_ctl; uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX; unsigned long flags = 0; uint16_t nport_handle = fcport->loop_id; uint16_t cached_nport_handle; spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle); if (!edif_entry) { spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); return; /* no pending delete for this handle */ } /* * check for no pending delete for this index or iocb does not * match rx sa_index */ if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX || edif_entry->update_sa_index != sa_index) { spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); return; } /* * wait until we have seen at least EDIF_DELAY_COUNT transfers before * queueing RX delete */ if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) { spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); return; } ql_dbg(ql_dbg_edif, vha, 0x5033, "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n", __func__, edif_entry->update_sa_index, sa_index, 
edif_entry->delete_sa_index); delete_sa_index = edif_entry->delete_sa_index; edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; cached_nport_handle = edif_entry->handle; spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); /* sanity check on the nport handle */ if (nport_handle != cached_nport_handle) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n", __func__, nport_handle, cached_nport_handle); } /* find the sa_ctl for the delete and schedule the delete */ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0); if (sa_ctl) { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n", __func__, sa_ctl, sa_index); ql_dbg(ql_dbg_edif, vha, 0x3063, "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n", delete_sa_index, edif_entry->update_sa_index, nport_handle, handle); sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); qla_post_sa_replace_work(fcport->vha, fcport, nport_handle, sa_ctl); } else { ql_dbg(ql_dbg_edif, vha, 0x3063, "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n", __func__, delete_sa_index); } } void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, srb_t *sp, struct sts_entry_24xx *sts24) { fc_port_t *fcport = sp->fcport; /* sa_index used by this iocb */ struct scsi_cmnd *cmd = GET_CMD_SP(sp); uint32_t handle; handle = (uint32_t)LSW(sts24->handle); /* find out if this status iosb is for a scsi read */ if (cmd->sc_data_direction != DMA_FROM_DEVICE) return; return __chk_edif_rx_sa_delete_pending(vha, fcport, handle, le16_to_cpu(sts24->edif_sa_index)); } void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, struct ctio7_from_24xx *pkt) { __chk_edif_rx_sa_delete_pending(vha, fcport, pkt->handle, le16_to_cpu(pkt->edif_sa_index)); } static void qla_parse_auth_els_ctl(struct srb *sp) { struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg; struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job; struct fc_bsg_request *request = bsg_job->request; struct qla_bsg_auth_els_request *p = (struct qla_bsg_auth_els_request *)bsg_job->request; a->tx_len = a->tx_byte_count = sp->remap.req.len; a->tx_addr = sp->remap.req.dma; a->rx_len = a->rx_byte_count = sp->remap.rsp.len; a->rx_addr = sp->remap.rsp.dma; if (p->e.sub_cmd == SEND_ELS_REPLY) { a->control_flags = p->e.extra_control_flags << 13; a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address); if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC) a->els_opcode = ELS_LS_ACC; else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT) a->els_opcode = ELS_LS_RJT; } a->did = sp->fcport->d_id; a->els_opcode = request->rqst_data.h_els.command_code; a->nport_handle = cpu_to_le16(sp->fcport->loop_id); a->vp_idx = sp->vha->vp_idx; } int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; fc_port_t *fcport = NULL; struct qla_hw_data *ha = vha->hw; srb_t *sp; int rval = (DID_ERROR << 16), cnt; port_id_t d_id; struct qla_bsg_auth_els_request *p = (struct qla_bsg_auth_els_request *)bsg_job->request; struct qla_bsg_auth_els_reply *rpl = (struct qla_bsg_auth_els_reply *)bsg_job->reply; rpl->version = EDIF_VERSION1; d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2]; d_id.b.area = bsg_request->rqst_data.h_els.port_id[1]; d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0]; /* find matching d_id in fcport list */ fcport = 
qla2x00_find_fcport_by_pid(vha, &d_id); if (!fcport) { ql_dbg(ql_dbg_edif, vha, 0x911a, "%s fcport not find online portid=%06x.\n", __func__, d_id.b24); SET_DID_STATUS(bsg_reply->result, DID_ERROR); return -EIO; } if (qla_bsg_check(vha, bsg_job, fcport)) return 0; if (EDIF_SESS_DELETE(fcport)) { ql_dbg(ql_dbg_edif, vha, 0x910d, "%s ELS code %x, no loop id.\n", __func__, bsg_request->rqst_data.r_els.els_code); SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); return -ENXIO; } if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); rval = -EIO; goto done; } /* pass through is supported only for ISP 4Gb or higher */ if (!IS_FWI2_CAPABLE(ha)) { ql_dbg(ql_dbg_user, vha, 0x7001, "ELS passthru not supported for ISP23xx based adapters.\n"); SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); rval = -EPERM; goto done; } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { ql_dbg(ql_dbg_user, vha, 0x7004, "Failed get sp pid=%06x\n", fcport->d_id.b24); rval = -ENOMEM; SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); goto done; } sp->remap.req.len = bsg_job->request_payload.payload_len; sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool, GFP_KERNEL, &sp->remap.req.dma); if (!sp->remap.req.buf) { ql_dbg(ql_dbg_user, vha, 0x7005, "Failed allocate request dma len=%x\n", bsg_job->request_payload.payload_len); rval = -ENOMEM; SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); goto done_free_sp; } sp->remap.rsp.len = bsg_job->reply_payload.payload_len; sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool, GFP_KERNEL, &sp->remap.rsp.dma); if (!sp->remap.rsp.buf) { ql_dbg(ql_dbg_user, vha, 0x7006, "Failed allocate response dma len=%x\n", bsg_job->reply_payload.payload_len); rval = -ENOMEM; SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); goto done_free_remap_req; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, sp->remap.req.buf, sp->remap.req.len); sp->remap.remapped = true; sp->type = SRB_ELS_CMD_HST_NOLOGIN; sp->name = "SPCN_BSG_HST_NOLOGIN"; sp->u.bsg_cmd.bsg_job = bsg_job; qla_parse_auth_els_ctl(sp); sp->free = qla2x00_bsg_sp_free; sp->done = qla2x00_bsg_job_done; cnt = 0; retry: rval = qla2x00_start_sp(sp); switch (rval) { case QLA_SUCCESS: ql_dbg(ql_dbg_edif, vha, 0x700a, "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n", __func__, sc_to_str(p->e.sub_cmd), fcport->port_name, p->e.extra_rx_xchg_address, p->e.extra_control_flags, sp->handle, sp->remap.req.len, bsg_job); break; case EAGAIN: msleep(EDIF_MSLEEP_INTERVAL); cnt++; if (cnt < EDIF_RETRY_COUNT) goto retry; fallthrough; default: ql_log(ql_log_warn, vha, 0x700e, "%s qla2x00_start_sp failed = %d\n", __func__, rval); SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); rval = -EIO; goto done_free_remap_rsp; } return rval; done_free_remap_rsp: dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, sp->remap.rsp.dma); done_free_remap_req: dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, sp->remap.req.dma); done_free_sp: qla2x00_rel_sp(sp); done: return rval; } void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess) { u16 cnt = 0; if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) { ql_dbg(ql_dbg_disc, vha, 0xf09c, "%s: sess %8phN send port_offline event\n", __func__, sess->port_name); sess->edif.app_sess_online = 0; sess->edif.sess_down_acked = 0; qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN, sess->d_id.b24, 0, sess); qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24); 
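		/*
		 * Wait (up to roughly 10 seconds: 100 polls of 100ms) for the
		 * authentication application to acknowledge the session-down
		 * doorbell event, bailing out early if the vport is being
		 * deleted.
		 */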
		while (!READ_ONCE(sess->edif.sess_down_acked) &&
		    !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
			msleep(100);
			cnt++;
			if (cnt > 100)
				break;
		}

		sess->edif.sess_down_acked = 0;
		ql_dbg(ql_dbg_disc, vha, 0xf09c,
		    "%s: sess %8phN port_offline event completed\n",
		    __func__, sess->port_name);
	}
}

void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	if (!(fcport->flags & FCF_FCSP_DEVICE))
		return;

	qla_edb_clear(vha, fcport->d_id);
	qla_enode_clear(vha, fcport->d_id);
}
linux-master
drivers/scsi/qla2xxx/qla_edif.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ /* * Table for showing the current message id in use for particular level * Change this table for addition of log/debug messages. * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- * | Module Init and Probe | 0x0199 | | * | Mailbox commands | 0x1206 | 0x11a5-0x11ff | * | Device Discovery | 0x2134 | 0x2112-0x2115 | * | | | 0x2127-0x2128 | * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | * | | | 0x302e,0x3033 | * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | * | Async Events | 0x509c | | * | Timer Routines | 0x6012 | | * | User Space Interactions | 0x70e3 | 0x7018,0x702e | * | | | 0x7020,0x7024 | * | | | 0x7039,0x7045 | * | | | 0x7073-0x7075 | * | | | 0x70a5-0x70a6 | * | | | 0x70a8,0x70ab | * | | | 0x70ad-0x70ae | * | | | 0x70d0-0x70d6 | * | | | 0x70d7-0x70db | * | Task Management | 0x8042 | 0x8000 | * | | | 0x8019 | * | | | 0x8025,0x8026 | * | | | 0x8031,0x8032 | * | | | 0x8039,0x803c | * | AER/EEH | 0x9011 | | * | Virtual Port | 0xa007 | | * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 | * | | | 0xb09e,0xb0ae | * | | | 0xb0c3,0xb0c6 | * | | | 0xb0e0-0xb0ef | * | | | 0xb085,0xb0dc | * | | | 0xb107,0xb108 | * | | | 0xb111,0xb11e | * | | | 0xb12c,0xb12d | * | | | 0xb13a,0xb142 | * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc010 | | * | Misc | 0xd303 | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | * | Target Mode | 0xe081 | | * | Target Mode Management | 0xf09b | 0xf002 | * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000d | | * ---------------------------------------------------------------------- */ #include "qla_def.h" #include <linux/delay.h> #define CREATE_TRACE_POINTS #include <trace/events/qla.h> static uint32_t ql_dbg_offset = 0x800; static inline void qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) { fw_dump->fw_major_version = htonl(ha->fw_major_version); fw_dump->fw_minor_version = htonl(ha->fw_minor_version); fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version); fw_dump->fw_attributes = htonl(ha->fw_attributes); fw_dump->vendor = htonl(ha->pdev->vendor); fw_dump->device = htonl(ha->pdev->device); fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor); fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device); } static inline void * qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) { struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Request queue. */ memcpy(ptr, req->ring, req->length * sizeof(request_t)); /* Response queue. 
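	 * The response ring is copied immediately after the request ring and
	 * the returned pointer is advanced past both, so callers can keep
	 * appending further dump data.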
*/ ptr += req->length * sizeof(request_t); memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); return ptr + (rsp->length * sizeof(response_t)); } int qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, uint32_t ram_dwords, void **nxt) { struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; uint32_t *chunk = (uint32_t *)ha->gid_list; uint32_t dwords = qla2x00_gid_list_size(ha) / 4; uint32_t stat; ulong i, j, timer = 6000000; int rval = QLA_FUNCTION_FAILED; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); if (qla_pci_disconnected(vha, reg)) return rval; for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { if (i + dwords > ram_dwords) dwords = ram_dwords - i; wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM); wrt_reg_word(&reg->mailbox1, LSW(addr)); wrt_reg_word(&reg->mailbox8, MSW(addr)); wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma))); wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma))); wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma))); wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma))); wrt_reg_word(&reg->mailbox4, MSW(dwords)); wrt_reg_word(&reg->mailbox5, LSW(dwords)); wrt_reg_word(&reg->mailbox9, 0); wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT); ha->flags.mbox_int = 0; while (timer--) { udelay(5); if (qla_pci_disconnected(vha, reg)) return rval; stat = rd_reg_dword(&reg->host_status); /* Check for pending interrupts. */ if (!(stat & HSRX_RISC_INT)) continue; stat &= 0xff; if (stat != 0x1 && stat != 0x2 && stat != 0x10 && stat != 0x11) { /* Clear this intr; it wasn't a mailbox intr */ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword(&reg->hccr); continue; } set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); rval = rd_reg_word(&reg->mailbox0) & MBS_MASK; wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword(&reg->hccr); break; } ha->flags.mbox_int = 1; *nxt = ram + i; if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { /* no interrupt, timed out*/ return rval; } if (rval) { /* error completion status */ return rval; } for (j = 0; j < dwords; j++) { ram[i + j] = (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? 
chunk[j] : swab32(chunk[j]); } } *nxt = ram + i; return QLA_SUCCESS; } int qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, uint32_t ram_dwords, void **nxt) { int rval = QLA_FUNCTION_FAILED; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; uint32_t *chunk = (uint32_t *)ha->gid_list; uint32_t dwords = qla2x00_gid_list_size(ha) / 4; uint32_t stat; ulong i, j, timer = 6000000; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); if (qla_pci_disconnected(vha, reg)) return rval; for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { if (i + dwords > ram_dwords) dwords = ram_dwords - i; wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); wrt_reg_word(&reg->mailbox1, LSW(addr)); wrt_reg_word(&reg->mailbox8, MSW(addr)); wrt_reg_word(&reg->mailbox10, 0); wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma))); wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma))); wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma))); wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma))); wrt_reg_word(&reg->mailbox4, MSW(dwords)); wrt_reg_word(&reg->mailbox5, LSW(dwords)); wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT); ha->flags.mbox_int = 0; while (timer--) { udelay(5); if (qla_pci_disconnected(vha, reg)) return rval; stat = rd_reg_dword(&reg->host_status); /* Check for pending interrupts. */ if (!(stat & HSRX_RISC_INT)) continue; stat &= 0xff; if (stat != 0x1 && stat != 0x2 && stat != 0x10 && stat != 0x11) { wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword(&reg->hccr); continue; } set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); rval = rd_reg_word(&reg->mailbox0) & MBS_MASK; wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); rd_reg_dword(&reg->hccr); break; } ha->flags.mbox_int = 1; *nxt = ram + i; if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { /* no interrupt, timed out*/ return rval; } if (rval) { /* error completion status */ return rval; } for (j = 0; j < dwords; j++) { ram[i + j] = (__force __be32) ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? chunk[j] : swab32(chunk[j])); } } *nxt = ram + i; return QLA_SUCCESS; } static int qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram, uint32_t cram_size, void **nxt) { int rval; /* Code RAM. */ rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt); if (rval != QLA_SUCCESS) return rval; set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags); /* External Memory. */ rval = qla24xx_dump_ram(ha, 0x100000, *nxt, ha->fw_memory_size - 0x100000 + 1, nxt); if (rval == QLA_SUCCESS) set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags); return rval; } static __be32 * qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, uint32_t count, __be32 *buf) { __le32 __iomem *dmp_reg; wrt_reg_dword(&reg->iobase_addr, iobase); dmp_reg = &reg->iobase_window; for ( ; count--; dmp_reg++) *buf++ = htonl(rd_reg_dword(dmp_reg)); return buf; } void qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) { wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE); /* 100 usec delay is sufficient enough for hardware to pause RISC */ udelay(100); if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED) set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); } int qla24xx_soft_reset(struct qla_hw_data *ha) { int rval = QLA_SUCCESS; uint32_t cnt; uint16_t wd; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* * Reset RISC. The delay is dependent on system architecture. 
* Driver can proceed with the reset sequence after waiting * for a timeout period. */ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0) break; udelay(10); } if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE)) set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); wrt_reg_dword(&reg->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); /* Wait for soft-reset to complete. */ for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) break; udelay(10); } if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET)) set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET); rd_reg_dword(&reg->hccr); /* PCI Posting. */ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(10); else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); return rval; } static int qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, uint32_t ram_words, void **nxt) { int rval; uint32_t cnt, stat, timer, words, idx; uint16_t mb0; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; dma_addr_t dump_dma = ha->gid_list_dma; __le16 *dump = (__force __le16 *)ha->gid_list; rval = QLA_SUCCESS; mb0 = 0; WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); words = qla2x00_gid_list_size(ha) / 2; for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; cnt += words, addr += words) { if (cnt + words > ram_words) words = ram_words - cnt; WRT_MAILBOX_REG(ha, reg, 1, LSW(addr)); WRT_MAILBOX_REG(ha, reg, 8, MSW(addr)); WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma)); WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma)); WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma))); WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma))); WRT_MAILBOX_REG(ha, reg, 4, words); wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT); for (timer = 6000000; timer; timer--) { /* Check for pending interrupts. */ stat = rd_reg_dword(&reg->u.isp2300.host_status); if (stat & HSR_RISC_INT) { stat &= 0xff; if (stat == 0x1 || stat == 0x2) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); /* Release mailbox registers. */ wrt_reg_word(&reg->semaphore, 0); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); break; } else if (stat == 0x10 || stat == 0x11) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); break; } /* clear this intr; it wasn't a mailbox intr */ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); } udelay(5); } if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; for (idx = 0; idx < words; idx++) ram[cnt + idx] = cpu_to_be16(le16_to_cpu(dump[idx])); } else { rval = QLA_FUNCTION_FAILED; } } *nxt = rval == QLA_SUCCESS ? 
&ram[cnt] : NULL; return rval; } static inline void qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, __be16 *buf) { __le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd; for ( ; count--; dmp_reg++) *buf++ = htons(rd_reg_word(dmp_reg)); } static inline void * qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr) { if (!ha->eft) return ptr; memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size)); return ptr + ntohl(ha->fw_dump->eft_size); } static inline void * qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) { uint32_t cnt; __be32 *iter_reg; struct qla2xxx_fce_chain *fcec = ptr; if (!ha->fce) return ptr; *last_chain = &fcec->type; fcec->type = htonl(DUMP_CHAIN_FCE); fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + fce_calc_size(ha->fce_bufs)); fcec->size = htonl(fce_calc_size(ha->fce_bufs)); fcec->addr_l = htonl(LSD(ha->fce_dma)); fcec->addr_h = htonl(MSD(ha->fce_dma)); iter_reg = fcec->eregs; for (cnt = 0; cnt < 8; cnt++) *iter_reg++ = htonl(ha->fce_mb[cnt]); memcpy(iter_reg, ha->fce, ntohl(fcec->size)); return (char *)iter_reg + ntohl(fcec->size); } static inline void * qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) { struct qla2xxx_offld_chain *c = ptr; if (!ha->exlogin_buf) return ptr; *last_chain = &c->type; c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN); c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + ha->exlogin_size); c->size = cpu_to_be32(ha->exlogin_size); c->addr = cpu_to_be64(ha->exlogin_buf_dma); ptr += sizeof(struct qla2xxx_offld_chain); memcpy(ptr, ha->exlogin_buf, ha->exlogin_size); return (char *)ptr + be32_to_cpu(c->size); } static inline void * qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) { struct qla2xxx_offld_chain *c = ptr; if (!ha->exchoffld_buf) return ptr; *last_chain = &c->type; c->type = cpu_to_be32(DUMP_CHAIN_EXCHG); c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + ha->exchoffld_size); c->size = cpu_to_be32(ha->exchoffld_size); c->addr = cpu_to_be64(ha->exchoffld_buf_dma); ptr += sizeof(struct qla2xxx_offld_chain); memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size); return (char *)ptr + be32_to_cpu(c->size); } static inline void * qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) { struct qla2xxx_mqueue_chain *q; struct qla2xxx_mqueue_header *qh; uint32_t num_queues; int que; struct { int length; void *ring; } aq, *aqp; if (!ha->tgt.atio_ring) return ptr; num_queues = 1; aqp = &aq; aqp->length = ha->tgt.atio_q_length; aqp->ring = ha->tgt.atio_ring; for (que = 0; que < num_queues; que++) { /* aqp = ha->atio_q_map[que]; */ q = ptr; *last_chain = &q->type; q->type = htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + (aqp->length * sizeof(request_t))); ptr += sizeof(struct qla2xxx_mqueue_chain); /* Add header. */ qh = ptr; qh->queue = htonl(TYPE_ATIO_QUEUE); qh->number = htonl(que); qh->size = htonl(aqp->length * sizeof(request_t)); ptr += sizeof(struct qla2xxx_mqueue_header); /* Add data. 
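	 * Each dumped queue is emitted as a chain header, then a queue
	 * header, then the raw ring contents (ATIO entries are request_t
	 * sized).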
*/ memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t)); ptr += aqp->length * sizeof(request_t); } return ptr; } static inline void * qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) { struct qla2xxx_mqueue_chain *q; struct qla2xxx_mqueue_header *qh; struct req_que *req; struct rsp_que *rsp; int que; if (!ha->mqenable) return ptr; /* Request queues */ for (que = 1; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) break; /* Add chain. */ q = ptr; *last_chain = &q->type; q->type = htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + (req->length * sizeof(request_t))); ptr += sizeof(struct qla2xxx_mqueue_chain); /* Add header. */ qh = ptr; qh->queue = htonl(TYPE_REQUEST_QUEUE); qh->number = htonl(que); qh->size = htonl(req->length * sizeof(request_t)); ptr += sizeof(struct qla2xxx_mqueue_header); /* Add data. */ memcpy(ptr, req->ring, req->length * sizeof(request_t)); ptr += req->length * sizeof(request_t); } /* Response queues */ for (que = 1; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp) break; /* Add chain. */ q = ptr; *last_chain = &q->type; q->type = htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + (rsp->length * sizeof(response_t))); ptr += sizeof(struct qla2xxx_mqueue_chain); /* Add header. */ qh = ptr; qh->queue = htonl(TYPE_RESPONSE_QUEUE); qh->number = htonl(que); qh->size = htonl(rsp->length * sizeof(response_t)); ptr += sizeof(struct qla2xxx_mqueue_header); /* Add data. */ memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t)); ptr += rsp->length * sizeof(response_t); } return ptr; } static inline void * qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) { uint32_t cnt, que_idx; uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; device_reg_t *reg; if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) return ptr; mq = ptr; *last_chain = &mq->type; mq->type = htonl(DUMP_CHAIN_MQ); mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain)); que_cnt = ha->max_req_queues > ha->max_rsp_queues ? ha->max_req_queues : ha->max_rsp_queues; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { reg = ISP_QUE_REG(ha, cnt); que_idx = cnt * 4; mq->qregs[que_idx] = htonl(rd_reg_dword(&reg->isp25mq.req_q_in)); mq->qregs[que_idx+1] = htonl(rd_reg_dword(&reg->isp25mq.req_q_out)); mq->qregs[que_idx+2] = htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in)); mq->qregs[que_idx+3] = htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out)); } return ptr + sizeof(struct qla2xxx_mq_chain); } void qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) { struct qla_hw_data *ha = vha->hw; if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xd000, "Failed to dump firmware (%x), dump status flags (0x%lx).\n", rval, ha->fw_dump_cap_flags); ha->fw_dumped = false; } else { ql_log(ql_log_info, vha, 0xd001, "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); ha->fw_dumped = true; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } } void qla2xxx_dump_fw(scsi_qla_host_t *vha) { unsigned long flags; spin_lock_irqsave(&vha->hw->hardware_lock, flags); vha->hw->isp_ops->fw_dump(vha); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); } /** * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 
* @vha: HA context */ void qla2300_fw_dump(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; __le16 __iomem *dmp_reg; struct qla2300_fw_dump *fw; void *nxt; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); lockdep_assert_held(&ha->hardware_lock); if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd002, "No buffer available for dump.\n"); return; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd003, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); return; } fw = &ha->fw_dump->isp.isp23; qla2xxx_prep_dump(ha, ha->fw_dump); rval = QLA_SUCCESS; fw->hccr = htons(rd_reg_word(&reg->hccr)); /* Pause RISC. */ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC); if (IS_QLA2300(ha)) { for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } } else { rd_reg_word(&reg->hccr); /* PCI Posting. */ udelay(10); } if (rval == QLA_SUCCESS) { dmp_reg = &reg->flash_address; for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); dmp_reg = &reg->u.isp2300.req_q_in; for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg); cnt++, dmp_reg++) fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg)); dmp_reg = &reg->u.isp2300.mailbox0; for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, dmp_reg++) fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); wrt_reg_word(&reg->ctrl_status, 0x40); qla2xxx_read_window(reg, 32, fw->resp_dma_reg); wrt_reg_word(&reg->ctrl_status, 0x50); qla2xxx_read_window(reg, 48, fw->dma_reg); wrt_reg_word(&reg->ctrl_status, 0x00); dmp_reg = &reg->risc_hw; for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++) fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); wrt_reg_word(&reg->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); wrt_reg_word(&reg->pcr, 0x2200); qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); wrt_reg_word(&reg->pcr, 0x2400); qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); wrt_reg_word(&reg->pcr, 0x2600); qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); wrt_reg_word(&reg->pcr, 0x2800); qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); wrt_reg_word(&reg->pcr, 0x2A00); qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); wrt_reg_word(&reg->pcr, 0x2C00); qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); wrt_reg_word(&reg->pcr, 0x2E00); qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); wrt_reg_word(&reg->ctrl_status, 0x10); qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); wrt_reg_word(&reg->ctrl_status, 0x20); qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); wrt_reg_word(&reg->ctrl_status, 0x30); qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); /* Reset RISC. */ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET); for (cnt = 0; cnt < 30000; cnt++) { if ((rd_reg_word(&reg->ctrl_status) & CSR_ISP_SOFT_RESET) == 0) break; udelay(10); } } if (!IS_QLA2300(ha)) { for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } } /* Get RISC SRAM. */ if (rval == QLA_SUCCESS) rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, ARRAY_SIZE(fw->risc_ram), &nxt); /* Get stack SRAM. */ if (rval == QLA_SUCCESS) rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, ARRAY_SIZE(fw->stack_ram), &nxt); /* Get data SRAM. 
*/ if (rval == QLA_SUCCESS) rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram, ha->fw_memory_size - 0x11000 + 1, &nxt); if (rval == QLA_SUCCESS) qla2xxx_copy_queues(ha, nxt); qla2xxx_dump_post_process(base_vha, rval); } /** * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. * @vha: HA context */ void qla2100_fw_dump(scsi_qla_host_t *vha) { int rval; uint32_t cnt, timer; uint16_t risc_address = 0; uint16_t mb0 = 0, mb2 = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; __le16 __iomem *dmp_reg; struct qla2100_fw_dump *fw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); lockdep_assert_held(&ha->hardware_lock); if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd004, "No buffer available for dump.\n"); return; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd005, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); return; } fw = &ha->fw_dump->isp.isp21; qla2xxx_prep_dump(ha, ha->fw_dump); rval = QLA_SUCCESS; fw->hccr = htons(rd_reg_word(&reg->hccr)); /* Pause RISC. */ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC); for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) { dmp_reg = &reg->flash_address; for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); dmp_reg = &reg->u.isp2100.mailbox0; for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) { if (cnt == 8) dmp_reg = &reg->u_end.isp2200.mailbox8; fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); } dmp_reg = &reg->u.isp2100.unused_2[0]; for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++) fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg)); wrt_reg_word(&reg->ctrl_status, 0x00); dmp_reg = &reg->risc_hw; for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++) fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); wrt_reg_word(&reg->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); wrt_reg_word(&reg->pcr, 0x2100); qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); wrt_reg_word(&reg->pcr, 0x2200); qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); wrt_reg_word(&reg->pcr, 0x2300); qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); wrt_reg_word(&reg->pcr, 0x2400); qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); wrt_reg_word(&reg->pcr, 0x2500); qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); wrt_reg_word(&reg->pcr, 0x2600); qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); wrt_reg_word(&reg->pcr, 0x2700); qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); wrt_reg_word(&reg->ctrl_status, 0x10); qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); wrt_reg_word(&reg->ctrl_status, 0x20); qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); wrt_reg_word(&reg->ctrl_status, 0x30); qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); /* Reset the ISP. */ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET); } for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } /* Pause RISC. */ if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) { wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC); for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) { /* Set memory configuration and timing. 
*/ if (IS_QLA2100(ha)) wrt_reg_word(&reg->mctr, 0xf1); else wrt_reg_word(&reg->mctr, 0xf2); rd_reg_word(&reg->mctr); /* PCI Posting. */ /* Release RISC. */ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC); } } if (rval == QLA_SUCCESS) { /* Get RISC SRAM. */ risc_address = 0x1000; WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); } for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS; cnt++, risc_address++) { WRT_MAILBOX_REG(ha, reg, 1, risc_address); wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT); for (timer = 6000000; timer != 0; timer--) { /* Check for pending interrupts. */ if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) { if (rd_reg_word(&reg->semaphore) & BIT_0) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); mb2 = RD_MAILBOX_REG(ha, reg, 2); wrt_reg_word(&reg->semaphore, 0); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); break; } wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); rd_reg_word(&reg->hccr); } udelay(5); } if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; fw->risc_ram[cnt] = htons(mb2); } else { rval = QLA_FUNCTION_FAILED; } } if (rval == QLA_SUCCESS) qla2xxx_copy_queues(ha, &fw->queue_dump[0]); qla2xxx_dump_post_process(base_vha, rval); } void qla24xx_fw_dump(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; __le32 __iomem *dmp_reg; __be32 *iter_reg; __le16 __iomem *mbx_reg; struct qla24xx_fw_dump *fw; void *nxt; void *nxt_chain; __be32 *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); lockdep_assert_held(&ha->hardware_lock); if (IS_P3P_TYPE(ha)) return; ha->fw_dump_cap_flags = 0; if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd006, "No buffer available for dump.\n"); return; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd007, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); return; } QLA_FW_STOPPED(ha); fw = &ha->fw_dump->isp.isp24; qla2xxx_prep_dump(ha, ha->fw_dump); fw->host_status = htonl(rd_reg_dword(&reg->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip * is the right approach incase of pause timeout */ qla24xx_pause_risc(reg, ha); /* Host interface registers. */ dmp_reg = &reg->flash_addr; for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); /* Disable interrupts. */ wrt_reg_dword(&reg->ictrl, 0); rd_reg_dword(&reg->ictrl); /* Shadow registers. */ wrt_reg_dword(&reg->iobase_addr, 0x0F70); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_select, 0xB0000000); fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0100000); fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0200000); fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0300000); fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0400000); fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0500000); fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0600000); fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); /* Mailbox registers. 
*/ mbx_reg = &reg->mailbox0; for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); qla24xx_read_window(reg, 0xBF70, 16, iter_reg); qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg); qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); /* Receive sequence registers. */ iter_reg = fw->rseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); qla24xx_read_window(reg, 0xFF70, 16, iter_reg); qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg); qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); /* Command DMA registers. */ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); /* Queues. */ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); qla24xx_read_window(reg, 0x7610, 16, iter_reg); iter_reg = fw->xmt1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); qla24xx_read_window(reg, 0x7630, 16, iter_reg); iter_reg = fw->xmt2_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); qla24xx_read_window(reg, 0x7650, 16, iter_reg); iter_reg = fw->xmt3_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); qla24xx_read_window(reg, 0x7670, 16, iter_reg); iter_reg = fw->xmt4_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); qla24xx_read_window(reg, 0x7690, 16, iter_reg); qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); /* Receive DMA registers. */ iter_reg = fw->rcvt0_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); qla24xx_read_window(reg, 0x7710, 16, iter_reg); iter_reg = fw->rcvt1_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); qla24xx_read_window(reg, 0x7730, 16, iter_reg); /* RISC registers. 
*/ iter_reg = fw->risc_gp_reg; iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); qla24xx_read_window(reg, 0x0F70, 16, iter_reg); /* Local memory controller registers. */ iter_reg = fw->lmc_reg; iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); qla24xx_read_window(reg, 0x3060, 16, iter_reg); /* Fibre Protocol Module registers. */ iter_reg = fw->fpm_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); qla24xx_read_window(reg, 0x40B0, 16, iter_reg); /* Frame Buffer registers. */ iter_reg = fw->fb_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); qla24xx_read_window(reg, 0x61B0, 16, iter_reg); rval = qla24xx_soft_reset(ha); if (rval != QLA_SUCCESS) goto qla24xx_fw_dump_failed_0; rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), &nxt); if (rval != QLA_SUCCESS) goto qla24xx_fw_dump_failed_0; nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); nxt_chain = (void *)ha->fw_dump + ha->chain_offset; nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. 
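	 * The dump length is recomputed from the final write pointer so it
	 * covers only the data actually captured, including any appended
	 * dump chains.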
*/ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); qla24xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); } void qla25xx_fw_dump(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; __le32 __iomem *dmp_reg; __be32 *iter_reg; __le16 __iomem *mbx_reg; struct qla25xx_fw_dump *fw; void *nxt, *nxt_chain; __be32 *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); lockdep_assert_held(&ha->hardware_lock); ha->fw_dump_cap_flags = 0; if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd008, "No buffer available for dump.\n"); return; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd009, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); return; } QLA_FW_STOPPED(ha); fw = &ha->fw_dump->isp.isp25; qla2xxx_prep_dump(ha, ha->fw_dump); ha->fw_dump->version = htonl(2); fw->host_status = htonl(rd_reg_dword(&reg->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip * is the right approach incase of pause timeout */ qla24xx_pause_risc(reg, ha); /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); qla24xx_read_window(reg, 0x7010, 16, iter_reg); /* PCIe registers. */ wrt_reg_dword(&reg->iobase_addr, 0x7C00); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); dmp_reg++; fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); dmp_reg++; fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window)); wrt_reg_dword(&reg->iobase_window, 0x00); rd_reg_dword(&reg->iobase_window); /* Host interface registers. */ dmp_reg = &reg->flash_addr; for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); /* Disable interrupts. */ wrt_reg_dword(&reg->ictrl, 0); rd_reg_dword(&reg->ictrl); /* Shadow registers. */ wrt_reg_dword(&reg->iobase_addr, 0x0F70); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_select, 0xB0000000); fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0100000); fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0200000); fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0300000); fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0400000); fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0500000); fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0600000); fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0700000); fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0800000); fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0900000); fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0A00000); fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata)); /* RISC I/O register. */ wrt_reg_dword(&reg->iobase_addr, 0x0010); fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); /* Mailbox registers. 
*/ mbx_reg = &reg->mailbox0; for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); qla24xx_read_window(reg, 0xBF70, 16, iter_reg); iter_reg = fw->xseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); /* Receive sequence registers. */ iter_reg = fw->rseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); qla24xx_read_window(reg, 0xFF70, 16, iter_reg); iter_reg = fw->rseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); /* Auxiliary sequence registers. */ iter_reg = fw->aseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); qla24xx_read_window(reg, 0xB070, 16, iter_reg); iter_reg = fw->aseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); /* Command DMA registers. */ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); /* Queues. */ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); /* Transmit DMA registers. 
*/ iter_reg = fw->xmt0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); qla24xx_read_window(reg, 0x7610, 16, iter_reg); iter_reg = fw->xmt1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); qla24xx_read_window(reg, 0x7630, 16, iter_reg); iter_reg = fw->xmt2_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); qla24xx_read_window(reg, 0x7650, 16, iter_reg); iter_reg = fw->xmt3_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); qla24xx_read_window(reg, 0x7670, 16, iter_reg); iter_reg = fw->xmt4_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); qla24xx_read_window(reg, 0x7690, 16, iter_reg); qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); /* Receive DMA registers. */ iter_reg = fw->rcvt0_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); qla24xx_read_window(reg, 0x7710, 16, iter_reg); iter_reg = fw->rcvt1_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); qla24xx_read_window(reg, 0x7730, 16, iter_reg); /* RISC registers. */ iter_reg = fw->risc_gp_reg; iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); qla24xx_read_window(reg, 0x0F70, 16, iter_reg); /* Local memory controller registers. */ iter_reg = fw->lmc_reg; iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); qla24xx_read_window(reg, 0x3070, 16, iter_reg); /* Fibre Protocol Module registers. */ iter_reg = fw->fpm_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); qla24xx_read_window(reg, 0x40B0, 16, iter_reg); /* Frame Buffer registers. 
*/ iter_reg = fw->fb_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); qla24xx_read_window(reg, 0x6F00, 16, iter_reg); /* Multi queue registers */ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, &last_chain); rval = qla24xx_soft_reset(ha); if (rval != QLA_SUCCESS) goto qla25xx_fw_dump_failed_0; rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), &nxt); if (rval != QLA_SUCCESS) goto qla25xx_fw_dump_failed_0; nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. */ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); qla25xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); } void qla81xx_fw_dump(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; __le32 __iomem *dmp_reg; __be32 *iter_reg; __le16 __iomem *mbx_reg; struct qla81xx_fw_dump *fw; void *nxt, *nxt_chain; __be32 *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); lockdep_assert_held(&ha->hardware_lock); ha->fw_dump_cap_flags = 0; if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00a, "No buffer available for dump.\n"); return; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd00b, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); return; } fw = &ha->fw_dump->isp.isp81; qla2xxx_prep_dump(ha, ha->fw_dump); fw->host_status = htonl(rd_reg_dword(&reg->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip * is the right approach incase of pause timeout */ qla24xx_pause_risc(reg, ha); /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); qla24xx_read_window(reg, 0x7010, 16, iter_reg); /* PCIe registers. */ wrt_reg_dword(&reg->iobase_addr, 0x7C00); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); dmp_reg++; fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); dmp_reg++; fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window)); wrt_reg_dword(&reg->iobase_window, 0x00); rd_reg_dword(&reg->iobase_window); /* Host interface registers. */ dmp_reg = &reg->flash_addr; for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); /* Disable interrupts. */ wrt_reg_dword(&reg->ictrl, 0); rd_reg_dword(&reg->ictrl); /* Shadow registers. 
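Each shadow register is selected by writing 0xB0n00000 (n = 0..0xA) to iobase_select through the 0x0F70 window and read back from iobase_sdata.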
*/ wrt_reg_dword(&reg->iobase_addr, 0x0F70); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_select, 0xB0000000); fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0100000); fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0200000); fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0300000); fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0400000); fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0500000); fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0600000); fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0700000); fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0800000); fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0900000); fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0A00000); fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata)); /* RISC I/O register. */ wrt_reg_dword(&reg->iobase_addr, 0x0010); fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); /* Mailbox registers. */ mbx_reg = &reg->mailbox0; for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); qla24xx_read_window(reg, 0xBF70, 16, iter_reg); iter_reg = fw->xseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); /* Receive sequence registers. */ iter_reg = fw->rseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); qla24xx_read_window(reg, 0xFF70, 16, iter_reg); iter_reg = fw->rseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); /* Auxiliary sequence registers. 
*/ iter_reg = fw->aseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); qla24xx_read_window(reg, 0xB070, 16, iter_reg); iter_reg = fw->aseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); /* Command DMA registers. */ qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg); /* Queues. */ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); qla24xx_read_window(reg, 0x7610, 16, iter_reg); iter_reg = fw->xmt1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); qla24xx_read_window(reg, 0x7630, 16, iter_reg); iter_reg = fw->xmt2_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); qla24xx_read_window(reg, 0x7650, 16, iter_reg); iter_reg = fw->xmt3_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); qla24xx_read_window(reg, 0x7670, 16, iter_reg); iter_reg = fw->xmt4_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); qla24xx_read_window(reg, 0x7690, 16, iter_reg); qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); /* Receive DMA registers. */ iter_reg = fw->rcvt0_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); qla24xx_read_window(reg, 0x7710, 16, iter_reg); iter_reg = fw->rcvt1_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); qla24xx_read_window(reg, 0x7730, 16, iter_reg); /* RISC registers. */ iter_reg = fw->risc_gp_reg; iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); qla24xx_read_window(reg, 0x0F70, 16, iter_reg); /* Local memory controller registers. 
*/ iter_reg = fw->lmc_reg; iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); qla24xx_read_window(reg, 0x3070, 16, iter_reg); /* Fibre Protocol Module registers. */ iter_reg = fw->fpm_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); qla24xx_read_window(reg, 0x40D0, 16, iter_reg); /* Frame Buffer registers. */ iter_reg = fw->fb_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); qla24xx_read_window(reg, 0x6F00, 16, iter_reg); /* Multi queue registers */ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, &last_chain); rval = qla24xx_soft_reset(ha); if (rval != QLA_SUCCESS) goto qla81xx_fw_dump_failed_0; rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), &nxt); if (rval != QLA_SUCCESS) goto qla81xx_fw_dump_failed_0; nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. 
*/ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); qla81xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); } void qla83xx_fw_dump(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; __le32 __iomem *dmp_reg; __be32 *iter_reg; __le16 __iomem *mbx_reg; struct qla83xx_fw_dump *fw; void *nxt, *nxt_chain; __be32 *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); lockdep_assert_held(&ha->hardware_lock); ha->fw_dump_cap_flags = 0; if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00c, "No buffer available for dump!!!\n"); return; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd00d, "Firmware has been previously dumped (%p) -- ignoring " "request...\n", ha->fw_dump); return; } QLA_FW_STOPPED(ha); fw = &ha->fw_dump->isp.isp83; qla2xxx_prep_dump(ha, ha->fw_dump); fw->host_status = htonl(rd_reg_dword(&reg->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip * is the right approach incase of pause timeout */ qla24xx_pause_risc(reg, ha); wrt_reg_dword(&reg->iobase_addr, 0x6000); dmp_reg = &reg->iobase_window; rd_reg_dword(dmp_reg); wrt_reg_dword(dmp_reg, 0); dmp_reg = &reg->unused_4_1[0]; rd_reg_dword(dmp_reg); wrt_reg_dword(dmp_reg, 0); wrt_reg_dword(&reg->iobase_addr, 0x6010); dmp_reg = &reg->unused_4_1[2]; rd_reg_dword(dmp_reg); wrt_reg_dword(dmp_reg, 0); /* select PCR and disable ecc checking and correction */ wrt_reg_dword(&reg->iobase_addr, 0x0F70); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */ /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg); qla24xx_read_window(reg, 0x7040, 16, iter_reg); /* PCIe registers. */ wrt_reg_dword(&reg->iobase_addr, 0x7C00); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); dmp_reg++; fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); dmp_reg++; fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window)); wrt_reg_dword(&reg->iobase_window, 0x00); rd_reg_dword(&reg->iobase_window); /* Host interface registers. */ dmp_reg = &reg->flash_addr; for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); /* Disable interrupts. */ wrt_reg_dword(&reg->ictrl, 0); rd_reg_dword(&reg->ictrl); /* Shadow registers. 
*/ wrt_reg_dword(&reg->iobase_addr, 0x0F70); rd_reg_dword(&reg->iobase_addr); wrt_reg_dword(&reg->iobase_select, 0xB0000000); fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0100000); fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0200000); fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0300000); fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0400000); fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0500000); fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0600000); fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0700000); fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0800000); fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0900000); fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata)); wrt_reg_dword(&reg->iobase_select, 0xB0A00000); fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata)); /* RISC I/O register. */ wrt_reg_dword(&reg->iobase_addr, 0x0010); fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); /* Mailbox registers. */ mbx_reg = &reg->mailbox0; for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg); qla24xx_read_window(reg, 0xBF70, 16, iter_reg); iter_reg = fw->xseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg); qla24xx_read_window(reg, 0xBFE0, 16, iter_reg); qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg); qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg); /* Receive sequence registers. 
*/ iter_reg = fw->rseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg); qla24xx_read_window(reg, 0xFF70, 16, iter_reg); iter_reg = fw->rseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg); qla24xx_read_window(reg, 0xFFD0, 16, iter_reg); qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg); qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg); qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg); /* Auxiliary sequence registers. */ iter_reg = fw->aseq_gp_reg; iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg); qla24xx_read_window(reg, 0xB170, 16, iter_reg); iter_reg = fw->aseq_0_reg; iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg); qla24xx_read_window(reg, 0xB0D0, 16, iter_reg); qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg); qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg); qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg); /* Command DMA registers. */ iter_reg = fw->cmd_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg); qla24xx_read_window(reg, 0x71F0, 16, iter_reg); /* Queues. */ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); /* Transmit DMA registers. 
*/ iter_reg = fw->xmt0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg); qla24xx_read_window(reg, 0x7610, 16, iter_reg); iter_reg = fw->xmt1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg); qla24xx_read_window(reg, 0x7630, 16, iter_reg); iter_reg = fw->xmt2_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg); qla24xx_read_window(reg, 0x7650, 16, iter_reg); iter_reg = fw->xmt3_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg); qla24xx_read_window(reg, 0x7670, 16, iter_reg); iter_reg = fw->xmt4_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg); qla24xx_read_window(reg, 0x7690, 16, iter_reg); qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg); /* Receive DMA registers. */ iter_reg = fw->rcvt0_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg); qla24xx_read_window(reg, 0x7710, 16, iter_reg); iter_reg = fw->rcvt1_data_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg); qla24xx_read_window(reg, 0x7730, 16, iter_reg); /* RISC registers. */ iter_reg = fw->risc_gp_reg; iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg); qla24xx_read_window(reg, 0x0F70, 16, iter_reg); /* Local memory controller registers. */ iter_reg = fw->lmc_reg; iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg); qla24xx_read_window(reg, 0x3070, 16, iter_reg); /* Fibre Protocol Module registers. */ iter_reg = fw->fpm_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg); qla24xx_read_window(reg, 0x40F0, 16, iter_reg); /* RQ0 Array registers. 
*/ iter_reg = fw->rq0_array_reg; iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg); qla24xx_read_window(reg, 0x5CF0, 16, iter_reg); /* RQ1 Array registers. */ iter_reg = fw->rq1_array_reg; iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg); qla24xx_read_window(reg, 0x5DF0, 16, iter_reg); /* RP0 Array registers. */ iter_reg = fw->rp0_array_reg; iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg); qla24xx_read_window(reg, 0x5EF0, 16, iter_reg); /* RP1 Array registers. 
*/ iter_reg = fw->rp1_array_reg; iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg); qla24xx_read_window(reg, 0x5FF0, 16, iter_reg); iter_reg = fw->at0_array_reg; iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg); qla24xx_read_window(reg, 0x70F0, 16, iter_reg); /* I/O Queue Control registers. */ qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg); /* Frame Buffer registers. */ iter_reg = fw->fb_hdw_reg; iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg); iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg); qla24xx_read_window(reg, 0x6F00, 16, iter_reg); /* Multi queue registers */ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset, &last_chain); rval = qla24xx_soft_reset(ha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xd00e, "SOFT RESET FAILED, forcing continuation of dump!!!\n"); rval = QLA_SUCCESS; ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); 
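/*
 * Soft reset failed; fall back to a manual RISC reset through the HCCR:
 * assert RISC reset, release the RISC pause, clear the reset, then poll
 * mailbox0 (30000 x 5us, roughly 150 ms) until it reads zero. If the RISC
 * never comes ready, skip the code RAM dump and jump straight to copying
 * the queues.
 */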
wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET); rd_reg_dword(&reg->hccr); wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE); rd_reg_dword(&reg->hccr); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET); rd_reg_dword(&reg->hccr); for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--) udelay(5); if (!cnt) { nxt = fw->code_ram; nxt += sizeof(fw->code_ram); nxt += (ha->fw_memory_size - 0x100000 + 1); goto copy_queue; } else { set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); ql_log(ql_log_warn, vha, 0xd010, "bigger hammer success?\n"); } } rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), &nxt); if (rval != QLA_SUCCESS) goto qla83xx_fw_dump_failed_0; copy_queue: nxt = qla2xxx_copy_queues(ha, nxt); qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain); nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain); if (last_chain) { ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. */ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump); qla83xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); } /****************************************************************************/ /* Driver Debug Functions. */ /****************************************************************************/ /* Write the debug message prefix into @pbuf. */ static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev, const scsi_qla_host_t *vha, uint msg_id) { if (vha) { const struct pci_dev *pdev = vha->hw->pdev; /* <module-name> [<dev-name>]-<msg-id>:<host>: */ snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR, dev_name(&(pdev->dev)), msg_id, vha->host_no); } else if (pdev) { snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, dev_name(&pdev->dev), msg_id); } else { /* <module-name> [<dev-name>]-<msg-id>: : */ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, "0000:00:00.0", msg_id); } } /* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message * and logs it to the messages file. * parameters: * level: The level of the debug messages to be printed. * If ql2xextended_error_logging value is correctly set, * this message will appear in the messages file. * vha: Pointer to the scsi_qla_host_t. * id: This is a unique identifier for the level. It identifies the * part of the code from where the message originated. * msg: The message to be displayed. */ void ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; char pbuf[64]; ql_ktrace(1, level, pbuf, NULL, vha, id, fmt); if (!ql_mask_match(level)) return; if (!pbuf[0]) /* set by ql_ktrace */ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id); va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; pr_warn("%s%pV", pbuf, &vaf); va_end(va); } /* * This function is for formatting and logging debug information. * It is to be used when vha is not available and pci is available, * i.e., before host allocation. It formats the message and logs it * to the messages file. * parameters: * level: The level of the debug messages to be printed. * If ql2xextended_error_logging value is correctly set, * this message will appear in the messages file. * pdev: Pointer to the struct pci_dev. 
* id: This is a unique id for the level. It identifies the part * of the code from where the message originated. * msg: The message to be displayed. */ void ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; char pbuf[128]; if (pdev == NULL) return; ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt); if (!ql_mask_match(level)) return; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!pbuf[0]) /* set by ql_ktrace */ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id + ql_dbg_offset); pr_warn("%s%pV", pbuf, &vaf); va_end(va); } /* * This function is for formatting and logging log messages. * It is to be used when vha is available. It formats the message * and logs it to the messages file. All the messages will be logged * irrespective of value of ql2xextended_error_logging. * parameters: * level: The level of the log messages to be printed in the * messages file. * vha: Pointer to the scsi_qla_host_t * id: This is a unique id for the level. It identifies the * part of the code from where the message originated. * msg: The message to be displayed. */ void ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; char pbuf[128]; if (level > ql_errlev) return; ql_ktrace(0, level, pbuf, NULL, vha, id, fmt); if (!pbuf[0]) /* set by ql_ktrace */ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id); va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; switch (level) { case ql_log_fatal: /* FATAL LOG */ pr_crit("%s%pV", pbuf, &vaf); break; case ql_log_warn: pr_err("%s%pV", pbuf, &vaf); break; case ql_log_info: pr_warn("%s%pV", pbuf, &vaf); break; default: pr_info("%s%pV", pbuf, &vaf); break; } va_end(va); } /* * This function is for formatting and logging log messages. * It is to be used when vha is not available and pci is available, * i.e., before host allocation. It formats the message and logs * it to the messages file. All the messages are logged irrespective * of the value of ql2xextended_error_logging. * parameters: * level: The level of the log messages to be printed in the * messages file. * pdev: Pointer to the struct pci_dev. * id: This is a unique id for the level. It identifies the * part of the code from where the message originated. * msg: The message to be displayed. */ void ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 
{ va_list va; struct va_format vaf; char pbuf[128]; if (pdev == NULL) return; if (level > ql_errlev) return; ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt); if (!pbuf[0]) /* set by ql_ktrace */ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id); va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; switch (level) { case ql_log_fatal: /* FATAL LOG */ pr_crit("%s%pV", pbuf, &vaf); break; case ql_log_warn: pr_err("%s%pV", pbuf, &vaf); break; case ql_log_info: pr_warn("%s%pV", pbuf, &vaf); break; default: pr_info("%s%pV", pbuf, &vaf); break; } va_end(va); } void ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) { int i; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; __le16 __iomem *mbx_reg; if (!ql_mask_match(level)) return; if (IS_P3P_TYPE(ha)) mbx_reg = &reg82->mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha)) mbx_reg = &reg24->mailbox0; else mbx_reg = MAILBOX_REG(ha, reg, 0); ql_dbg(level, vha, id, "Mailbox registers:\n"); for (i = 0; i < 6; i++, mbx_reg++) ql_dbg(level, vha, id, "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); } void ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, uint size) { uint cnt; if (!ql_mask_match(level)) return; ql_dbg(level, vha, id, "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); ql_dbg(level, vha, id, "----- -----------------------------------------------\n"); for (cnt = 0; cnt < size; cnt += 16) { ql_dbg(level, vha, id, "%04x: ", cnt); print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, buf + cnt, min(16U, size - cnt), false); } } /* * This function is for formatting and logging log messages. * It is to be used when vha is available. It formats the message * and logs it to the messages file. All the messages will be logged * irrespective of value of ql2xextended_error_logging. * parameters: * level: The level of the log messages to be printed in the * messages file. * vha: Pointer to the scsi_qla_host_t * id: This is a unique id for the level. It identifies the * part of the code from where the message originated. * msg: The message to be displayed. */ void ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, const char *fmt, ...) { va_list va; struct va_format vaf; char pbuf[128]; if (level > ql_errlev) return; ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt); if (!pbuf[0]) /* set by ql_ktrace */ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, qpair ? qpair->vha : NULL, id); va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; switch (level) { case ql_log_fatal: /* FATAL LOG */ pr_crit("%s%pV", pbuf, &vaf); break; case ql_log_warn: pr_err("%s%pV", pbuf, &vaf); break; case ql_log_info: pr_warn("%s%pV", pbuf, &vaf); break; default: pr_info("%s%pV", pbuf, &vaf); break; } va_end(va); } /* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message * and logs it to the messages file. * parameters: * level: The level of the debug messages to be printed. * If ql2xextended_error_logging value is correctly set, * this message will appear in the messages file. * vha: Pointer to the scsi_qla_host_t. * id: This is a unique identifier for the level. It identifies the * part of the code from where the message originated. * msg: The message to be displayed. */ void ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, const char *fmt, ...) 
{ va_list va; struct va_format vaf; char pbuf[128]; ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt); if (!ql_mask_match(level)) return; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!pbuf[0]) /* set by ql_ktrace */ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, qpair ? qpair->vha : NULL, id + ql_dbg_offset); pr_warn("%s%pV", pbuf, &vaf); va_end(va); }
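/*
 * Usage sketch for the logging helpers above (illustrative only; the
 * message IDs 0x1234 and 0x5678 are placeholders, not real qla2xxx IDs):
 *
 *	ql_dbg(ql_dbg_io, vha, 0x1234, "sp=%p completed, res=%d\n", sp, res);
 *	ql_log(ql_log_warn, vha, 0x5678, "Login failed, rval=%d\n", rval);
 *
 * ql_dbg() output is gated by ql2xextended_error_logging via
 * ql_mask_match(level), while ql_log() is gated only by ql_errlev and maps
 * the level to pr_crit/pr_err/pr_warn/pr_info.
 */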
linux-master
drivers/scsi/qla2xxx/qla_dbg.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2017 QLogic Corporation */ #include "qla_nvme.h" #include <linux/scatterlist.h> #include <linux/delay.h> #include <linux/nvme.h> #include <linux/nvme-fc.h> #include <linux/blk-mq-pci.h> #include <linux/blk-mq.h> static struct nvme_fc_port_template qla_nvme_fc_transport; static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate); struct qla_nvme_unsol_ctx { struct list_head elem; struct scsi_qla_host *vha; struct fc_port *fcport; struct srb *sp; struct nvmefc_ls_rsp lsrsp; struct nvmefc_ls_rsp *fd_rsp; struct work_struct lsrsp_work; struct work_struct abort_work; __le32 exchange_address; __le16 nport_handle; __le16 ox_id; int comp_status; spinlock_t cmd_lock; }; int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) { struct qla_nvme_rport *rport; struct nvme_fc_port_info req; int ret; if (!IS_ENABLED(CONFIG_NVME_FC)) return 0; if (!vha->flags.nvme_enabled) { ql_log(ql_log_info, vha, 0x2100, "%s: Not registering target since Host NVME is not enabled\n", __func__); return 0; } if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) return 0; if (!(fcport->nvme_prli_service_param & (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || (fcport->nvme_flag & NVME_FLAG_REGISTERED)) return 0; fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); req.port_name = wwn_to_u64(fcport->port_name); req.node_name = wwn_to_u64(fcport->node_name); req.port_role = 0; req.dev_loss_tmo = fcport->dev_loss_tmo; if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) req.port_role = FC_PORT_ROLE_NVME_INITIATOR; if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET) req.port_role |= FC_PORT_ROLE_NVME_TARGET; if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY) req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; req.port_id = fcport->d_id.b24; ql_log(ql_log_info, vha, 0x2102, "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n", __func__, req.node_name, req.port_name, req.port_id); ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req, &fcport->nvme_remote_port); if (ret) { ql_log(ql_log_warn, vha, 0x212e, "Failed to register remote port. Transport returned %d\n", ret); return ret; } nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, fcport->dev_loss_tmo); if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) ql_log(ql_log_info, vha, 0x212a, "PortID:%06x Supports SLER\n", req.port_id); if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL) ql_log(ql_log_info, vha, 0x212b, "PortID:%06x Supports PI control\n", req.port_id); rport = fcport->nvme_remote_port->private; rport->fcport = fcport; fcport->nvme_flag |= NVME_FLAG_REGISTERED; return 0; } /* Allocate a queue for NVMe traffic */ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx, u16 qsize, void **handle) { struct scsi_qla_host *vha; struct qla_hw_data *ha; struct qla_qpair *qpair; /* Map admin queue and 1st IO queue to index 0 */ if (qidx) qidx--; vha = (struct scsi_qla_host *)lport->private; ha = vha->hw; ql_log(ql_log_info, vha, 0x2104, "%s: handle %p, idx =%d, qsize %d\n", __func__, handle, qidx, qsize); if (qidx > qla_nvme_fc_transport.max_hw_queues) { ql_log(ql_log_warn, vha, 0x212f, "%s: Illegal qidx=%d. 
Max=%d\n", __func__, qidx, qla_nvme_fc_transport.max_hw_queues); return -EINVAL; } /* Use base qpair if max_qpairs is 0 */ if (!ha->max_qpairs) { qpair = ha->base_qpair; } else { if (ha->queue_pair_map[qidx]) { *handle = ha->queue_pair_map[qidx]; ql_log(ql_log_info, vha, 0x2121, "Returning existing qpair of %p for idx=%x\n", *handle, qidx); return 0; } qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); if (!qpair) { ql_log(ql_log_warn, vha, 0x2122, "Failed to allocate qpair\n"); return -EINVAL; } qla_adjust_iocb_limit(vha); } *handle = qpair; return 0; } static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); struct nvme_private *priv = (struct nvme_private *)sp->priv; struct nvmefc_fcp_req *fd; struct srb_iocb *nvme; unsigned long flags; if (!priv) goto out; nvme = &sp->u.iocb_cmd; fd = nvme->u.nvme.desc; spin_lock_irqsave(&priv->cmd_lock, flags); priv->sp = NULL; sp->priv = NULL; if (priv->comp_status == QLA_SUCCESS) { fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); fd->status = NVME_SC_SUCCESS; } else { fd->rcv_rsplen = 0; fd->transferred_length = 0; fd->status = NVME_SC_INTERNAL; } spin_unlock_irqrestore(&priv->cmd_lock, flags); fd->done(fd); out: qla2xxx_rel_qpair_sp(sp->qpair, sp); } static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); struct nvme_private *priv = (struct nvme_private *)sp->priv; struct nvmefc_ls_req *fd; unsigned long flags; if (!priv) goto out; spin_lock_irqsave(&priv->cmd_lock, flags); priv->sp = NULL; sp->priv = NULL; spin_unlock_irqrestore(&priv->cmd_lock, flags); fd = priv->fd; fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); } static void qla_nvme_ls_complete(struct work_struct *work) { struct nvme_private *priv = container_of(work, struct nvme_private, ls_work); kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref); } static void qla_nvme_sp_ls_done(srb_t *sp, int res) { struct nvme_private *priv = sp->priv; if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) return; if (res) res = -EINVAL; priv->comp_status = res; INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); schedule_work(&priv->ls_work); } static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); struct qla_nvme_unsol_ctx *uctx = sp->priv; struct nvmefc_ls_rsp *fd_rsp; unsigned long flags; if (!uctx) { qla2x00_rel_sp(sp); return; } spin_lock_irqsave(&uctx->cmd_lock, flags); uctx->sp = NULL; sp->priv = NULL; spin_unlock_irqrestore(&uctx->cmd_lock, flags); fd_rsp = uctx->fd_rsp; list_del(&uctx->elem); fd_rsp->done(fd_rsp); kfree(uctx); qla2x00_rel_sp(sp); } static void qla_nvme_lsrsp_complete(struct work_struct *work) { struct qla_nvme_unsol_ctx *uctx = container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work); kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref); } static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res) { struct qla_nvme_unsol_ctx *uctx = sp->priv; if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) return; if (res) res = -EINVAL; uctx->comp_status = res; INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete); schedule_work(&uctx->lsrsp_work); } /* it assumed that QPair lock is held. 
*/ static void qla_nvme_sp_done(srb_t *sp, int res) { struct nvme_private *priv = sp->priv; priv->comp_status = res; kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); return; } static void qla_nvme_abort_work(struct work_struct *work) { struct nvme_private *priv = container_of(work, struct nvme_private, abort_work); srb_t *sp = priv->sp; fc_port_t *fcport = sp->fcport; struct qla_hw_data *ha = fcport->vha->hw; int rval, abts_done_called = 1; bool io_wait_for_abort_done; uint32_t handle; ql_dbg(ql_dbg_io, fcport->vha, 0xffff, "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n", __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted); if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED) goto out; if (ha->flags.host_shutting_down) { ql_log(ql_log_info, sp->fcport->vha, 0xffff, "%s Calling done on sp: %p, type: 0x%x\n", __func__, sp, sp->type); sp->done(sp, 0); goto out; } /* * sp may not be valid after abort_command if return code is either * SUCCESS or ERR_FROM_FW codes, so cache the value here. */ io_wait_for_abort_done = ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp); handle = sp->handle; rval = ha->isp_ops->abort_command(sp); ql_dbg(ql_dbg_io, fcport->vha, 0x212b, "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", sp, handle, fcport, rval); /* * If async tmf is enabled, the abort callback is called only on * return codes QLA_SUCCESS and QLA_ERR_FROM_FW. */ if (ql2xasynctmfenable && rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW) abts_done_called = 0; /* * Returned before decreasing kref so that I/O requests * are waited until ABTS complete. This kref is decreased * at qla24xx_abort_sp_done function. */ if (abts_done_called && io_wait_for_abort_done) return; out: /* kref_get was done before work was schedule. 
*/ kref_put(&sp->cmd_kref, sp->put_fn); } static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, struct nvmefc_ls_rsp *fd_resp) { struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp, struct qla_nvme_unsol_ctx, lsrsp); struct qla_nvme_rport *qla_rport = rport->private; fc_port_t *fcport = qla_rport->fcport; struct scsi_qla_host *vha = uctx->vha; struct qla_hw_data *ha = vha->hw; struct qla_nvme_lsrjt_pt_arg a; struct srb_iocb *nvme; srb_t *sp; int rval = QLA_FUNCTION_FAILED; uint8_t cnt = 0; if (!fcport || fcport->deleted) goto out; if (!ha->flags.fw_started) goto out; /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto out; sp->type = SRB_NVME_LS; sp->name = "nvme_ls"; sp->done = qla_nvme_sp_lsrsp_done; sp->put_fn = qla_nvme_release_lsrsp_cmd_kref; sp->priv = (void *)uctx; sp->unsol_rsp = 1; uctx->sp = sp; spin_lock_init(&uctx->cmd_lock); nvme = &sp->u.iocb_cmd; uctx->fd_rsp = fd_resp; nvme->u.nvme.desc = fd_resp; nvme->u.nvme.dir = 0; nvme->u.nvme.dl = 0; nvme->u.nvme.timeout_sec = 0; nvme->u.nvme.cmd_dma = fd_resp->rspdma; nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen); nvme->u.nvme.rsp_len = 0; nvme->u.nvme.rsp_dma = 0; nvme->u.nvme.exchange_address = uctx->exchange_address; nvme->u.nvme.nport_handle = uctx->nport_handle; nvme->u.nvme.ox_id = uctx->ox_id; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd_resp->rsplen, DMA_TO_DEVICE); ql_dbg(ql_dbg_unsol, vha, 0x2122, "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n", fcport->d_id.b24, fcport->port_name, uctx->exchange_address, uctx->ox_id, uctx->nport_handle); retry: rval = qla2x00_start_sp(sp); switch (rval) { case QLA_SUCCESS: break; case EAGAIN: msleep(PURLS_MSLEEP_INTERVAL); cnt++; if (cnt < PURLS_RETRY_COUNT) goto retry; fallthrough; default: ql_dbg(ql_log_warn, vha, 0x2123, "Failed to xmit Unsol ls response = %d\n", rval); rval = -EIO; qla2x00_rel_sp(sp); goto out; } return 0; out: memset((void *)&a, 0, sizeof(a)); a.vp_idx = vha->vp_idx; a.nport_handle = uctx->nport_handle; a.xchg_address = uctx->exchange_address; qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true); kfree(uctx); return rval; } static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) { struct nvme_private *priv = fd->private; unsigned long flags; spin_lock_irqsave(&priv->cmd_lock, flags); if (!priv->sp) { spin_unlock_irqrestore(&priv->cmd_lock, flags); return; } if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { spin_unlock_irqrestore(&priv->cmd_lock, flags); return; } spin_unlock_irqrestore(&priv->cmd_lock, flags); INIT_WORK(&priv->abort_work, qla_nvme_abort_work); schedule_work(&priv->abort_work); } static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) { struct qla_nvme_rport *qla_rport = rport->private; fc_port_t *fcport = qla_rport->fcport; struct srb_iocb *nvme; struct nvme_private *priv = fd->private; struct scsi_qla_host *vha; int rval = QLA_FUNCTION_FAILED; struct qla_hw_data *ha; srb_t *sp; if (!fcport || fcport->deleted) return rval; vha = fcport->vha; ha = vha->hw; if (!ha->flags.fw_started) return rval; /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) return rval; sp->type = SRB_NVME_LS; sp->name = "nvme_ls"; sp->done = qla_nvme_sp_ls_done; sp->put_fn = qla_nvme_release_ls_cmd_kref; sp->priv = priv; priv->sp = sp; kref_init(&sp->cmd_kref); 
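/*
 * The LS request is carried on an SRB_NVME_LS srb; on completion
 * qla_nvme_sp_ls_done() records the status and schedules ls_work, which
 * drops cmd_kref and calls fd->done() from process context
 * (qla_nvme_ls_complete -> qla_nvme_release_ls_cmd_kref).
 */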
spin_lock_init(&priv->cmd_lock); nvme = &sp->u.iocb_cmd; priv->fd = fd; nvme->u.nvme.desc = fd; nvme->u.nvme.dir = 0; nvme->u.nvme.dl = 0; nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen); nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen); nvme->u.nvme.rsp_dma = fd->rspdma; nvme->u.nvme.timeout_sec = fd->timeout; nvme->u.nvme.cmd_dma = fd->rqstdma; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); sp->priv = NULL; priv->sp = NULL; qla2x00_rel_sp(sp); return rval; } return rval; } static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, void *hw_queue_handle, struct nvmefc_fcp_req *fd) { struct nvme_private *priv = fd->private; unsigned long flags; spin_lock_irqsave(&priv->cmd_lock, flags); if (!priv->sp) { spin_unlock_irqrestore(&priv->cmd_lock, flags); return; } if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { spin_unlock_irqrestore(&priv->cmd_lock, flags); return; } spin_unlock_irqrestore(&priv->cmd_lock, flags); INIT_WORK(&priv->abort_work, qla_nvme_abort_work); schedule_work(&priv->abort_work); } static inline int qla2x00_start_nvme_mq(srb_t *sp) { unsigned long flags; uint32_t *clr_ptr; uint32_t handle; struct cmd_nvme *cmd_pkt; uint16_t cnt, i; uint16_t req_cnt; uint16_t tot_dsds; uint16_t avail_dsds; struct dsd64 *cur_dsd; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; struct srb_iocb *nvme = &sp->u.iocb_cmd; struct scatterlist *sgl, *sg; struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; uint32_t rval = QLA_SUCCESS; /* Setup qpair pointers */ req = qpair->req; rsp = qpair->rsp; tot_dsds = fd->sg_cnt; /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); handle = qla2xxx_get_next_handle(req); if (handle == 0) { rval = -EBUSY; goto queuing_error; } req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; if (qla_get_fw_resources(sp->qpair, &sp->iores)) { rval = -EBUSY; goto queuing_error; } if (req->cnt < (req_cnt + 2)) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); if (qla2x00_check_reg16_for_disconnect(vha, cnt)) { rval = -EBUSY; goto queuing_error; } } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else req->cnt = req->length - (req->ring_index - cnt); if (req->cnt < (req_cnt + 2)){ rval = -EBUSY; goto queuing_error; } } if (unlikely(!fd->sqid)) { if (cmd->sqe.common.opcode == nvme_admin_async_event) { nvme->u.nvme.aen_op = 1; atomic_inc(&ha->nvme_active_aen_cnt); } } /* Build command packet. */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; sp->handle = handle; req->cnt -= req_cnt; cmd_pkt = (struct cmd_nvme *)req->ring_ptr; cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. */ clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); cmd_pkt->entry_status = 0; /* Update entry type to indicate Command NVME IOCB */ cmd_pkt->entry_type = COMMAND_NVME; /* No data transfer how do we check buffer len == 0?? 
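(In the dispatch below, fd->io_dir == 0 is treated as the no-data case and control_flags is left at 0.)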
*/ if (fd->io_dir == NVMEFC_FCP_READ) { cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); qpair->counters.input_bytes += fd->payload_length; qpair->counters.input_requests++; } else if (fd->io_dir == NVMEFC_FCP_WRITE) { cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); if ((vha->flags.nvme_first_burst) && (sp->fcport->nvme_prli_service_param & NVME_PRLI_SP_FIRST_BURST)) { if ((fd->payload_length <= sp->fcport->nvme_first_burst_size) || (sp->fcport->nvme_first_burst_size == 0)) cmd_pkt->control_flags |= cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); } qpair->counters.output_bytes += fd->payload_length; qpair->counters.output_requests++; } else if (fd->io_dir == 0) { cmd_pkt->control_flags = 0; } if (sp->fcport->edif.enable && fd->io_dir != 0) cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); /* Set BIT_13 of control flags for Async event */ if (vha->flags.nvme2_enabled && cmd->sqe.common.opcode == nvme_admin_async_event) { cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT); } /* Set NPORT-ID */ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; cmd_pkt->vp_index = sp->fcport->vha->vp_idx; /* NVME RSP IU */ cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address); /* NVME CNMD IU */ cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); /* One DSD is available in the Command Type NVME IOCB */ avail_dsds = 1; cur_dsd = &cmd_pkt->nvme_dsd; sgl = fd->first_sgl; /* Load data segments */ for_each_sg(sgl, sg, tot_dsds, i) { cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { /* * Five DSDs are available in the Continuation * Type 1 IOCB. */ /* Adjust ring index */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; put_unaligned_le32(CONTINUE_A64_TYPE, &cont_pkt->entry_type); cur_dsd = cont_pkt->dsd; avail_dsds = ARRAY_SIZE(cont_pkt->dsd); } append_dsd64(&cur_dsd, sg); avail_dsds--; } /* Set total entry count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; wmb(); /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else { req->ring_ptr++; } /* ignore nvme async cmd due to long timeout */ if (!nvme->u.nvme.aen_op) sp->qpair->cmd_cnt++; /* Set chip new ring index. 
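The wmb() issued above ensures the IOCB and any continuation entries are written to memory before the request-queue in-pointer is updated here.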
*/ wrt_reg_dword(req->req_q_in, req->ring_index); if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) qla24xx_process_response_queue(vha, rsp); queuing_error: if (rval) qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); return rval; } /* Post a command */ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, void *hw_queue_handle, struct nvmefc_fcp_req *fd) { fc_port_t *fcport; struct srb_iocb *nvme; struct scsi_qla_host *vha; struct qla_hw_data *ha; int rval; srb_t *sp; struct qla_qpair *qpair = hw_queue_handle; struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; if (!priv) { /* nvme association has been torn down */ return -ENODEV; } fcport = qla_rport->fcport; if (unlikely(!qpair || !fcport || fcport->deleted)) return -EBUSY; if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) return -ENODEV; vha = fcport->vha; ha = vha->hw; if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) return -EBUSY; /* * If we know the dev is going away while the transport is still sending * IO's return busy back to stall the IO Q. This happens when the * link goes away and fw hasn't notified us yet, but IO's are being * returned. If the dev comes back quickly we won't exhaust the IO * retry count at the core. */ if (fcport->nvme_flag & NVME_FLAG_RESETTING) return -EBUSY; qpair = qla_mapq_nvme_select_qpair(ha, qpair); /* Alloc SRB structure */ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); if (!sp) return -EBUSY; kref_init(&sp->cmd_kref); spin_lock_init(&priv->cmd_lock); sp->priv = priv; priv->sp = sp; sp->type = SRB_NVME_CMD; sp->name = "nvme_cmd"; sp->done = qla_nvme_sp_done; sp->put_fn = qla_nvme_release_fcp_cmd_kref; sp->qpair = qpair; sp->vha = vha; sp->cmd_sp = sp; nvme = &sp->u.iocb_cmd; nvme->u.nvme.desc = fd; rval = qla2x00_start_nvme_mq(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d, "qla2x00_start_nvme_mq failed = %d\n", rval); sp->priv = NULL; priv->sp = NULL; qla2xxx_rel_qpair_sp(sp->qpair, sp); } return rval; } static void qla_nvme_map_queues(struct nvme_fc_local_port *lport, struct blk_mq_queue_map *map) { struct scsi_qla_host *vha = lport->private; blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset); } static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) { struct scsi_qla_host *vha = lport->private; ql_log(ql_log_info, vha, 0x210f, "localport delete of %p completed.\n", vha->nvme_local_port); vha->nvme_local_port = NULL; complete(&vha->nvme_del_done); } static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) { fc_port_t *fcport; struct qla_nvme_rport *qla_rport = rport->private; fcport = qla_rport->fcport; fcport->nvme_remote_port = NULL; fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; fcport->nvme_flag &= ~NVME_FLAG_DELETING; ql_log(ql_log_info, fcport->vha, 0x2110, "remoteport_delete of %p %8phN completed.\n", fcport, fcport->port_name); complete(&fcport->nvme_del_done); } static struct nvme_fc_port_template qla_nvme_fc_transport = { .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, .delete_queue = NULL, .ls_req = qla_nvme_ls_req, .ls_abort = qla_nvme_ls_abort, .fcp_io = qla_nvme_post_cmd, .fcp_abort = qla_nvme_fcp_abort, .xmt_ls_rsp = qla_nvme_xmt_ls_rsp, .map_queues = qla_nvme_map_queues, .max_hw_queues = DEF_NVME_HW_QUEUES, .max_sgl_segments = 1024, .max_dif_sgl_segments = 
64, .dma_boundary = 0xFFFFFFFF, .local_priv_sz = 8, .remote_priv_sz = sizeof(struct qla_nvme_rport), .lsrqst_priv_sz = sizeof(struct nvme_private), .fcprqst_priv_sz = sizeof(struct nvme_private), }; void qla_nvme_unregister_remote_port(struct fc_port *fcport) { int ret; if (!IS_ENABLED(CONFIG_NVME_FC)) return; ql_log(ql_log_warn, fcport->vha, 0x2112, "%s: unregister remoteport on %p %8phN\n", __func__, fcport, fcport->port_name); if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); init_completion(&fcport->nvme_del_done); ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port); if (ret) ql_log(ql_log_info, fcport->vha, 0x2114, "%s: Failed to unregister nvme_remote_port (%d)\n", __func__, ret); wait_for_completion(&fcport->nvme_del_done); } void qla_nvme_delete(struct scsi_qla_host *vha) { int nv_ret; if (!IS_ENABLED(CONFIG_NVME_FC)) return; if (vha->nvme_local_port) { init_completion(&vha->nvme_del_done); ql_log(ql_log_info, vha, 0x2116, "unregister localport=%p\n", vha->nvme_local_port); nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); if (nv_ret) ql_log(ql_log_info, vha, 0x2115, "Unregister of localport failed\n"); else wait_for_completion(&vha->nvme_del_done); } } int qla_nvme_register_hba(struct scsi_qla_host *vha) { struct nvme_fc_port_template *tmpl; struct qla_hw_data *ha; struct nvme_fc_port_info pinfo; int ret = -EINVAL; if (!IS_ENABLED(CONFIG_NVME_FC)) return ret; ha = vha->hw; tmpl = &qla_nvme_fc_transport; if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) { ql_log(ql_log_warn, vha, 0xfffd, "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n", ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES); ql2xnvme_queues = DEF_NVME_HW_QUEUES; } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) { ql_log(ql_log_warn, vha, 0xfffd, "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n", ql2xnvme_queues, (ha->max_qpairs - 1), (ha->max_qpairs - 1)); ql2xnvme_queues = ((ha->max_qpairs - 1)); } qla_nvme_fc_transport.max_hw_queues = min((uint8_t)(ql2xnvme_queues), (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1)); ql_log(ql_log_info, vha, 0xfffb, "Number of NVME queues used for this port: %d\n", qla_nvme_fc_transport.max_hw_queues); pinfo.node_name = wwn_to_u64(vha->node_name); pinfo.port_name = wwn_to_u64(vha->port_name); pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR; pinfo.port_id = vha->d_id.b24; mutex_lock(&ha->vport_lock); /* * Check again for nvme_local_port to see if any other thread raced * with this one and finished registration. 
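* Both the check and the call to nvme_fc_register_localport() below are made while holding ha->vport_lock, so only one thread performs the registration.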
*/ if (!vha->nvme_local_port) { ql_log(ql_log_info, vha, 0xffff, "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n", pinfo.node_name, pinfo.port_name, pinfo.port_id); qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; ret = nvme_fc_register_localport(&pinfo, tmpl, get_device(&ha->pdev->dev), &vha->nvme_local_port); mutex_unlock(&ha->vport_lock); } else { mutex_unlock(&ha->vport_lock); return 0; } if (ret) { ql_log(ql_log_warn, vha, 0xffff, "register_localport failed: ret=%x\n", ret); } else { vha->nvme_local_port->private = vha; } return ret; } void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp) { struct qla_hw_data *ha; if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) return; ha = orig_sp->fcport->vha->hw; WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0)); /* Use Driver Specified Retry Count */ abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT); abt->drv.abts_rty_cnt = cpu_to_le16(2); /* Use specified response timeout */ abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT); /* set it to 2 * r_a_tov in secs */ abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10)); } void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp) { u16 comp_status; struct scsi_qla_host *vha; if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) return; vha = orig_sp->fcport->vha; comp_status = le16_to_cpu(abt->comp_status); switch (comp_status) { case CS_RESET: /* reset event aborted */ case CS_ABORTED: /* IOCB was cleaned */ /* N_Port handle is not currently logged in */ case CS_TIMEOUT: /* N_Port handle was logged out while waiting for ABTS to complete */ case CS_PORT_UNAVAILABLE: /* Firmware found that the port name changed */ case CS_PORT_LOGGED_OUT: /* BA_RJT was received for the ABTS */ case CS_PORT_CONFIG_CHG: ql_dbg(ql_dbg_async, vha, 0xf09d, "Abort I/O IOCB completed with error, comp_status=%x\n", comp_status); break; /* BA_RJT was received for the ABTS */ case CS_REJECT_RECEIVED: ql_dbg(ql_dbg_async, vha, 0xf09e, "BA_RJT was received for the ABTS rjt_vendorUnique = %u", abt->fw.ba_rjt_vendorUnique); ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e, "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n", abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode); break; case CS_COMPLETE: ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f, "IOCB request is completed successfully comp_status=%x\n", comp_status); break; case CS_IOCB_ERROR: ql_dbg(ql_dbg_async, vha, 0xf0a0, "IOCB request is failed, comp_status=%x\n", comp_status); break; default: ql_dbg(ql_dbg_async, vha, 0xf0a1, "Invalid Abort IO IOCB Completion Status %x\n", comp_status); break; } } inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp) { if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) return; kref_put(&orig_sp->cmd_kref, orig_sp->put_fn); } static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason, u8 explanation, u8 vendor) { struct fcnvme_ls_rjt *rjt = buf; rjt->w0.ls_cmd = FCNVME_LSDESC_RQST; rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)); rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); rjt->rqst.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); rjt->rqst.w0.ls_cmd = ls_cmd; rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); rjt->rjt.reason_code = reason; rjt->rjt.reason_explanation = explanation; rjt->rjt.vendor = vendor; } static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha, struct pt_ls4_request 
*lsrjt_iocb, struct qla_nvme_lsrjt_pt_arg *a) { lsrjt_iocb->entry_type = PT_LS4_REQUEST; lsrjt_iocb->entry_count = 1; lsrjt_iocb->sys_define = 0; lsrjt_iocb->entry_status = 0; lsrjt_iocb->handle = QLA_SKIP_HANDLE; lsrjt_iocb->nport_handle = a->nport_handle; lsrjt_iocb->exchange_address = a->xchg_address; lsrjt_iocb->vp_index = a->vp_idx; lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags); put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address); lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count); lsrjt_iocb->tx_dseg_count = cpu_to_le16(1); lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address); lsrjt_iocb->dsd[1].length = 0; lsrjt_iocb->rx_dseg_count = 0; lsrjt_iocb->rx_byte_count = 0; } static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate) { struct pt_ls4_request *lsrjt_iocb; lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL); if (!lsrjt_iocb) { ql_log(ql_log_warn, vha, 0x210e, "qla2x00_alloc_iocbs failed.\n"); return QLA_FUNCTION_FAILED; } if (!is_xchg_terminate) { qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode, a->reason, a->explanation, 0); a->tx_byte_count = sizeof(struct fcnvme_ls_rjt); a->tx_addr = vha->hw->lsrjt.cdma; a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT; ql_dbg(ql_dbg_unsol, vha, 0x211f, "Sending nvme fc ls reject ox_id %04x op %04x\n", a->ox_id, a->opcode); ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f, vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c)); } else { a->tx_byte_count = 0; a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT; ql_dbg(ql_dbg_unsol, vha, 0x2110, "Terminate nvme ls xchg 0x%x\n", a->xchg_address); } qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a); /* flush iocb to mem before notifying hw doorbell */ wmb(); qla2x00_start_iocbs(vha, qp->req); return 0; } /* * qla2xxx_process_purls_pkt() - Pass-up Unsolicited * Received FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req(). * LLDD need to provide memory for response buffer, which * will be used to reference the exchange corresponding * to the LS when issuing an ls response. LLDD will have to free * response buffer in lport->ops->xmt_ls_rsp(). 
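* In this driver that buffer lives inside the qla_nvme_unsol_ctx allocated in qla2xxx_process_purls_iocb(); qla_nvme_xmt_ls_rsp() frees the context if the response cannot be posted.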
* * @vha: SCSI qla host * @item: ptr to purex_item */ static void qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item) { struct qla_nvme_unsol_ctx *uctx = item->purls_context; struct qla_nvme_lsrjt_pt_arg a; int ret = 1; #if (IS_ENABLED(CONFIG_NVME_FC)) ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp, &item->iocb, item->size); #endif if (ret) { ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n"); memset((void *)&a, 0, sizeof(a)); a.vp_idx = vha->vp_idx; a.nport_handle = uctx->nport_handle; a.xchg_address = uctx->exchange_address; qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true); list_del(&uctx->elem); kfree(uctx); } } static scsi_qla_host_t * qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index) { scsi_qla_host_t *base_vha, *vha, *tvp; unsigned long flags; base_vha = pci_get_drvdata(ha->pdev); if (!vp_index && !ha->num_vhosts) return base_vha; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) { if (vha->vp_idx == vp_index) { spin_unlock_irqrestore(&ha->vport_slock, flags); return vha; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return NULL; } void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp) { struct nvme_fc_remote_port *rport; struct qla_nvme_rport *qla_rport; struct qla_nvme_lsrjt_pt_arg a; struct pt_ls4_rx_unsol *p = *pkt; struct qla_nvme_unsol_ctx *uctx; struct rsp_que *rsp_q = *rsp; struct qla_hw_data *ha; scsi_qla_host_t *vha; fc_port_t *fcport = NULL; struct purex_item *item; port_id_t d_id = {0}; port_id_t id = {0}; u8 *opcode; bool xmt_reject = false; ha = rsp_q->hw; vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index); if (!vha) { ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index); WARN_ON_ONCE(1); return; } memset((void *)&a, 0, sizeof(a)); opcode = (u8 *)&p->payload[0]; a.opcode = opcode[3]; a.vp_idx = p->vp_index; a.nport_handle = p->nport_handle; a.ox_id = p->ox_id; a.xchg_address = p->exchange_address; id.b.domain = p->s_id.domain; id.b.area = p->s_id.area; id.b.al_pa = p->s_id.al_pa; d_id.b.domain = p->d_id[2]; d_id.b.area = p->d_id[1]; d_id.b.al_pa = p->d_id[0]; fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0); if (!fcport) { ql_dbg(ql_dbg_unsol, vha, 0x211e, "Failed to find sid=%06x did=%06x\n", id.b24, d_id.b24); a.reason = FCNVME_RJT_RC_INV_ASSOC; a.explanation = FCNVME_RJT_EXP_NONE; xmt_reject = true; goto out; } rport = fcport->nvme_remote_port; qla_rport = rport->private; item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false); if (!item) { a.reason = FCNVME_RJT_RC_LOGIC; a.explanation = FCNVME_RJT_EXP_NONE; xmt_reject = true; goto out; } uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC); if (!uctx) { ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n"); a.reason = FCNVME_RJT_RC_LOGIC; a.explanation = FCNVME_RJT_EXP_NONE; xmt_reject = true; kfree(item); goto out; } uctx->vha = vha; uctx->fcport = fcport; uctx->exchange_address = p->exchange_address; uctx->nport_handle = p->nport_handle; uctx->ox_id = p->ox_id; qla_rport->uctx = uctx; INIT_LIST_HEAD(&uctx->elem); list_add_tail(&uctx->elem, &fcport->unsol_ctx_head); item->purls_context = (void *)uctx; ql_dbg(ql_dbg_unsol, vha, 0x2121, "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n", item->iocb.iocb[3], item->size, uctx->exchange_address, fcport->d_id.b24); /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F * ----- ----------------------------------------------- * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00 * 0010: ab ec 0f cc 
00 00 8d 7d 05 00 00 00 10 00 00 00 * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 */ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120, &item->iocb, item->size); qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt); out: if (xmt_reject) { qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false); __qla_consume_iocb(vha, pkt, rsp); } }
linux-master
drivers/scsi/qla2xxx/qla_nvme.c
/* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa ([email protected]) * Previously Maintained by: Eddie Wai ([email protected]) * Maintained by: [email protected] */ #include <linux/gfp.h> #include <scsi/scsi_tcq.h> #include <scsi/libiscsi.h> #include "bnx2i.h" DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); /** * bnx2i_get_cid_num - get cid from ep * @ep: endpoint pointer * * Only applicable to 57710 family of devices */ static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) { u32 cid; if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) cid = ep->ep_cid; else cid = GET_CID_NUM(ep->ep_cid); return cid; } /** * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type * @hba: Adapter for which adjustments is to be made * * Only applicable to 57710 family of devices */ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) { u32 num_elements_per_pg; if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { if (!is_power_of_2(hba->max_sqes)) hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); if (!is_power_of_2(hba->max_rqes)) hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); } /* Adjust each queue size if the user selection does not * yield integral num of page buffers */ /* adjust SQ */ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; if (hba->max_sqes < num_elements_per_pg) hba->max_sqes = num_elements_per_pg; else if (hba->max_sqes % num_elements_per_pg) hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & ~(num_elements_per_pg - 1); /* adjust CQ */ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; if (hba->max_cqes < num_elements_per_pg) hba->max_cqes = num_elements_per_pg; else if (hba->max_cqes % num_elements_per_pg) hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & ~(num_elements_per_pg - 1); /* adjust RQ */ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; if (hba->max_rqes < num_elements_per_pg) hba->max_rqes = num_elements_per_pg; else if (hba->max_rqes % num_elements_per_pg) hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & ~(num_elements_per_pg - 1); } /** * bnx2i_get_link_state - get network interface link state * @hba: adapter instance pointer * * updates adapter structure flag based on netdev state */ static void bnx2i_get_link_state(struct bnx2i_hba *hba) { if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); else clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); } /** * bnx2i_iscsi_license_error - displays iscsi license related error message * @hba: adapter instance pointer * @error_code: error classification * * Puts out an error log when driver is unable to offload iscsi connection * due to license restrictions */ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) { if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED) /* iSCSI offload not supported on this device */ printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n", hba->netdev->name); if (error_code == 
ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED) /* iSCSI offload not supported on this LOM device */ printk(KERN_ERR "bnx2i: LOM is not enable to " "offload iSCSI connections, dev=%s\n", hba->netdev->name); set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); } /** * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification * @ep: endpoint (transport identifier) structure * @action: action, ARM or DISARM. For now only ARM_CQE is used * * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt * the driver. EQ event is generated CQ index is hit or at least 1 CQ is * outstanding and on chip timer expires */ int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) { struct bnx2i_5771x_cq_db *cq_db; u16 cq_index; u16 next_index = 0; u32 num_active_cmds; /* Coalesce CQ entries only on 10G devices */ if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) return 0; /* Do not update CQ DB multiple times before firmware writes * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious * interrupts and other unwanted results */ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; if (action != CNIC_ARM_CQE_FP) if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF) return 0; if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) { num_active_cmds = atomic_read(&ep->num_active_cmds); if (num_active_cmds <= event_coal_min) next_index = 1; else { next_index = num_active_cmds >> ep->ec_shift; if (next_index > num_active_cmds - event_coal_min) next_index = num_active_cmds - event_coal_min; } if (!next_index) next_index = 1; cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; if (cq_index > ep->qp.cqe_size * 2) cq_index -= ep->qp.cqe_size * 2; if (!cq_index) cq_index = 1; cq_db->sqn[0] = cq_index; } return next_index; } /** * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer * @bnx2i_conn: iscsi connection on which RQ event occurred * @ptr: driver buffer to which RQ buffer contents is to * be copied * @len: length of valid data inside RQ buf * * Copies RQ buffer contents from shared (DMA'able) memory region to * driver buffer. 
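* The consumer queue-element pointer and index wrap back to rq_first_qe once rq_last_qe has been consumed.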
RQ is used to DMA unsolicitated iscsi pdu's and * scsi sense info */ void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len) { if (!bnx2i_conn->ep->qp.rqe_left) return; bnx2i_conn->ep->qp.rqe_left--; memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; bnx2i_conn->ep->qp.rq_cons_idx = 0; } else { bnx2i_conn->ep->qp.rq_cons_qe++; bnx2i_conn->ep->qp.rq_cons_idx++; } } static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn) { struct bnx2i_5771x_dbell dbell; u32 msg; memset(&dbell, 0, sizeof(dbell)); dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT); msg = *((u32 *)&dbell); /* TODO : get doorbell register mapping */ writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); } /** * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell * @bnx2i_conn: iscsi connection on which event to post * @count: number of RQ buffer being posted to chip * * No need to ring hardware doorbell for 57710 family of devices */ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) { struct bnx2i_5771x_sq_rq_db *rq_db; u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); struct bnx2i_endpoint *ep = bnx2i_conn->ep; ep->qp.rqe_left += count; ep->qp.rq_prod_idx &= 0x7FFF; ep->qp.rq_prod_idx += count; if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; if (!hi_bit) ep->qp.rq_prod_idx |= 0x8000; } else ep->qp.rq_prod_idx |= hi_bit; if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; rq_db->prod_idx = ep->qp.rq_prod_idx; /* no need to ring hardware doorbell for 57710 */ } else { writew(ep->qp.rq_prod_idx, ep->qp.ctx_base + CNIC_RECV_DOORBELL); } } /** * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine * @bnx2i_conn: iscsi connection to which new SQ entries belong * @count: number of SQ WQEs to post * * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family * of devices. 
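* The routine also increments ep->num_active_cmds, which drives the CQ event-coalescing calculation in bnx2i_arm_cq_event_coalescing().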
For 5706/5708/5709 new SQ WQE count is written into the * doorbell register */ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) { struct bnx2i_5771x_sq_rq_db *sq_db; struct bnx2i_endpoint *ep = bnx2i_conn->ep; atomic_inc(&ep->num_active_cmds); wmb(); /* flush SQ WQE memory before the doorbell is rung */ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; sq_db->prod_idx = ep->qp.sq_prod_idx; bnx2i_ring_577xx_doorbell(bnx2i_conn); } else writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); } /** * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters * @bnx2i_conn: iscsi connection to which new SQ entries belong * @count: number of SQ WQEs to post * * this routine will update SQ driver parameters and ring the doorbell */ static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, int count) { int tmp_cnt; if (count == 1) { if (bnx2i_conn->ep->qp.sq_prod_qe == bnx2i_conn->ep->qp.sq_last_qe) bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_conn->ep->qp.sq_first_qe; else bnx2i_conn->ep->qp.sq_prod_qe++; } else { if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= bnx2i_conn->ep->qp.sq_last_qe) bnx2i_conn->ep->qp.sq_prod_qe += count; else { tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_conn->ep->qp.sq_prod_qe = &bnx2i_conn->ep->qp.sq_first_qe[count - (tmp_cnt + 1)]; } } bnx2i_conn->ep->qp.sq_prod_idx += count; /* Ring the doorbell */ bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); } /** * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware * @bnx2i_conn: iscsi connection * @task: transport layer's command structure pointer which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI Login request WQE to CNIC firmware */ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *task) { struct bnx2i_login_request *login_wqe; struct iscsi_login_req *login_hdr; u32 dword; login_hdr = (struct iscsi_login_req *)task->hdr; login_wqe = (struct bnx2i_login_request *) bnx2i_conn->ep->qp.sq_prod_qe; login_wqe->op_code = login_hdr->opcode; login_wqe->op_attr = login_hdr->flags; login_wqe->version_max = login_hdr->max_version; login_wqe->version_min = login_hdr->min_version; login_wqe->data_length = ntoh24(login_hdr->dlength); login_wqe->isid_lo = *((u32 *) login_hdr->isid); login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); login_wqe->tsih = login_hdr->tsih; login_wqe->itt = task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); login_wqe->cid = login_hdr->cid; login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN; login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; login_wqe->resp_bd_list_addr_hi = (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | (bnx2i_conn->gen_pdu.resp_buf_size << ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); login_wqe->resp_buffer = dword; login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; login_wqe->bd_list_addr_hi = (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); login_wqe->num_bds = 1; login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); return 0; } /** * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware * @bnx2i_conn: iscsi 
connection * @mtask: driver command structure which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI Login request WQE to CNIC firmware */ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *mtask) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_tm *tmfabort_hdr; struct scsi_cmnd *ref_sc; struct iscsi_task *ctask; struct bnx2i_tmf_request *tmfabort_wqe; u32 dword; u32 scsi_lun[2]; tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; tmfabort_wqe = (struct bnx2i_tmf_request *) bnx2i_conn->ep->qp.sq_prod_qe; tmfabort_wqe->op_code = tmfabort_hdr->opcode; tmfabort_wqe->op_attr = tmfabort_hdr->flags; tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); tmfabort_wqe->reserved2 = 0; tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { case ISCSI_TM_FUNC_ABORT_TASK: case ISCSI_TM_FUNC_TASK_REASSIGN: ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); if (!ctask || !ctask->sc) /* * the iscsi layer must have completed the cmd while * was starting up. * * Note: In the case of a SCSI cmd timeout, the task's * sc is still active; hence ctask->sc != 0 * In this case, the task must be aborted */ return 0; ref_sc = ctask->sc; if (ref_sc->sc_data_direction == DMA_TO_DEVICE) dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); else dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK)); break; default: tmfabort_wqe->ref_itt = RESERVED_ITT; } memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun)); tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; tmfabort_wqe->bd_list_addr_hi = (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); tmfabort_wqe->num_bds = 1; tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); return 0; } /** * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware * @bnx2i_conn: iscsi connection * @mtask: driver command structure which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI Text request WQE to CNIC firmware */ int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *mtask) { struct bnx2i_text_request *text_wqe; struct iscsi_text *text_hdr; u32 dword; text_hdr = (struct iscsi_text *)mtask->hdr; text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; memset(text_wqe, 0, sizeof(struct bnx2i_text_request)); text_wqe->op_code = text_hdr->opcode; text_wqe->op_attr = text_hdr->flags; text_wqe->data_length = ntoh24(text_hdr->dlength); text_wqe->itt = mtask->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT); text_wqe->ttt = be32_to_cpu(text_hdr->ttt); text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn); text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; text_wqe->resp_bd_list_addr_hi = (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) | (bnx2i_conn->gen_pdu.resp_buf_size << ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); text_wqe->resp_buffer = dword; text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; text_wqe->bd_list_addr_hi = (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); text_wqe->num_bds = 1; text_wqe->cq_index = 0; /* 
CQ# used for completion, 5771x only */ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); return 0; } /** * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware * @bnx2i_conn: iscsi connection * @cmd: driver command structure which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware */ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, struct bnx2i_cmd *cmd) { struct bnx2i_cmd_request *scsi_cmd_wqe; scsi_cmd_wqe = (struct bnx2i_cmd_request *) bnx2i_conn->ep->qp.sq_prod_qe; memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request)); scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); return 0; } /** * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware * @bnx2i_conn: iscsi connection * @task: transport layer's command structure pointer which is * requesting a WQE to sent to chip for further processing * @datap: payload buffer pointer * @data_len: payload data length * @unsol: indicated whether nopout pdu is unsolicited pdu or * in response to target's NOPIN w/ TTT != FFFFFFFF * * prepare and post a nopout request WQE to CNIC firmware */ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *task, char *datap, int data_len, int unsol) { struct bnx2i_endpoint *ep = bnx2i_conn->ep; struct bnx2i_nop_out_request *nopout_wqe; struct iscsi_nopout *nopout_hdr; nopout_hdr = (struct iscsi_nopout *)task->hdr; nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request)); nopout_wqe->op_code = nopout_hdr->opcode; nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8); /* 57710 requires LUN field to be swapped */ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) swap(nopout_wqe->lun[0], nopout_wqe->lun[1]); nopout_wqe->itt = ((u16)task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_TMF_REQUEST_TYPE_SHIFT)); nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); nopout_wqe->flags = 0; if (!unsol) nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; else if (nopout_hdr->itt == RESERVED_ITT) nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); nopout_wqe->data_length = data_len; if (data_len) { /* handle payload data, not required in first release */ printk(KERN_ALERT "NOPOUT: WARNING!! 
payload len != 0\n"); } else { nopout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; nopout_wqe->bd_list_addr_hi = (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); nopout_wqe->num_bds = 1; } nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); return 0; } /** * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware * @bnx2i_conn: iscsi connection * @task: transport layer's command structure pointer which is * requesting a WQE to sent to chip for further processing * * prepare and post logout request WQE to CNIC firmware */ int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *task) { struct bnx2i_logout_request *logout_wqe; struct iscsi_logout *logout_hdr; logout_hdr = (struct iscsi_logout *)task->hdr; logout_wqe = (struct bnx2i_logout_request *) bnx2i_conn->ep->qp.sq_prod_qe; memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request)); logout_wqe->op_code = logout_hdr->opcode; logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); logout_wqe->op_attr = logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE; logout_wqe->itt = ((u16)task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGOUT_REQUEST_TYPE_SHIFT)); logout_wqe->data_length = 0; logout_wqe->cid = 0; logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; logout_wqe->bd_list_addr_hi = (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); logout_wqe->num_bds = 1; logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT; bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); return 0; } /** * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware * @conn: iscsi connection which requires iscsi parameter update * * sends down iSCSI Conn Update request to move iSCSI conn to FFP */ void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) { struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct bnx2i_hba *hba = bnx2i_conn->hba; struct kwqe *kwqe_arr[2]; struct iscsi_kwqe_conn_update *update_wqe; struct iscsi_kwqe_conn_update conn_update_kwqe; update_wqe = &conn_update_kwqe; update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN; update_wqe->hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); /* 5771x requires conn context id to be passed as is */ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) update_wqe->context_id = bnx2i_conn->ep->ep_cid; else update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); update_wqe->conn_flags = 0; if (conn->hdrdgst_en) update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; if (conn->datadgst_en) update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; if (conn->session->initial_r2t_en) update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; if (conn->session->imm_data_en) update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA; update_wqe->max_send_pdu_length = conn->max_xmit_dlength; update_wqe->max_recv_pdu_length = conn->max_recv_dlength; update_wqe->first_burst_length = conn->session->first_burst; update_wqe->max_burst_length = conn->session->max_burst; update_wqe->exp_stat_sn = conn->exp_statsn; update_wqe->max_outstanding_r2ts = conn->session->max_r2t; update_wqe->session_error_recovery_level = conn->session->erl; iscsi_conn_printk(KERN_ALERT, conn, "bnx2i: conn update - MBL 0x%x FBL 0x%x" "MRDSL_I 0x%x MRDSL_T 0x%x \n", update_wqe->max_burst_length, update_wqe->first_burst_length, update_wqe->max_recv_pdu_length, update_wqe->max_send_pdu_length); kwqe_arr[0] = 
(struct kwqe *) update_wqe; if (hba->cnic && hba->cnic->submit_kwqes) hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); } /** * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware * @t: timer context used to fetch the endpoint (transport * handle) structure pointer * * routine to handle connection offload/destroy request timeout */ void bnx2i_ep_ofld_timer(struct timer_list *t) { struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer); if (ep->state == EP_STATE_OFLD_START) { printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); ep->state = EP_STATE_OFLD_FAILED; } else if (ep->state == EP_STATE_DISCONN_START) { printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n"); ep->state = EP_STATE_DISCONN_TIMEDOUT; } else if (ep->state == EP_STATE_CLEANUP_START) { printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n"); ep->state = EP_STATE_CLEANUP_FAILED; } wake_up_interruptible(&ep->ofld_wait); } static int bnx2i_power_of2(u32 val) { u32 power = 0; if (val & (val - 1)) return power; val--; while (val) { val = val >> 1; power++; } return power; } /** * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request * @hba: adapter structure pointer * @cmd: driver command structure which is requesting * a WQE to sent to chip for further processing * * prepares and posts CONN_OFLD_REQ1/2 KWQE */ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) { struct bnx2i_cleanup_request *cmd_cleanup; cmd_cleanup = (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request)); cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST; cmd_cleanup->itt = cmd->req.itt; cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */ bnx2i_ring_dbell_update_sq_params(cmd->conn, 1); } /** * bnx2i_send_conn_destroy - initiates iscsi connection teardown process * @hba: adapter structure pointer * @ep: endpoint (transport identifier) structure * * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate * iscsi connection context clean-up process */ int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { struct kwqe *kwqe_arr[2]; struct iscsi_kwqe_conn_destroy conn_cleanup; int rc = -EINVAL; memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN; conn_cleanup.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); /* 5771x requires conn context id to be passed as is */ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) conn_cleanup.context_id = ep->ep_cid; else conn_cleanup.context_id = (ep->ep_cid >> 7); conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; kwqe_arr[0] = (struct kwqe *) &conn_cleanup; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); return rc; } /** * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process * @hba: adapter structure pointer * @ep: endpoint (transport identifier) structure * * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE */ static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { struct kwqe *kwqe_arr[2]; struct iscsi_kwqe_conn_offload1 ofld_req1; struct iscsi_kwqe_conn_offload2 ofld_req2; dma_addr_t dma_addr; int num_kwqes = 2; u32 *ptbl; int rc = -EINVAL; ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; ofld_req1.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req1.iscsi_conn_id = (u16) 
ep->ep_iscsi_cid; dma_addr = ep->qp.sq_pgtbl_phys; ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); dma_addr = ep->qp.cq_pgtbl_phys; ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; ofld_req2.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); dma_addr = ep->qp.rq_pgtbl_phys; ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); ptbl = (u32 *) ep->qp.sq_pgtbl_virt; ofld_req2.sq_first_pte.hi = *ptbl++; ofld_req2.sq_first_pte.lo = *ptbl; ptbl = (u32 *) ep->qp.cq_pgtbl_virt; ofld_req2.cq_first_pte.hi = *ptbl++; ofld_req2.cq_first_pte.lo = *ptbl; kwqe_arr[0] = (struct kwqe *) &ofld_req1; kwqe_arr[1] = (struct kwqe *) &ofld_req2; ofld_req2.num_additional_wqes = 0; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation * @hba: adapter structure pointer * @ep: endpoint (transport identifier) structure * * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE */ static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { struct kwqe *kwqe_arr[5]; struct iscsi_kwqe_conn_offload1 ofld_req1; struct iscsi_kwqe_conn_offload2 ofld_req2; struct iscsi_kwqe_conn_offload3 ofld_req3[1]; dma_addr_t dma_addr; int num_kwqes = 2; u32 *ptbl; int rc = -EINVAL; ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; ofld_req1.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; ofld_req2.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); ofld_req2.sq_first_pte.hi = *ptbl++; ofld_req2.sq_first_pte.lo = *ptbl; ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); ofld_req2.cq_first_pte.hi = *ptbl++; ofld_req2.cq_first_pte.lo = *ptbl; kwqe_arr[0] = (struct kwqe *) &ofld_req1; kwqe_arr[1] = (struct kwqe *) &ofld_req2; ofld_req2.num_additional_wqes = 1; memset(ofld_req3, 0x00, sizeof(ofld_req3[0])); ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); ofld_req3[0].qp_first_pte[0].hi = *ptbl++; ofld_req3[0].qp_first_pte[0].lo = *ptbl; kwqe_arr[2] = (struct kwqe *) ofld_req3; /* need if we decide to go with multiple KCQE's per conn */ num_kwqes += 1; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process * * @hba: adapter structure pointer * @ep: endpoint (transport identifier) structure * * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE */ int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct 
bnx2i_endpoint *ep) { int rc; if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) rc = bnx2i_5771x_send_conn_ofld_req(hba, ep); else rc = bnx2i_570x_send_conn_ofld_req(hba, ep); return rc; } /** * setup_qp_page_tables - iscsi QP page table setup function * @ep: endpoint (transport identifier) structure * * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires * 64-bit address in big endian format. Whereas 10G/sec (57710) requires * PT in little endian format */ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) { int num_pages; u32 *ptbl; dma_addr_t page; int cnic_dev_10g; if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) cnic_dev_10g = 1; else cnic_dev_10g = 0; /* SQ page table */ memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.sq_phys; if (cnic_dev_10g) ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); else ptbl = (u32 *) ep->qp.sq_pgtbl_virt; while (num_pages--) { if (cnic_dev_10g) { /* PTE is written in little endian format for 57710 */ *ptbl = (u32) page; ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ *ptbl = (u32) ((u64) page >> 32); ptbl++; *ptbl = (u32) page; ptbl++; page += CNIC_PAGE_SIZE; } } /* RQ page table */ memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.rq_phys; if (cnic_dev_10g) ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); else ptbl = (u32 *) ep->qp.rq_pgtbl_virt; while (num_pages--) { if (cnic_dev_10g) { /* PTE is written in little endian format for 57710 */ *ptbl = (u32) page; ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ *ptbl = (u32) ((u64) page >> 32); ptbl++; *ptbl = (u32) page; ptbl++; page += CNIC_PAGE_SIZE; } } /* CQ page table */ memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.cq_phys; if (cnic_dev_10g) ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); else ptbl = (u32 *) ep->qp.cq_pgtbl_virt; while (num_pages--) { if (cnic_dev_10g) { /* PTE is written in little endian format for 57710 */ *ptbl = (u32) page; ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ *ptbl = (u32) ((u64) page >> 32); ptbl++; *ptbl = (u32) page; ptbl++; page += CNIC_PAGE_SIZE; } } } /** * bnx2i_alloc_qp_resc - allocates required resources for QP. * @hba: adapter structure pointer * @ep: endpoint (transport identifier) structure * * Allocate QP (transport layer for iSCSI connection) resources, DMA'able * memory for SQ/RQ/CQ and page tables. 
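* All queue and page-table sizes below are rounded up to whole CNIC_PAGE_SIZE pages before the DMA-coherent allocations are made.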
EP structure elements such * as producer/consumer indexes/pointers, queue sizes and page table * contents are setup */ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { struct bnx2i_5771x_cq_db *cq_db; ep->hba = hba; ep->conn = NULL; ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0; /* Allocate page table memory for SQ which is page aligned */ ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; ep->qp.sq_mem_size = (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.sq_pgtbl_size = (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.sq_pgtbl_size = (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.sq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, &ep->qp.sq_pgtbl_phys, GFP_KERNEL); if (!ep->qp.sq_pgtbl_virt) { printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n", ep->qp.sq_pgtbl_size); goto mem_alloc_err; } /* Allocate memory area for actual SQ element */ ep->qp.sq_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, &ep->qp.sq_phys, GFP_KERNEL); if (!ep->qp.sq_virt) { printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", ep->qp.sq_mem_size); goto mem_alloc_err; } ep->qp.sq_first_qe = ep->qp.sq_virt; ep->qp.sq_prod_qe = ep->qp.sq_first_qe; ep->qp.sq_cons_qe = ep->qp.sq_first_qe; ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; ep->qp.sq_prod_idx = 0; ep->qp.sq_cons_idx = 0; ep->qp.sqe_left = hba->max_sqes; /* Allocate page table memory for CQ which is page aligned */ ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; ep->qp.cq_mem_size = (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.cq_pgtbl_size = (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.cq_pgtbl_size = (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.cq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, &ep->qp.cq_pgtbl_phys, GFP_KERNEL); if (!ep->qp.cq_pgtbl_virt) { printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n", ep->qp.cq_pgtbl_size); goto mem_alloc_err; } /* Allocate memory area for actual CQ element */ ep->qp.cq_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, &ep->qp.cq_phys, GFP_KERNEL); if (!ep->qp.cq_virt) { printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", ep->qp.cq_mem_size); goto mem_alloc_err; } ep->qp.cq_first_qe = ep->qp.cq_virt; ep->qp.cq_prod_qe = ep->qp.cq_first_qe; ep->qp.cq_cons_qe = ep->qp.cq_first_qe; ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; ep->qp.cq_prod_idx = 0; ep->qp.cq_cons_idx = 0; ep->qp.cqe_left = hba->max_cqes; ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; ep->qp.cqe_size = hba->max_cqes; /* Invalidate all EQ CQE index, req only for 57710 */ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS); /* Allocate page table memory for RQ which is page aligned */ ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; ep->qp.rq_mem_size = (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.rq_pgtbl_size = (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.rq_pgtbl_size = (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.rq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, &ep->qp.rq_pgtbl_phys, GFP_KERNEL); if (!ep->qp.rq_pgtbl_virt) { printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n", ep->qp.rq_pgtbl_size); goto mem_alloc_err; } /* Allocate memory area for actual RQ element */ 
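/* Note: rq_prod_idx is initialized to 0x8000 below; bnx2i_put_rq_buf() keeps the producer index in the low 15 bits and toggles this high bit each time the index wraps past max_rqes. */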
ep->qp.rq_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, &ep->qp.rq_phys, GFP_KERNEL); if (!ep->qp.rq_virt) { printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n", ep->qp.rq_mem_size); goto mem_alloc_err; } ep->qp.rq_first_qe = ep->qp.rq_virt; ep->qp.rq_prod_qe = ep->qp.rq_first_qe; ep->qp.rq_cons_qe = ep->qp.rq_first_qe; ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; ep->qp.rq_prod_idx = 0x8000; ep->qp.rq_cons_idx = 0; ep->qp.rqe_left = hba->max_rqes; setup_qp_page_tables(ep); return 0; mem_alloc_err: bnx2i_free_qp_resc(hba, ep); return -ENOMEM; } /** * bnx2i_free_qp_resc - free memory resources held by QP * @hba: adapter structure pointer * @ep: endpoint (transport identifier) structure * * Free QP resources - SQ/RQ/CQ memory and page tables. */ void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { if (ep->qp.ctx_base) { iounmap(ep->qp.ctx_base); ep->qp.ctx_base = NULL; } /* Free SQ mem */ if (ep->qp.sq_pgtbl_virt) { dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); ep->qp.sq_pgtbl_virt = NULL; ep->qp.sq_pgtbl_phys = 0; } if (ep->qp.sq_virt) { dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, ep->qp.sq_virt, ep->qp.sq_phys); ep->qp.sq_virt = NULL; ep->qp.sq_phys = 0; } /* Free RQ mem */ if (ep->qp.rq_pgtbl_virt) { dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); ep->qp.rq_pgtbl_virt = NULL; ep->qp.rq_pgtbl_phys = 0; } if (ep->qp.rq_virt) { dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, ep->qp.rq_virt, ep->qp.rq_phys); ep->qp.rq_virt = NULL; ep->qp.rq_phys = 0; } /* Free CQ mem */ if (ep->qp.cq_pgtbl_virt) { dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); ep->qp.cq_pgtbl_virt = NULL; ep->qp.cq_pgtbl_phys = 0; } if (ep->qp.cq_virt) { dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, ep->qp.cq_virt, ep->qp.cq_phys); ep->qp.cq_virt = NULL; ep->qp.cq_phys = 0; } } /** * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w * @hba: adapter structure pointer * * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w * This results in iSCSi support validation and on-chip context manager * initialization. Firmware completes this handshake with a CQE carrying * the result of iscsi support validation. 
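* The default error_bit_map is seeded with bits for protocol deviations observed with Cisco MDS, HP MSA1510i and EMC targets (per the inline comments); the error_mask1/error_mask2 module parameters, when non-zero, replace those defaults.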
Parameter carried by * iscsi init request determines the number of offloaded connection and * tolerance level for iscsi protocol violation this hba/chip can support */ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) { struct kwqe *kwqe_arr[3]; struct iscsi_kwqe_init1 iscsi_init; struct iscsi_kwqe_init2 iscsi_init2; int rc = 0; u64 mask64; memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1)); memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2)); bnx2i_adjust_qp_size(hba); iscsi_init.flags = (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; if (en_tcp_dack) iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; iscsi_init.reserved0 = 0; iscsi_init.num_cqs = 1; iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1; iscsi_init.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; iscsi_init.dummy_buffer_addr_hi = (u32) ((u64) hba->dummy_buf_dma >> 32); hba->num_ccell = hba->max_sqes >> 1; hba->ctx_ccell_tasks = ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); iscsi_init.num_ccells_per_conn = hba->num_ccell; iscsi_init.num_tasks_per_conn = hba->max_sqes; iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; iscsi_init.sq_num_wqes = hba->max_sqes; iscsi_init.cq_log_wqes_per_page = (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); iscsi_init.cq_num_wqes = hba->max_cqes; iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; iscsi_init.rq_num_wqes = hba->max_rqes; iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2; iscsi_init2.hdr.flags = (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1; mask64 = 0x0ULL; mask64 |= ( /* CISCO MDS */ (1UL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) | /* HP MSA1510i */ (1UL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) | /* EMC */ (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN)); if (error_mask1) { iscsi_init2.error_bit_map[0] = error_mask1; mask64 ^= (u32)(mask64); mask64 |= error_mask1; } else iscsi_init2.error_bit_map[0] = (u32) mask64; if (error_mask2) { iscsi_init2.error_bit_map[1] = error_mask2; mask64 &= 0xffffffff; mask64 |= ((u64)error_mask2 << 32); } else iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32); iscsi_error_mask = mask64; kwqe_arr[0] = (struct kwqe *) &iscsi_init; kwqe_arr[1] = (struct kwqe *) &iscsi_init2; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2); return rc; } /** * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion. 
* @session: iscsi session * @bnx2i_conn: bnx2i connection * @cqe: pointer to newly DMA'ed CQE entry for processing * * process SCSI CMD Response CQE & complete the request to SCSI-ML */ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct bnx2i_hba *hba = bnx2i_conn->hba; struct bnx2i_cmd_response *resp_cqe; struct bnx2i_cmd *bnx2i_cmd; struct iscsi_task *task; struct iscsi_scsi_rsp *hdr; u32 datalen = 0; resp_cqe = (struct bnx2i_cmd_response *)cqe; spin_lock_bh(&session->back_lock); task = iscsi_itt_to_task(conn, resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); if (!task) goto fail; bnx2i_cmd = task->dd_data; if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { conn->datain_pdus_cnt += resp_cqe->task_stat.read_stat.num_data_ins; conn->rxdata_octets += bnx2i_cmd->req.total_data_transfer_length; ADD_STATS_64(hba, rx_pdus, resp_cqe->task_stat.read_stat.num_data_ins); ADD_STATS_64(hba, rx_bytes, bnx2i_cmd->req.total_data_transfer_length); } else { conn->dataout_pdus_cnt += resp_cqe->task_stat.write_stat.num_data_outs; conn->r2t_pdus_cnt += resp_cqe->task_stat.write_stat.num_r2ts; conn->txdata_octets += bnx2i_cmd->req.total_data_transfer_length; ADD_STATS_64(hba, tx_pdus, resp_cqe->task_stat.write_stat.num_data_outs); ADD_STATS_64(hba, tx_bytes, bnx2i_cmd->req.total_data_transfer_length); ADD_STATS_64(hba, rx_pdus, resp_cqe->task_stat.write_stat.num_r2ts); } bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); hdr = (struct iscsi_scsi_rsp *)task->hdr; resp_cqe = (struct bnx2i_cmd_response *)cqe; hdr->opcode = resp_cqe->op_code; hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn); hdr->response = resp_cqe->response; hdr->cmd_status = resp_cqe->status; hdr->flags = resp_cqe->response_flags; hdr->residual_count = cpu_to_be32(resp_cqe->residual_count); if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN) goto done; if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) { datalen = resp_cqe->data_length; if (datalen < 2) goto done; if (datalen > BNX2I_RQ_WQE_SIZE) { iscsi_conn_printk(KERN_ERR, conn, "sense data len %d > RQ sz\n", datalen); datalen = BNX2I_RQ_WQE_SIZE; } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) { iscsi_conn_printk(KERN_ERR, conn, "sense data len %d > conn data\n", datalen); datalen = ISCSI_DEF_MAX_RECV_SEG_LEN; } bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen); bnx2i_put_rq_buf(bnx2i_cmd->conn, 1); } done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, datalen); fail: spin_unlock_bh(&session->back_lock); return 0; } /** * bnx2i_process_login_resp - this function handles iscsi login response * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process Login Response CQE & complete it to open-iscsi user daemon */ static int bnx2i_process_login_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_task *task; struct bnx2i_login_response *login; struct iscsi_login_rsp *resp_hdr; int pld_len; int pad_len; login = (struct bnx2i_login_response *) cqe; spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, login->itt & ISCSI_LOGIN_RESPONSE_INDEX); if (!task) goto done; resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = login->op_code; 
resp_hdr->flags = login->response_flags; resp_hdr->max_version = login->version_max; resp_hdr->active_version = login->version_active; resp_hdr->hlength = 0; hton24(resp_hdr->dlength, login->data_length); memcpy(resp_hdr->isid, &login->isid_lo, 6); resp_hdr->tsih = cpu_to_be16(login->tsih); resp_hdr->itt = task->hdr->itt; resp_hdr->statsn = cpu_to_be32(login->stat_sn); resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn); resp_hdr->status_class = login->status_class; resp_hdr->status_detail = login->status_detail; pld_len = login->data_length; bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf + pld_len; pad_len = 0; if (pld_len & 0x3) pad_len = 4 - (pld_len % 4); if (pad_len) { int i = 0; for (i = 0; i < pad_len; i++) { bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; bnx2i_conn->gen_pdu.resp_wr_ptr++; } } __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); done: spin_unlock(&session->back_lock); return 0; } /** * bnx2i_process_text_resp - this function handles iscsi text response * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI Text Response CQE& complete it to open-iscsi user daemon */ static int bnx2i_process_text_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_task *task; struct bnx2i_text_response *text; struct iscsi_text_rsp *resp_hdr; int pld_len; int pad_len; text = (struct bnx2i_text_response *) cqe; spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX); if (!task) goto done; resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = text->op_code; resp_hdr->flags = text->response_flags; resp_hdr->hlength = 0; hton24(resp_hdr->dlength, text->data_length); resp_hdr->itt = task->hdr->itt; resp_hdr->ttt = cpu_to_be32(text->ttt); resp_hdr->statsn = task->hdr->exp_statsn; resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn); pld_len = text->data_length; bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf + pld_len; pad_len = 0; if (pld_len & 0x3) pad_len = 4 - (pld_len % 4); if (pad_len) { int i = 0; for (i = 0; i < pad_len; i++) { bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; bnx2i_conn->gen_pdu.resp_wr_ptr++; } } __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); done: spin_unlock(&session->back_lock); return 0; } /** * bnx2i_process_tmf_resp - this function handles iscsi TMF response * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI TMF Response CQE and wake up the driver eh thread. 
*/ static int bnx2i_process_tmf_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_task *task; struct bnx2i_tmf_response *tmf_cqe; struct iscsi_tm_rsp *resp_hdr; tmf_cqe = (struct bnx2i_tmf_response *)cqe; spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX); if (!task) goto done; resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = tmf_cqe->op_code; resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn); resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn); resp_hdr->itt = task->hdr->itt; resp_hdr->response = tmf_cqe->response; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); done: spin_unlock(&session->back_lock); return 0; } /** * bnx2i_process_logout_resp - this function handles iscsi logout response * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI Logout Response CQE & make function call to * notify the user daemon. */ static int bnx2i_process_logout_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_task *task; struct bnx2i_logout_response *logout; struct iscsi_logout_rsp *resp_hdr; logout = (struct bnx2i_logout_response *) cqe; spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX); if (!task) goto done; resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = logout->op_code; resp_hdr->flags = logout->response; resp_hdr->hlength = 0; resp_hdr->itt = task->hdr->itt; resp_hdr->statsn = task->hdr->exp_statsn; resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn); resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait); resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD; done: spin_unlock(&session->back_lock); return 0; } /** * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI NOPIN local completion CQE, frees IIT and command structures */ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct bnx2i_nop_in_msg *nop_in; struct iscsi_task *task; nop_in = (struct bnx2i_nop_in_msg *)cqe; spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); if (task) __iscsi_put_task(task); spin_unlock(&session->back_lock); } /** * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd * @bnx2i_conn: iscsi connection * * Firmware advances RQ producer index for every unsolicited PDU even if * payload data length is '0'. 
This function makes corresponding * adjustments on the driver side to match this f/w behavior */ static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn) { char dummy_rq_data[2]; bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1); bnx2i_put_rq_buf(bnx2i_conn, 1); } /** * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI target's proactive iSCSI NOPIN request */ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_task *task; struct bnx2i_nop_in_msg *nop_in; struct iscsi_nopin *hdr; int tgt_async_nop = 0; nop_in = (struct bnx2i_nop_in_msg *)cqe; spin_lock(&session->back_lock); hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; memset(hdr, 0, sizeof(struct iscsi_hdr)); hdr->opcode = nop_in->op_code; hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn); hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); hdr->ttt = cpu_to_be32(nop_in->ttt); if (nop_in->itt == (u16) RESERVED_ITT) { bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); hdr->itt = RESERVED_ITT; tgt_async_nop = 1; goto done; } /* this is a response to one of our nop-outs */ task = iscsi_itt_to_task(conn, (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX)); if (task) { hdr->flags = ISCSI_FLAG_CMD_FINAL; hdr->itt = task->hdr->itt; hdr->ttt = cpu_to_be32(nop_in->ttt); memcpy(&hdr->lun, nop_in->lun, 8); } done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); spin_unlock(&session->back_lock); return tgt_async_nop; } /** * bnx2i_process_async_mesg - this function handles iscsi async message * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI ASYNC Message */ static void bnx2i_process_async_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct bnx2i_async_msg *async_cqe; struct iscsi_async *resp_hdr; u8 async_event; bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); async_cqe = (struct bnx2i_async_msg *)cqe; async_event = async_cqe->async_event; if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) { iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "async: scsi events not supported\n"); return; } spin_lock(&session->back_lock); resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = async_cqe->op_code; resp_hdr->flags = 0x80; memcpy(&resp_hdr->lun, async_cqe->lun, 8); resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); resp_hdr->async_event = async_cqe->async_event; resp_hdr->async_vcode = async_cqe->async_vcode; resp_hdr->param1 = cpu_to_be16(async_cqe->param1); resp_hdr->param2 = cpu_to_be16(async_cqe->param2); resp_hdr->param3 = cpu_to_be16(async_cqe->param3); __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data, (struct iscsi_hdr *)resp_hdr, NULL, 0); spin_unlock(&session->back_lock); } /** * bnx2i_process_reject_mesg - process iscsi reject pdu * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process iSCSI REJECT message */ static void bnx2i_process_reject_mesg(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct iscsi_conn *conn = 
bnx2i_conn->cls_conn->dd_data; struct bnx2i_reject_msg *reject; struct iscsi_reject *hdr; reject = (struct bnx2i_reject_msg *) cqe; if (reject->data_length) { bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length); bnx2i_put_rq_buf(bnx2i_conn, 1); } else bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); spin_lock(&session->back_lock); hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr; memset(hdr, 0, sizeof(struct iscsi_hdr)); hdr->opcode = reject->op_code; hdr->reason = reject->reason; hton24(hdr->dlength, reject->data_length); hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn); hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn); hdr->ffffffff = cpu_to_be32(RESERVED_ITT); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, reject->data_length); spin_unlock(&session->back_lock); } /** * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion * @session: iscsi session pointer * @bnx2i_conn: iscsi connection pointer * @cqe: pointer to newly DMA'ed CQE entry for processing * * process command cleanup response CQE during conn shutdown or error recovery */ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct cqe *cqe) { struct bnx2i_cleanup_response *cmd_clean_rsp; struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_task *task; cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); if (!task) printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); spin_unlock(&session->back_lock); complete(&bnx2i_conn->cmd_cleanup_cmpl); } /** * bnx2i_percpu_io_thread - thread per cpu for ios * * @arg: ptr to bnx2i_percpu_info structure */ int bnx2i_percpu_io_thread(void *arg) { struct bnx2i_percpu_s *p = arg; struct bnx2i_work *work, *tmp; LIST_HEAD(work_list); set_user_nice(current, MIN_NICE); while (!kthread_should_stop()) { spin_lock_bh(&p->p_work_lock); while (!list_empty(&p->work_list)) { list_splice_init(&p->work_list, &work_list); spin_unlock_bh(&p->p_work_lock); list_for_each_entry_safe(work, tmp, &work_list, list) { list_del_init(&work->list); /* work allocated in the bh, freed here */ bnx2i_process_scsi_cmd_resp(work->session, work->bnx2i_conn, &work->cqe); atomic_dec(&work->bnx2i_conn->work_cnt); kfree(work); } spin_lock_bh(&p->p_work_lock); } set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&p->p_work_lock); schedule(); } __set_current_state(TASK_RUNNING); return 0; } /** * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread * @session: iscsi session * @bnx2i_conn: bnx2i connection * @cqe: pointer to newly DMA'ed CQE entry for processing * * this function is called by generic KCQ handler to queue all pending cmd * completion CQEs * * The implementation is to queue the cmd response based on the * last recorded command for the given connection. The * cpu_id gets recorded upon task_xmit. No out-of-order completion! 
*/ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct bnx2i_nop_in_msg *cqe) { struct bnx2i_work *bnx2i_work = NULL; struct bnx2i_percpu_s *p = NULL; struct iscsi_task *task; struct scsi_cmnd *sc; int rc = 0; spin_lock(&session->back_lock); task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data, cqe->itt & ISCSI_CMD_RESPONSE_INDEX); if (!task || !task->sc) { spin_unlock(&session->back_lock); return -EINVAL; } sc = task->sc; spin_unlock(&session->back_lock); p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc))); spin_lock(&p->p_work_lock); if (unlikely(!p->iothread)) { rc = -EINVAL; goto err; } /* Alloc and copy to the cqe */ bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC); if (bnx2i_work) { INIT_LIST_HEAD(&bnx2i_work->list); bnx2i_work->session = session; bnx2i_work->bnx2i_conn = bnx2i_conn; memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe)); list_add_tail(&bnx2i_work->list, &p->work_list); atomic_inc(&bnx2i_conn->work_cnt); wake_up_process(p->iothread); spin_unlock(&p->p_work_lock); goto done; } else rc = -ENOMEM; err: spin_unlock(&p->p_work_lock); bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe); done: return rc; } /** * bnx2i_process_new_cqes - process newly DMA'ed CQE's * @bnx2i_conn: bnx2i connection * * this function is called by generic KCQ handler to process all pending CQE's */ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) { struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct bnx2i_hba *hba = bnx2i_conn->hba; struct qp_info *qp; struct bnx2i_nop_in_msg *nopin; int tgt_async_msg; int cqe_cnt = 0; if (bnx2i_conn->ep == NULL) return 0; qp = &bnx2i_conn->ep->qp; if (!qp->cq_virt) { printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", hba->netdev->name); goto out; } while (1) { nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) break; if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { if (nopin->op_code == ISCSI_OP_NOOP_IN && nopin->itt == (u16) RESERVED_ITT) { printk(KERN_ALERT "bnx2i: Unsolicited " "NOP-In detected for suspended " "connection dev=%s!\n", hba->netdev->name); bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); goto cqe_out; } break; } tgt_async_msg = 0; switch (nopin->op_code) { case ISCSI_OP_SCSI_CMD_RSP: case ISCSI_OP_SCSI_DATA_IN: /* Run the kthread engine only for data cmds All other cmds will be completed in this bh! 
*/ bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); goto done; case ISCSI_OP_LOGIN_RSP: bnx2i_process_login_resp(session, bnx2i_conn, qp->cq_cons_qe); break; case ISCSI_OP_SCSI_TMFUNC_RSP: bnx2i_process_tmf_resp(session, bnx2i_conn, qp->cq_cons_qe); break; case ISCSI_OP_TEXT_RSP: bnx2i_process_text_resp(session, bnx2i_conn, qp->cq_cons_qe); break; case ISCSI_OP_LOGOUT_RSP: bnx2i_process_logout_resp(session, bnx2i_conn, qp->cq_cons_qe); break; case ISCSI_OP_NOOP_IN: if (bnx2i_process_nopin_mesg(session, bnx2i_conn, qp->cq_cons_qe)) tgt_async_msg = 1; break; case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION: bnx2i_process_nopin_local_cmpl(session, bnx2i_conn, qp->cq_cons_qe); break; case ISCSI_OP_ASYNC_EVENT: bnx2i_process_async_mesg(session, bnx2i_conn, qp->cq_cons_qe); tgt_async_msg = 1; break; case ISCSI_OP_REJECT: bnx2i_process_reject_mesg(session, bnx2i_conn, qp->cq_cons_qe); break; case ISCSI_OPCODE_CLEANUP_RESPONSE: bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn, qp->cq_cons_qe); break; default: printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", nopin->op_code); } ADD_STATS_64(hba, rx_pdus, 1); ADD_STATS_64(hba, rx_bytes, nopin->data_length); done: if (!tgt_async_msg) { if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) printk(KERN_ALERT "bnx2i (%s): no active cmd! " "op 0x%x\n", hba->netdev->name, nopin->op_code); else atomic_dec(&bnx2i_conn->ep->num_active_cmds); } cqe_out: /* clear out in production version only, till beta keep opcode * field intact, will be helpful in debugging (context dump) * nopin->op_code = 0; */ cqe_cnt++; qp->cqe_exp_seq_sn++; if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; if (qp->cq_cons_qe == qp->cq_last_qe) { qp->cq_cons_qe = qp->cq_first_qe; qp->cq_cons_idx = 0; } else { qp->cq_cons_qe++; qp->cq_cons_idx++; } } out: return cqe_cnt; } /** * bnx2i_fastpath_notification - process global event queue (KCQ) * @hba: adapter structure pointer * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry * * Fast path event notification handler, KCQ entry carries context id * of the connection that has 1 or more pending CQ entries */ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, struct iscsi_kcqe *new_cqe_kcqe) { struct bnx2i_conn *bnx2i_conn; u32 iscsi_cid; int nxt_idx; iscsi_cid = new_cqe_kcqe->iscsi_conn_id; bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); if (!bnx2i_conn) { printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); return; } if (!bnx2i_conn->ep) { printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); return; } bnx2i_process_new_cqes(bnx2i_conn); nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn)) bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); } /** * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE * @hba: adapter structure pointer * @update_kcqe: kcqe pointer * * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration */ static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, struct iscsi_kcqe *update_kcqe) { struct bnx2i_conn *conn; u32 iscsi_cid; iscsi_cid = update_kcqe->iscsi_conn_id; conn = bnx2i_get_conn_from_id(hba, iscsi_cid); if (!conn) { printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid); return; } if (!conn->ep) { printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); return; } if (update_kcqe->completion_status) { printk(KERN_ALERT "request failed cid %x\n", iscsi_cid); conn->ep->state = 
EP_STATE_ULP_UPDATE_FAILED; } else conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; wake_up_interruptible(&conn->ep->ofld_wait); } /** * bnx2i_recovery_que_add_conn - add connection to recovery queue * @hba: adapter structure pointer * @bnx2i_conn: iscsi connection * * Add connection to recovery queue and schedule adapter eh worker */ static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED); } /** * bnx2i_process_tcp_error - process error notification on a given connection * * @hba: adapter structure pointer * @tcp_err: tcp error kcqe pointer * * handles tcp level error notifications from FW. */ static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, struct iscsi_kcqe *tcp_err) { struct bnx2i_conn *bnx2i_conn; u32 iscsi_cid; iscsi_cid = tcp_err->iscsi_conn_id; bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); if (!bnx2i_conn) { printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); return; } printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n", iscsi_cid, tcp_err->completion_status); bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); } /** * bnx2i_process_iscsi_error - process error notification on a given connection * @hba: adapter structure pointer * @iscsi_err: iscsi error kcqe pointer * * handles iscsi error notifications from the FW. Firmware based in initial * handshake classifies iscsi protocol / TCP rfc violation into either * warning or error indications. If indication is of "Error" type, driver * will initiate session recovery for that connection/session. For * "Warning" type indication, driver will put out a system log message * (there will be only one message for each type for the life of the * session, this is to avoid un-necessarily overloading the system) */ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, struct iscsi_kcqe *iscsi_err) { struct bnx2i_conn *bnx2i_conn; u32 iscsi_cid; const char *additional_notice = ""; const char *message; int need_recovery; u64 err_mask64; iscsi_cid = iscsi_err->iscsi_conn_id; bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); if (!bnx2i_conn) { printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); return; } err_mask64 = (0x1ULL << iscsi_err->completion_status); if (err_mask64 & iscsi_error_mask) { need_recovery = 0; message = "iscsi_warning"; } else { need_recovery = 1; message = "iscsi_error"; } switch (iscsi_err->completion_status) { case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR: additional_notice = "hdr digest err"; break; case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR: additional_notice = "data digest err"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE: additional_notice = "wrong opcode rcvd"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN: additional_notice = "AHS len > 0 rcvd"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT: additional_notice = "invalid ITT rcvd"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN: additional_notice = "wrong StatSN rcvd"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN: additional_notice = "wrong DataSN rcvd"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T: additional_notice = "pend R2T violation"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0: additional_notice = "ERL0, UO"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1: additional_notice = "ERL0, U1"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2: 
additional_notice = "ERL0, U2"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3: additional_notice = "ERL0, U3"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4: additional_notice = "ERL0, U4"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5: additional_notice = "ERL0, U5"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6: additional_notice = "ERL0, U6"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN: additional_notice = "invalid resi len"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN: additional_notice = "MRDSL violation"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO: additional_notice = "F-bit not set"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV: additional_notice = "invalid TTT"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN: additional_notice = "invalid DataSN"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN: additional_notice = "burst len violation"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF: additional_notice = "buf offset violation"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN: additional_notice = "invalid LUN field"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN: additional_notice = "invalid R2TSN field"; break; #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0: additional_notice = "invalid cmd len1"; break; #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1: additional_notice = "invalid cmd len2"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED: additional_notice = "pend r2t exceeds MaxOutstandingR2T value"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV: additional_notice = "TTT is rsvd"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN: additional_notice = "MBL violation"; break; #define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO: additional_notice = "data seg len != 0"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN: additional_notice = "reject pdu len error"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN: additional_notice = "async pdu len error"; break; case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN: additional_notice = "nopin pdu len error"; break; #define BNX2_ERR_PEND_R2T_IN_CLEANUP \ ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP case BNX2_ERR_PEND_R2T_IN_CLEANUP: additional_notice = "pend r2t in cleanup"; break; case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT: additional_notice = "IP fragments rcvd"; break; case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS: additional_notice = "IP options error"; break; case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG: additional_notice = "urgent flag error"; break; default: printk(KERN_ALERT "iscsi_err - unknown err %x\n", iscsi_err->completion_status); } if (need_recovery) { iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "bnx2i: %s - %s\n", message, additional_notice); iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "conn_err - hostno %d conn %p, " "iscsi_cid %x cid %x\n", bnx2i_conn->hba->shost->host_no, bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid, 
bnx2i_conn->ep->ep_cid); bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); } else if (!test_and_set_bit(iscsi_err->completion_status, (void *) &bnx2i_conn->violation_notified)) iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "bnx2i: %s - %s\n", message, additional_notice); } /** * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion * @hba: adapter structure pointer * @conn_destroy: conn destroy kcqe pointer * * handles connection destroy completion request. */ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, struct iscsi_kcqe *conn_destroy) { struct bnx2i_endpoint *ep; ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); if (!ep) { printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending " "offload request, unexpected completion\n"); return; } if (hba != ep->hba) { printk(KERN_ALERT "conn destroy- error hba mismatch\n"); return; } if (conn_destroy->completion_status) { printk(KERN_ALERT "conn_destroy_cmpl: op failed\n"); ep->state = EP_STATE_CLEANUP_FAILED; } else ep->state = EP_STATE_CLEANUP_CMPL; wake_up_interruptible(&ep->ofld_wait); } /** * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion * @hba: adapter structure pointer * @ofld_kcqe: conn offload kcqe pointer * * handles initial connection offload completion, ep_connect() thread is * woken-up to continue with LLP connect process */ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, struct iscsi_kcqe *ofld_kcqe) { u32 cid_addr; struct bnx2i_endpoint *ep; ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id); if (!ep) { printk(KERN_ALERT "ofld_cmpl: no pend offload request\n"); return; } if (hba != ep->hba) { printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n"); return; } if (ofld_kcqe->completion_status) { ep->state = EP_STATE_OFLD_FAILED; if (ofld_kcqe->completion_status == ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable " "to allocate iSCSI context resources\n", hba->netdev->name); else if (ofld_kcqe->completion_status == ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE) printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " "opcode\n", hba->netdev->name); else if (ofld_kcqe->completion_status == ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY) /* error status code valid only for 5771x chipset */ ep->state = EP_STATE_OFLD_FAILED_CID_BUSY; else printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " "error code %d\n", hba->netdev->name, ofld_kcqe->completion_status); } else { ep->state = EP_STATE_OFLD_COMPL; cid_addr = ofld_kcqe->iscsi_conn_context_id; ep->ep_cid = cid_addr; ep->qp.ctx_base = NULL; } wake_up_interruptible(&ep->ofld_wait); } /** * bnx2i_indicate_kcqe - process iscsi conn update completion KCQE * @context: adapter structure pointer * @kcqe: kcqe pointer * @num_cqe: number of kcqes to process * * Generic KCQ event handler/dispatcher */ static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[], u32 num_cqe) { struct bnx2i_hba *hba = context; int i = 0; struct iscsi_kcqe *ikcqe = NULL; while (i < num_cqe) { ikcqe = (struct iscsi_kcqe *) kcqe[i++]; if (ikcqe->op_code == ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION) bnx2i_fastpath_notification(hba, ikcqe); else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN) bnx2i_process_ofld_cmpl(hba, ikcqe); else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN) bnx2i_process_update_conn_cmpl(hba, ikcqe); else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) { if (ikcqe->completion_status != ISCSI_KCQE_COMPLETION_STATUS_SUCCESS) 
bnx2i_iscsi_license_error(hba, ikcqe->\ completion_status); else { set_bit(ADAPTER_STATE_UP, &hba->adapter_state); bnx2i_get_link_state(hba); printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: " "ISCSI_INIT passed\n", (u8)hba->pcidev->bus->number, hba->pci_devno, (u8)hba->pci_func); } } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN) bnx2i_process_conn_destroy_cmpl(hba, ikcqe); else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR) bnx2i_process_iscsi_error(hba, ikcqe); else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR) bnx2i_process_tcp_error(hba, ikcqe); else printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", ikcqe->op_code); } } /** * bnx2i_indicate_netevent - Generic netdev event handler * @context: adapter structure pointer * @event: event type * @vlan_id: vlans id - associated vlan id with this event * * Handles four netdev events, NETDEV_UP, NETDEV_DOWN, * NETDEV_GOING_DOWN and NETDEV_CHANGE */ static void bnx2i_indicate_netevent(void *context, unsigned long event, u16 vlan_id) { struct bnx2i_hba *hba = context; /* Ignore all netevent coming from vlans */ if (vlan_id != 0) return; switch (event) { case NETDEV_UP: if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) bnx2i_send_fw_iscsi_init_msg(hba); break; case NETDEV_DOWN: clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); break; case NETDEV_GOING_DOWN: set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); iscsi_host_for_each_session(hba->shost, bnx2i_drop_session); break; case NETDEV_CHANGE: bnx2i_get_link_state(hba); break; default: ; } } /** * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion * @cm_sk: cnic sock structure pointer * * function callback exported via bnx2i - cnic driver interface to * indicate completion of option-2 TCP connect request. 
*/ static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk) { struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) ep->state = EP_STATE_CONNECT_FAILED; else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags)) ep->state = EP_STATE_CONNECT_COMPL; else ep->state = EP_STATE_CONNECT_FAILED; wake_up_interruptible(&ep->ofld_wait); } /** * bnx2i_cm_close_cmpl - process tcp conn close completion * @cm_sk: cnic sock structure pointer * * function callback exported via bnx2i - cnic driver interface to * indicate completion of option-2 graceful TCP connect shutdown */ static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk) { struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; ep->state = EP_STATE_DISCONN_COMPL; wake_up_interruptible(&ep->ofld_wait); } /** * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion * @cm_sk: cnic sock structure pointer * * function callback exported via bnx2i - cnic driver interface to * indicate completion of option-2 abortive TCP connect termination */ static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk) { struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; ep->state = EP_STATE_DISCONN_COMPL; wake_up_interruptible(&ep->ofld_wait); } /** * bnx2i_cm_remote_close - process received TCP FIN * @cm_sk: cnic sock structure pointer * * function callback exported via bnx2i - cnic driver interface to indicate * async TCP events such as FIN */ static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk) { struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; ep->state = EP_STATE_TCP_FIN_RCVD; if (ep->conn) bnx2i_recovery_que_add_conn(ep->hba, ep->conn); } /** * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup * @cm_sk: cnic sock structure pointer * * function callback exported via bnx2i - cnic driver interface to * indicate async TCP events (RST) sent by the peer. */ static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) { struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; u32 old_state = ep->state; ep->state = EP_STATE_TCP_RST_RCVD; if (old_state == EP_STATE_DISCONN_START) wake_up_interruptible(&ep->ofld_wait); else if (ep->conn) bnx2i_recovery_que_add_conn(ep->hba, ep->conn); } static int bnx2i_send_nl_mesg(void *context, u32 msg_type, char *buf, u16 buflen) { struct bnx2i_hba *hba = context; int rc; if (!hba) return -ENODEV; rc = iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport, msg_type, buf, buflen); if (rc) printk(KERN_ALERT "bnx2i: private nl message send error\n"); return rc; } /* * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure * carrying callback function pointers */ struct cnic_ulp_ops bnx2i_cnic_cb = { .cnic_init = bnx2i_ulp_init, .cnic_exit = bnx2i_ulp_exit, .cnic_start = bnx2i_start, .cnic_stop = bnx2i_stop, .indicate_kcqes = bnx2i_indicate_kcqe, .indicate_netevent = bnx2i_indicate_netevent, .cm_connect_complete = bnx2i_cm_connect_cmpl, .cm_close_complete = bnx2i_cm_close_cmpl, .cm_abort_complete = bnx2i_cm_abort_cmpl, .cm_remote_close = bnx2i_cm_remote_close, .cm_remote_abort = bnx2i_cm_remote_abort, .iscsi_nl_send_msg = bnx2i_send_nl_mesg, .cnic_get_stats = bnx2i_get_stats, .owner = THIS_MODULE }; /** * bnx2i_map_ep_dbell_regs - map connection doorbell registers * @ep: bnx2i endpoint * * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these * register in BAR #0. 
Whereas in 57710 these register are accessed by * mapping BAR #1 */ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) { u32 cid_num; u32 reg_off; u32 first_l4l5; u32 ctx_sz; u32 config2; resource_size_t reg_base; cid_num = bnx2i_get_cid_num(ep); if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { reg_base = pci_resource_start(ep->hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); ep->qp.ctx_base = ioremap(reg_base + reg_off, 4); if (!ep->qp.ctx_base) return -ENOMEM; goto arm_cq; } if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5; ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3; if (ctx_sz) reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE + BNX2I_570X_PAGE_SIZE_DEFAULT * (((cid_num - first_l4l5) / ctx_sz) + 256); else reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); } else /* 5709 device in normal node and 5706/5708 devices */ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off, MB_KERNEL_CTX_SIZE); if (!ep->qp.ctx_base) return -ENOMEM; arm_cq: bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE); return 0; }
linux-master
drivers/scsi/bnx2i/bnx2i_hwi.c
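A note on the queue sizing in bnx2i_alloc_qp_resc() above: each queue's element area and its page table are both rounded up to whole CNIC pages before dma_alloc_coherent() is called. The standalone sketch below reproduces only that arithmetic; DEMO_PAGE_SIZE, DEMO_PAGE_MASK and DEMO_SQ_WQE_SIZE are assumed stand-ins for the driver's CNIC_PAGE_SIZE, CNIC_PAGE_MASK and BNX2I_SQ_WQE_SIZE, and max_sqes is a hypothetical value.

/*
 * Standalone illustration (not part of the driver): the page-rounding math
 * used when sizing the SQ and its page table. All constants here are assumed
 * example values; the real ones come from the bnx2i/cnic headers.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE   4096u                    /* assumed CNIC_PAGE_SIZE    */
#define DEMO_PAGE_MASK   (~(DEMO_PAGE_SIZE - 1))  /* assumed CNIC_PAGE_MASK    */
#define DEMO_SQ_WQE_SIZE 64u                      /* assumed BNX2I_SQ_WQE_SIZE */

int main(void)
{
	unsigned int max_sqes = 128;              /* hypothetical hba->max_sqes */

	/* total SQ memory, rounded up to a whole number of pages */
	unsigned int sq_mem_size = max_sqes * DEMO_SQ_WQE_SIZE;
	sq_mem_size = (sq_mem_size + (DEMO_PAGE_SIZE - 1)) & DEMO_PAGE_MASK;

	/* one pointer-sized page-table entry per SQ page, again page aligned */
	unsigned int sq_pgtbl_size =
		(sq_mem_size / DEMO_PAGE_SIZE) * sizeof(void *);
	sq_pgtbl_size = (sq_pgtbl_size + (DEMO_PAGE_SIZE - 1)) & DEMO_PAGE_MASK;

	printf("sq_mem_size=%u bytes, sq_pgtbl_size=%u bytes\n",
	       sq_mem_size, sq_pgtbl_size);
	return 0;
}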
/* bnx2i_sysfs.c: QLogic NetXtreme II iSCSI driver. * * Copyright (c) 2004 - 2013 Broadcom Corporation * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa ([email protected]) * Previously Maintained by: Eddie Wai ([email protected]) * Maintained by: [email protected] */ #include "bnx2i.h" /** * bnx2i_dev_to_hba - maps dev pointer to adapter struct * @dev: device pointer * * Map device to hba structure */ static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev) { struct Scsi_Host *shost = class_to_shost(dev); return iscsi_host_priv(shost); } /** * bnx2i_show_sq_info - return(s currently configured send queue (SQ) size * @dev: device pointer * @attr: device attribute (unused) * @buf: buffer to return current SQ size parameter * * Returns current SQ size parameter, this paramater determines the number * outstanding iSCSI commands supported on a connection */ static ssize_t bnx2i_show_sq_info(struct device *dev, struct device_attribute *attr, char *buf) { struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); return sprintf(buf, "0x%x\n", hba->max_sqes); } /** * bnx2i_set_sq_info - update send queue (SQ) size parameter * @dev: device pointer * @attr: device attribute (unused) * @buf: buffer to return current SQ size parameter * @count: parameter buffer size * * Interface for user to change shared queue size allocated for each conn * Must be within SQ limits and a power of 2. For the latter this is needed * because of how libiscsi preallocates tasks. */ static ssize_t bnx2i_set_sq_info(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); u32 val; int max_sq_size; if (hba->ofld_conns_active) goto skip_config; if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) max_sq_size = BNX2I_5770X_SQ_WQES_MAX; else max_sq_size = BNX2I_570X_SQ_WQES_MAX; if (sscanf(buf, " 0x%x ", &val) > 0) { if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) && (is_power_of_2(val))) hba->max_sqes = val; } return count; skip_config: printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n"); return 0; } /** * bnx2i_show_ccell_info - returns command cell (HQ) size * @dev: device pointer * @attr: device attribute (unused) * @buf: buffer to return current SQ size parameter * * returns per-connection TCP history queue size parameter */ static ssize_t bnx2i_show_ccell_info(struct device *dev, struct device_attribute *attr, char *buf) { struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); return sprintf(buf, "0x%x\n", hba->num_ccell); } /** * bnx2i_set_ccell_info - set command cell (HQ) size * @dev: device pointer * @attr: device attribute (unused) * @buf: buffer to return current SQ size parameter * @count: parameter buffer size * * updates per-connection TCP history queue size parameter */ static ssize_t bnx2i_set_ccell_info(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 val; struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); if (hba->ofld_conns_active) goto skip_config; if (sscanf(buf, " 0x%x ", &val) > 0) { if ((val >= BNX2I_CCELLS_MIN) && (val <= BNX2I_CCELLS_MAX)) { hba->num_ccell = val; } } return count; skip_config: printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n"); return 0; } static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR, bnx2i_show_sq_info, bnx2i_set_sq_info); static 
DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR, bnx2i_show_ccell_info, bnx2i_set_ccell_info); static struct attribute *bnx2i_dev_attributes[] = { &dev_attr_sq_size.attr, &dev_attr_num_ccell.attr, NULL }; static const struct attribute_group bnx2i_dev_attr_group = { .attrs = bnx2i_dev_attributes }; const struct attribute_group *bnx2i_dev_groups[] = { &bnx2i_dev_attr_group, NULL };
linux-master
drivers/scsi/bnx2i/bnx2i_sysfs.c
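For the sysfs interface above, bnx2i_set_sq_info() only commits a new SQ size when it is a power of two inside the chip-specific limits, since libiscsi preallocates tasks based on that value. A minimal userspace sketch of that acceptance check follows; the DEMO_* limits are assumed stand-ins for BNX2I_SQ_WQES_MIN and the per-chip SQ_WQES_MAX constants.

/*
 * Standalone illustration (not part of the driver): the validation applied
 * to a user-supplied SQ size. Limit values are assumed for demonstration.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_SQ_WQES_MIN 16u    /* assumed BNX2I_SQ_WQES_MIN    */
#define DEMO_SQ_WQES_MAX 512u   /* assumed per-chip SQ_WQES_MAX */

static bool demo_is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static bool demo_sq_size_ok(unsigned int val)
{
	return val >= DEMO_SQ_WQES_MIN && val <= DEMO_SQ_WQES_MAX &&
	       demo_is_power_of_2(val);
}

int main(void)
{
	unsigned int candidates[] = { 0x20, 0x30, 0x400 };
	unsigned int i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		printf("0x%x -> %s\n", candidates[i],
		       demo_sq_size_ok(candidates[i]) ? "accepted" : "rejected");
	return 0;
}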
/* * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa ([email protected]) * Previously Maintained by: Eddie Wai ([email protected]) * Maintained by: [email protected] */ #include <linux/slab.h> #include <scsi/scsi_tcq.h> #include <scsi/libiscsi.h> #include "bnx2i.h" struct scsi_transport_template *bnx2i_scsi_xport_template; struct iscsi_transport bnx2i_iscsi_transport; static const struct scsi_host_template bnx2i_host_template; /* * Global endpoint resource info */ static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); static int bnx2i_adapter_ready(struct bnx2i_hba *hba) { int retval = 0; if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) retval = -EPERM; return retval; } /** * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks * @cmd: iscsi cmd struct pointer * @buf_off: absolute buffer offset * @start_bd_off: u32 pointer to return the offset within the BD * indicated by 'start_bd_idx' on which 'buf_off' falls * @start_bd_idx: index of the BD on which 'buf_off' falls * * identifies & marks various bd info for scsi command's imm data, * unsolicited data and the first solicited data seq. */ static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off, u32 *start_bd_off, u32 *start_bd_idx) { struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl; u32 cur_offset = 0; u32 cur_bd_idx = 0; if (buf_off) { while (buf_off >= (cur_offset + bd_tbl->buffer_length)) { cur_offset += bd_tbl->buffer_length; cur_bd_idx++; bd_tbl++; } } *start_bd_off = buf_off - cur_offset; *start_bd_idx = cur_bd_idx; } /** * bnx2i_setup_write_cmd_bd_info - sets up BD various information * @task: transport layer's cmd struct pointer * * identifies & marks various bd info for scsi command's immediate data, * unsolicited data and first solicited data seq which includes BD start * index & BD buf off. his function takes into account iscsi parameter such * as immediate data and unsolicited data is support on this connection. */ static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) { struct bnx2i_cmd *cmd = task->dd_data; u32 start_bd_offset; u32 start_bd_idx; u32 buffer_offset = 0; u32 cmd_len = cmd->req.total_data_transfer_length; /* if ImmediateData is turned off & IntialR2T is turned on, * there will be no immediate or unsolicited data, just return. 
*/ if (!iscsi_task_has_unsol_data(task) && !task->imm_count) return; /* Immediate data */ buffer_offset += task->imm_count; if (task->imm_count == cmd_len) return; if (iscsi_task_has_unsol_data(task)) { bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, &start_bd_offset, &start_bd_idx); cmd->req.ud_buffer_offset = start_bd_offset; cmd->req.ud_start_bd_index = start_bd_idx; buffer_offset += task->unsol_r2t.data_length; } if (buffer_offset != cmd_len) { bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, &start_bd_offset, &start_bd_idx); if ((start_bd_offset > task->conn->session->first_burst) || (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { int i = 0; iscsi_conn_printk(KERN_ALERT, task->conn, "bnx2i- error, buf offset 0x%x " "bd_valid %d use_sg %d\n", buffer_offset, cmd->io_tbl.bd_valid, scsi_sg_count(cmd->scsi_cmd)); for (i = 0; i < cmd->io_tbl.bd_valid; i++) iscsi_conn_printk(KERN_ALERT, task->conn, "bnx2i err, bd[%d]: len %x\n", i, cmd->io_tbl.bd_tbl[i].\ buffer_length); } cmd->req.sd_buffer_offset = start_bd_offset; cmd->req.sd_start_bd_index = start_bd_idx; } } /** * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table * @hba: adapter instance * @cmd: iscsi cmd struct pointer * * map SG list */ static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; struct scatterlist *sg; int byte_count = 0; int bd_count = 0; int sg_count; int sg_len; u64 addr; int i; BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); sg_count = scsi_dma_map(sc); scsi_for_each_sg(sc, sg, sg_count, i) { sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); bd[bd_count].buffer_addr_lo = addr & 0xffffffff; bd[bd_count].buffer_addr_hi = addr >> 32; bd[bd_count].buffer_length = sg_len; bd[bd_count].flags = 0; if (bd_count == 0) bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; byte_count += sg_len; bd_count++; } if (bd_count) bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; BUG_ON(byte_count != scsi_bufflen(sc)); return bd_count; } /** * bnx2i_iscsi_map_sg_list - maps SG list * @cmd: iscsi cmd struct pointer * * creates BD list table for the command */ static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) { int bd_count; bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); if (!bd_count) { struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; bd[0].buffer_length = bd[0].flags = 0; } cmd->io_tbl.bd_valid = bd_count; } /** * bnx2i_iscsi_unmap_sg_list - unmaps SG list * @cmd: iscsi cmd struct pointer * * unmap IO buffers and invalidate the BD table */ void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; if (cmd->io_tbl.bd_valid && sc) { scsi_dma_unmap(sc); cmd->io_tbl.bd_valid = 0; } } static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) { memset(&cmd->req, 0x00, sizeof(cmd->req)); cmd->req.op_code = 0xFF; cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; cmd->req.bd_list_addr_hi = (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); } /** * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' * @hba: pointer to adapter instance * @bnx2i_conn: pointer to iscsi connection * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) * * update iscsi cid table entry with connection pointer. 
This enables * driver to quickly get hold of connection structure pointer in * completion/interrupt thread using iscsi context ID */ static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn, u32 iscsi_cid) { if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "conn bind - entry #%d not free\n", iscsi_cid); return -EBUSY; } hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; return 0; } /** * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) */ struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, u16 iscsi_cid) { if (!hba->cid_que.conn_cid_tbl) { printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); return NULL; } else if (iscsi_cid >= hba->max_active_conns) { printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); return NULL; } return hba->cid_que.conn_cid_tbl[iscsi_cid]; } /** * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool * @hba: pointer to adapter instance */ static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) { int idx; if (!hba->cid_que.cid_free_cnt) return -1; idx = hba->cid_que.cid_q_cons_idx; hba->cid_que.cid_q_cons_idx++; if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) hba->cid_que.cid_q_cons_idx = 0; hba->cid_que.cid_free_cnt--; return hba->cid_que.cid_que[idx]; } /** * bnx2i_free_iscsi_cid - returns tcp port to free list * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to free */ static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) { int idx; if (iscsi_cid == (u16) -1) return; hba->cid_que.cid_free_cnt++; idx = hba->cid_que.cid_q_prod_idx; hba->cid_que.cid_que[idx] = iscsi_cid; hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; hba->cid_que.cid_q_prod_idx++; if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) hba->cid_que.cid_q_prod_idx = 0; } /** * bnx2i_setup_free_cid_que - sets up free iscsi cid queue * @hba: pointer to adapter instance * * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, * and initialize table attributes */ static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) { int mem_size; int i; mem_size = hba->max_active_conns * sizeof(u32); mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); if (!hba->cid_que.cid_que_base) return -ENOMEM; mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); if (!hba->cid_que.conn_cid_tbl) { kfree(hba->cid_que.cid_que_base); hba->cid_que.cid_que_base = NULL; return -ENOMEM; } hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; hba->cid_que.cid_q_prod_idx = 0; hba->cid_que.cid_q_cons_idx = 0; hba->cid_que.cid_q_max_idx = hba->max_active_conns; hba->cid_que.cid_free_cnt = hba->max_active_conns; for (i = 0; i < hba->max_active_conns; i++) { hba->cid_que.cid_que[i] = i; hba->cid_que.conn_cid_tbl[i] = NULL; } return 0; } /** * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources * @hba: pointer to adapter instance */ static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba) { kfree(hba->cid_que.cid_que_base); hba->cid_que.cid_que_base = NULL; kfree(hba->cid_que.conn_cid_tbl); hba->cid_que.conn_cid_tbl = NULL; } /** * bnx2i_alloc_ep - allocates ep structure from global pool * @hba: pointer to adapter instance * * 
routine allocates a free endpoint structure from global pool and * a tcp port to be used for this connection. Global resource lock, * 'bnx2i_resc_lock' is held while accessing shared global data structures */ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) { struct iscsi_endpoint *ep; struct bnx2i_endpoint *bnx2i_ep; u32 ec_div; ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); if (!ep) { printk(KERN_ERR "bnx2i: Could not allocate ep\n"); return NULL; } bnx2i_ep = ep->dd_data; bnx2i_ep->cls_ep = ep; INIT_LIST_HEAD(&bnx2i_ep->link); bnx2i_ep->state = EP_STATE_IDLE; bnx2i_ep->ep_iscsi_cid = (u16) -1; bnx2i_ep->hba = hba; bnx2i_ep->hba_age = hba->age; ec_div = event_coal_div; while (ec_div >>= 1) bnx2i_ep->ec_shift += 1; hba->ofld_conns_active++; init_waitqueue_head(&bnx2i_ep->ofld_wait); return ep; } /** * bnx2i_free_ep - free endpoint * @ep: pointer to iscsi endpoint structure */ static void bnx2i_free_ep(struct iscsi_endpoint *ep) { struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; unsigned long flags; spin_lock_irqsave(&bnx2i_resc_lock, flags); bnx2i_ep->state = EP_STATE_IDLE; bnx2i_ep->hba->ofld_conns_active--; if (bnx2i_ep->ep_iscsi_cid != (u16) -1) bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); if (bnx2i_ep->conn) { bnx2i_ep->conn->ep = NULL; bnx2i_ep->conn = NULL; } bnx2i_ep->hba = NULL; spin_unlock_irqrestore(&bnx2i_resc_lock, flags); iscsi_destroy_endpoint(ep); } /** * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command * @hba: adapter instance pointer * @session: iscsi session pointer * @cmd: iscsi command structure */ static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, struct bnx2i_cmd *cmd) { struct io_bdt *io = &cmd->io_tbl; struct iscsi_bd *bd; io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), &io->bd_tbl_dma, GFP_KERNEL); if (!io->bd_tbl) { iscsi_session_printk(KERN_ERR, session, "Could not " "allocate bdt.\n"); return -ENOMEM; } io->bd_valid = 0; return 0; } /** * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table * @hba: adapter instance pointer * @session: iscsi session pointer */ static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct bnx2i_cmd *cmd = task->dd_data; if (cmd->io_tbl.bd_tbl) dma_free_coherent(&hba->pcidev->dev, ISCSI_MAX_BDS_PER_CMD * sizeof(struct iscsi_bd), cmd->io_tbl.bd_tbl, cmd->io_tbl.bd_tbl_dma); } } /** * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session * @hba: adapter instance pointer * @session: iscsi session pointer */ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct bnx2i_cmd *cmd = task->dd_data; task->hdr = &cmd->hdr; task->hdr_max = sizeof(struct iscsi_hdr); if (bnx2i_alloc_bdt(hba, session, cmd)) goto free_bdts; } return 0; free_bdts: bnx2i_destroy_cmd_pool(hba, session); return -ENOMEM; } /** * bnx2i_setup_mp_bdt - allocate BD table resources * @hba: pointer to adapter structure * * Allocate memory for dummy buffer and associated BD * table to be used by middle path (MP) requests */ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) { int rc = 0; struct iscsi_bd *mp_bdt; u64 addr; hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &hba->mp_bd_dma, GFP_KERNEL); if (!hba->mp_bd_tbl) { printk(KERN_ERR 
"unable to allocate Middle Path BDT\n"); rc = -1; goto out; } hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; rc = -1; goto out; } mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; addr = (unsigned long) hba->dummy_buf_dma; mp_bdt->buffer_addr_lo = addr & 0xffffffff; mp_bdt->buffer_addr_hi = addr >> 32; mp_bdt->buffer_length = CNIC_PAGE_SIZE; mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; out: return rc; } /** * bnx2i_free_mp_bdt - releases ITT back to free pool * @hba: pointer to adapter instance * * free MP dummy buffer and associated BD table */ static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) { if (hba->mp_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; } if (hba->dummy_buffer) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } return; } /** * bnx2i_drop_session - notifies iscsid of connection error. * @cls_session: iscsi cls session pointer * * This notifies iscsid that there is a error, so it can initiate * recovery. * * This relies on caller using the iscsi class iterator so the object * is refcounted and does not disapper from under us. */ void bnx2i_drop_session(struct iscsi_cls_session *cls_session) { iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); } /** * bnx2i_ep_destroy_list_add - add an entry to EP destroy list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * EP destroy queue manager */ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_destroy_list); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_destroy_list_del - add an entry to EP destroy list * * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * EP destroy queue manager */ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_ofld_list); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints * * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to find * */ struct bnx2i_endpoint * bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct 
list_head *list; struct list_head *tmp; struct bnx2i_endpoint *ep = NULL; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_ofld_list) { ep = (struct bnx2i_endpoint *)list; if (ep->ep_iscsi_cid == iscsi_cid) break; ep = NULL; } read_unlock_bh(&hba->ep_rdwr_lock); if (!ep) printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); return ep; } /** * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to find * */ struct bnx2i_endpoint * bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; struct bnx2i_endpoint *ep = NULL; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_destroy_list) { ep = (struct bnx2i_endpoint *)list; if (ep->ep_iscsi_cid == iscsi_cid) break; ep = NULL; } read_unlock_bh(&hba->ep_rdwr_lock); if (!ep) printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); return ep; } /** * bnx2i_ep_active_list_add - add an entry to ep active list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_active_list); write_unlock_bh(&hba->ep_rdwr_lock); } /** * bnx2i_ep_active_list_del - deletes an entry to ep active list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); } /** * bnx2i_setup_host_queue_size - assigns shost->can_queue param * @hba: pointer to adapter instance * @shost: scsi host pointer * * Initializes 'can_queue' parameter based on how many outstanding commands * the device can handle. 
Each device 5708/5709/57710 has different * capabilities */ static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba, struct Scsi_Host *shost) { if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709; else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710; else shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; } /** * bnx2i_alloc_hba - allocate and init adapter instance * @cnic: cnic device pointer * * allocate & initialize adapter structure and call other * support routines to do per adapter initialization */ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) { struct Scsi_Host *shost; struct bnx2i_hba *hba; shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0); if (!shost) return NULL; shost->dma_boundary = cnic->pcidev->dma_mask; shost->transportt = bnx2i_scsi_xport_template; shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1; shost->max_channel = 0; shost->max_lun = 512; shost->max_cmd_len = 16; hba = iscsi_host_priv(shost); hba->shost = shost; hba->netdev = cnic->netdev; /* Get PCI related information and update hba struct members */ hba->pcidev = cnic->pcidev; pci_dev_get(hba->pcidev); hba->pci_did = hba->pcidev->device; hba->pci_vid = hba->pcidev->vendor; hba->pci_sdid = hba->pcidev->subsystem_device; hba->pci_svid = hba->pcidev->subsystem_vendor; hba->pci_func = PCI_FUNC(hba->pcidev->devfn); hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); bnx2i_identify_device(hba, cnic); bnx2i_setup_host_queue_size(hba, shost); hba->reg_base = pci_resource_start(hba->pcidev, 0); if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2); if (!hba->regview) goto ioreg_map_err; } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba->regview = pci_iomap(hba->pcidev, 0, 4096); if (!hba->regview) goto ioreg_map_err; } if (bnx2i_setup_mp_bdt(hba)) goto mp_bdt_mem_err; INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_active_list); INIT_LIST_HEAD(&hba->ep_destroy_list); rwlock_init(&hba->ep_rdwr_lock); hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; /* different values for 5708/5709/57710 */ hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; if (bnx2i_setup_free_cid_que(hba)) goto cid_que_err; /* SQ/RQ/CQ size can be changed via sysfx interface */ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX) hba->max_sqes = sq_size; else hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; } else { /* 5706/5708/5709 */ if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX) hba->max_sqes = sq_size; else hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; } hba->max_rqes = rq_size; hba->max_cqes = hba->max_sqes + rq_size; if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; hba->num_ccell = hba->max_sqes / 2; spin_lock_init(&hba->lock); mutex_init(&hba->net_dev_lock); init_waitqueue_head(&hba->eh_wait); if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba->hba_shutdown_tmo = 30 * HZ; hba->conn_teardown_tmo = 20 * HZ; hba->conn_ctx_destroy_tmo = 6 * HZ; } else { /* 5706/5708/5709 */ hba->hba_shutdown_tmo = 20 * HZ; hba->conn_teardown_tmo = 10 * HZ; hba->conn_ctx_destroy_tmo = 2 * HZ; } #ifdef 
CONFIG_32BIT spin_lock_init(&hba->stat_lock); #endif memset(&hba->stats, 0, sizeof(struct iscsi_stats_info)); if (iscsi_host_add(shost, &hba->pcidev->dev)) goto free_dump_mem; return hba; free_dump_mem: bnx2i_release_free_cid_que(hba); cid_que_err: bnx2i_free_mp_bdt(hba); mp_bdt_mem_err: if (hba->regview) { pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } ioreg_map_err: pci_dev_put(hba->pcidev); scsi_host_put(shost); return NULL; } /** * bnx2i_free_hba- releases hba structure and resources held by the adapter * @hba: pointer to adapter instance * * free adapter structure and call various cleanup routines. */ void bnx2i_free_hba(struct bnx2i_hba *hba) { struct Scsi_Host *shost = hba->shost; iscsi_host_remove(shost, false); INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_active_list); INIT_LIST_HEAD(&hba->ep_destroy_list); if (hba->regview) { pci_iounmap(hba->pcidev, hba->regview); hba->regview = NULL; } pci_dev_put(hba->pcidev); bnx2i_free_mp_bdt(hba); bnx2i_release_free_cid_que(hba); iscsi_host_free(shost); } /** * bnx2i_conn_free_login_resources - free DMA resources used for login process * @hba: pointer to adapter instance * @bnx2i_conn: iscsi connection pointer * * Login related resources, mostly BDT & payload DMA memory is freed */ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { if (bnx2i_conn->gen_pdu.resp_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.resp_bd_tbl, bnx2i_conn->gen_pdu.resp_bd_dma); bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.req_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.resp_buf) { dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_dma_addr); bnx2i_conn->gen_pdu.resp_buf = NULL; } if (bnx2i_conn->gen_pdu.req_buf) { dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.req_buf, bnx2i_conn->gen_pdu.req_dma_addr); bnx2i_conn->gen_pdu.req_buf = NULL; } } /** * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop. * @hba: pointer to adapter instance * @bnx2i_conn: iscsi connection pointer * * Mgmt task DNA resources are allocated in this routine. 
*/ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { /* Allocate memory for login request/response buffers */ bnx2i_conn->gen_pdu.req_buf = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &bnx2i_conn->gen_pdu.req_dma_addr, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_buf == NULL) goto login_req_buf_failure; bnx2i_conn->gen_pdu.req_buf_size = 0; bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf; bnx2i_conn->gen_pdu.resp_buf = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &bnx2i_conn->gen_pdu.resp_dma_addr, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_buf == NULL) goto login_resp_buf_failure; bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; bnx2i_conn->gen_pdu.req_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) goto login_req_bd_tbl_failure; bnx2i_conn->gen_pdu.resp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) goto login_resp_bd_tbl_failure; return 0; login_resp_bd_tbl_failure: dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; login_req_bd_tbl_failure: dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_dma_addr); bnx2i_conn->gen_pdu.resp_buf = NULL; login_resp_buf_failure: dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.req_buf, bnx2i_conn->gen_pdu.req_dma_addr); bnx2i_conn->gen_pdu.req_buf = NULL; login_req_buf_failure: iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data, "login resource alloc failed!!\n"); return -ENOMEM; } /** * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table. * @bnx2i_conn: iscsi connection pointer * * Allocates buffers and BD tables before shipping requests to cnic * for PDUs prepared by 'iscsid' daemon */ static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn) { struct iscsi_bd *bd_tbl; bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl; bd_tbl->buffer_addr_hi = (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32); bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr; bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr - bnx2i_conn->gen_pdu.req_buf; bd_tbl->reserved0 = 0; bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl; bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32; bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr; bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN; bd_tbl->reserved0 = 0; bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; } /** * bnx2i_iscsi_send_generic_request - called to send mgmt tasks. * @task: transport layer task pointer * * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login, * Nop-out and Logout requests flow through this path. 
*/ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) { struct bnx2i_cmd *cmd = task->dd_data; struct bnx2i_conn *bnx2i_conn = cmd->conn; int rc = 0; char *buf; int data_len; bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn); switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: bnx2i_send_iscsi_login(bnx2i_conn, task); break; case ISCSI_OP_NOOP_OUT: data_len = bnx2i_conn->gen_pdu.req_buf_size; buf = bnx2i_conn->gen_pdu.req_buf; if (data_len) rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, buf, data_len, 1); else rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, NULL, 0, 1); break; case ISCSI_OP_LOGOUT: rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); break; case ISCSI_OP_SCSI_TMFUNC: rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); break; case ISCSI_OP_TEXT: rc = bnx2i_send_iscsi_text(bnx2i_conn, task); break; default: iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "send_gen: unsupported op 0x%x\n", task->hdr->opcode); } return rc; } /********************************************************************** * SCSI-ML Interface **********************************************************************/ /** * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe * @sc: SCSI-ML command pointer * @cmd: iscsi cmd pointer */ static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd) { u32 dword; int lpcnt; u8 *srcp; u32 *dstp; u32 scsi_lun[2]; int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun); cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]); cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]); lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword); srcp = (u8 *) sc->cmnd; dstp = (u32 *) cmd->req.cdb; while (lpcnt--) { memcpy(&dword, (const void *) srcp, 4); *dstp = cpu_to_be32(dword); srcp += 4; dstp++; } if (sc->cmd_len & 0x3) { dword = (u32) srcp[0] | ((u32) srcp[1] << 8); *dstp = cpu_to_be32(dword); } } static void bnx2i_cleanup_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct bnx2i_hba *hba = bnx2i_conn->hba; /* * mgmt task or cmd was never sent to us to transmit. 
*/ if (!task->sc || task->state == ISCSI_TASK_PENDING) return; /* * need to clean-up task context to claim dma buffers */ if (task->state == ISCSI_TASK_ABRT_TMF) { bnx2i_send_cmd_cleanup_req(hba, task->dd_data); spin_unlock_bh(&conn->session->back_lock); wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); spin_lock_bh(&conn->session->back_lock); } bnx2i_iscsi_unmap_sg_list(task->dd_data); } /** * bnx2i_mtask_xmit - transmit mtask to chip for further processing * @conn: transport layer conn structure pointer * @task: transport layer command structure pointer */ static int bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct bnx2i_hba *hba = bnx2i_conn->hba; struct bnx2i_cmd *cmd = task->dd_data; memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); bnx2i_setup_cmd_wqe_template(cmd); bnx2i_conn->gen_pdu.req_buf_size = task->data_count; /* Tx PDU/data length count */ ADD_STATS_64(hba, tx_pdus, 1); ADD_STATS_64(hba, tx_bytes, task->data_count); if (task->data_count) { memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, task->data_count); bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf + task->data_count; } cmd->conn = conn->dd_data; cmd->scsi_cmd = NULL; return bnx2i_iscsi_send_generic_request(task); } /** * bnx2i_task_xmit - transmit iscsi command to chip for further processing * @task: transport layer command structure pointer * * maps SG buffers and send request to chip/firmware in the form of SQ WQE */ static int bnx2i_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct scsi_cmnd *sc = task->sc; struct bnx2i_cmd *cmd = task->dd_data; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 > hba->max_sqes) return -ENOMEM; /* * If there is no scsi_cmnd this must be a mgmt task */ if (!sc) return bnx2i_mtask_xmit(conn, task); bnx2i_setup_cmd_wqe_template(cmd); cmd->req.op_code = ISCSI_OP_SCSI_CMD; cmd->conn = bnx2i_conn; cmd->scsi_cmd = sc; cmd->req.total_data_transfer_length = scsi_bufflen(sc); cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn); bnx2i_iscsi_map_sg_list(cmd); bnx2i_cpy_scsi_cdb(sc, cmd); cmd->req.op_attr = ISCSI_ATTR_SIMPLE; if (sc->sc_data_direction == DMA_TO_DEVICE) { cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE; cmd->req.itt = task->itt | (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); bnx2i_setup_write_cmd_bd_info(task); } else { if (scsi_bufflen(sc)) cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ; cmd->req.itt = task->itt | (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); } cmd->req.num_bds = cmd->io_tbl.bd_valid; if (!cmd->io_tbl.bd_valid) { cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); cmd->req.num_bds = 1; } bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd); return 0; } /** * bnx2i_session_create - create a new iscsi session * @ep: pointer to iscsi endpoint * @cmds_max: user specified maximum commands * @qdepth: scsi queue depth to support * @initial_cmdsn: initial iscsi CMDSN to be used for this session * * Creates a new iSCSI session instance on given device. 
*/ static struct iscsi_cls_session * bnx2i_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn) { struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct bnx2i_hba *hba; struct bnx2i_endpoint *bnx2i_ep; if (!ep) { printk(KERN_ERR "bnx2i: missing ep.\n"); return NULL; } bnx2i_ep = ep->dd_data; shost = bnx2i_ep->hba->shost; hba = iscsi_host_priv(shost); if (bnx2i_adapter_ready(hba)) return NULL; /* * user can override hw limit as long as it is within * the min/max. */ if (cmds_max > hba->max_sqes) cmds_max = hba->max_sqes; else if (cmds_max < BNX2I_SQ_WQES_MIN) cmds_max = BNX2I_SQ_WQES_MIN; cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, cmds_max, 0, sizeof(struct bnx2i_cmd), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) goto session_teardown; return cls_session; session_teardown: iscsi_session_teardown(cls_session); return NULL; } /** * bnx2i_session_destroy - destroys iscsi session * @cls_session: pointer to iscsi cls session * * Destroys previously created iSCSI session instance and releases * all resources held by it */ static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); bnx2i_destroy_cmd_pool(hba, session); iscsi_session_teardown(cls_session); } /** * bnx2i_conn_create - create iscsi connection instance * @cls_session: pointer to iscsi cls session * @cid: iscsi cid as per rfc (not NX2's CID terminology) * * Creates a new iSCSI connection instance for a given session */ static struct iscsi_cls_conn * bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_conn *bnx2i_conn; struct iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), cid); if (!cls_conn) return NULL; conn = cls_conn->dd_data; bnx2i_conn = conn->dd_data; bnx2i_conn->cls_conn = cls_conn; bnx2i_conn->hba = hba; atomic_set(&bnx2i_conn->work_cnt, 0); /* 'ep' ptr will be assigned in bind() call */ bnx2i_conn->ep = NULL; init_completion(&bnx2i_conn->cmd_cleanup_cmpl); if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { iscsi_conn_printk(KERN_ALERT, conn, "conn_new: login resc alloc failed!!\n"); goto free_conn; } return cls_conn; free_conn: iscsi_conn_teardown(cls_conn); return NULL; } /** * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together * @cls_session: pointer to iscsi cls session * @cls_conn: pointer to iscsi cls conn * @transport_fd: 64-bit EP handle * @is_leading: leading connection on this session? * * Binds together iSCSI session instance, iSCSI connection instance * and the TCP connection. 
This routine returns error code if * TCP connection does not belong on the device iSCSI sess/conn * is bound */ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_endpoint *bnx2i_ep; struct iscsi_endpoint *ep; int ret_code; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; /* * Forcefully terminate all in progress connection recovery at the * earliest, either in bind(), send_pdu(LOGIN), or conn_start() */ if (bnx2i_adapter_ready(hba)) { ret_code = -EIO; goto put_ep; } bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) { /* Peer disconnect via' FIN or RST */ ret_code = -EINVAL; goto put_ep; } if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { ret_code = -EINVAL; goto put_ep; } if (bnx2i_ep->hba != hba) { /* Error - TCP connection does not belong to this device */ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "conn bind, ep=0x%p (%s) does not", bnx2i_ep, bnx2i_ep->hba->netdev->name); iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "belong to hba (%s)\n", hba->netdev->name); ret_code = -EEXIST; goto put_ep; } bnx2i_ep->conn = bnx2i_conn; bnx2i_conn->ep = bnx2i_ep; bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, bnx2i_ep->ep_iscsi_cid); /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710 * driver needs to explicitly replenish RQ index during setup. 
*/ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) bnx2i_put_rq_buf(bnx2i_conn, 0); bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); put_ep: iscsi_put_endpoint(ep); return ret_code; } /** * bnx2i_conn_destroy - destroy iscsi connection instance & release resources * @cls_conn: pointer to iscsi cls conn * * Destroy an iSCSI connection instance and release memory resources held by * this connection */ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost; struct bnx2i_hba *hba; struct bnx2i_work *work, *tmp; unsigned cpu = 0; struct bnx2i_percpu_s *p; shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); hba = iscsi_host_priv(shost); bnx2i_conn_free_login_resources(hba, bnx2i_conn); if (atomic_read(&bnx2i_conn->work_cnt)) { for_each_online_cpu(cpu) { p = &per_cpu(bnx2i_percpu, cpu); spin_lock_bh(&p->p_work_lock); list_for_each_entry_safe(work, tmp, &p->work_list, list) { if (work->session == conn->session && work->bnx2i_conn == bnx2i_conn) { list_del_init(&work->list); kfree(work); if (!atomic_dec_and_test( &bnx2i_conn->work_cnt)) break; } } spin_unlock_bh(&p->p_work_lock); } } iscsi_conn_teardown(cls_conn); } /** * bnx2i_ep_get_param - return iscsi ep parameter to caller * @ep: pointer to iscsi endpoint * @param: parameter type identifier * @buf: buffer pointer * * returns iSCSI ep parameters */ static int bnx2i_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; struct bnx2i_hba *hba = bnx2i_ep->hba; int len = -ENOTCONN; if (!hba) return -ENOTCONN; switch (param) { case ISCSI_PARAM_CONN_PORT: mutex_lock(&hba->net_dev_lock); if (bnx2i_ep->cm_sk) len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port); mutex_unlock(&hba->net_dev_lock); break; case ISCSI_PARAM_CONN_ADDRESS: mutex_lock(&hba->net_dev_lock); if (bnx2i_ep->cm_sk) len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip); mutex_unlock(&hba->net_dev_lock); break; default: return -ENOSYS; } return len; } /** * bnx2i_host_get_param - returns host (adapter) related parameters * @shost: scsi host pointer * @param: parameter type identifier * @buf: buffer pointer */ static int bnx2i_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct bnx2i_hba *hba = iscsi_host_priv(shost); int len = 0; switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); break; case ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "%s\n", hba->netdev->name); break; case ISCSI_HOST_PARAM_IPADDRESS: { struct list_head *active_list = &hba->ep_active_list; read_lock_bh(&hba->ep_rdwr_lock); if (!list_empty(&hba->ep_active_list)) { struct bnx2i_endpoint *bnx2i_ep; struct cnic_sock *csk; bnx2i_ep = list_first_entry(active_list, struct bnx2i_endpoint, link); csk = bnx2i_ep->cm_sk; if (test_bit(SK_F_IPV6, &csk->flags)) len = sprintf(buf, "%pI6\n", csk->src_ip); else len = sprintf(buf, "%pI4\n", csk->src_ip); } read_unlock_bh(&hba->ep_rdwr_lock); break; } default: return iscsi_host_get_param(shost, param, buf); } return len; } /** * bnx2i_conn_start - completes iscsi connection migration to FFP * @cls_conn: pointer to iscsi cls conn * * last call in FFP migration to handover iscsi conn to the driver */ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; 
bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; bnx2i_update_iscsi_conn(conn); /* * this should normally not sleep for a long time so it should * not disrupt the caller. */ timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0); bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; add_timer(&bnx2i_conn->ep->ofld_timer); /* update iSCSI context for this conn, wait for CNIC to complete */ wait_event_interruptible(bnx2i_conn->ep->ofld_wait, bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_conn->ep->ofld_timer); iscsi_conn_start(cls_conn); return 0; } /** * bnx2i_conn_get_stats - returns iSCSI stats * @cls_conn: pointer to iscsi cls conn * @stats: pointer to iscsi statistic struct */ static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->digest_err = 0; stats->timeout_err = 0; strcpy(stats->custom[0].desc, "eh_abort_cnt"); stats->custom[0].value = conn->eh_abort_cnt; stats->custom_length = 1; } /** * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices * @dst_addr: target IP address * * check if route resolves to BNX2 device */ static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) { struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; struct bnx2i_hba *hba; struct cnic_dev *cnic = NULL; hba = get_adapter_list_head(); if (hba && hba->cnic) cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); if (!cnic) { printk(KERN_ALERT "bnx2i: no route," "can't connect using cnic\n"); goto no_nx2_route; } hba = bnx2i_find_hba_for_cnic(cnic); if (!hba) goto no_nx2_route; if (bnx2i_adapter_ready(hba)) { printk(KERN_ALERT "bnx2i: check route, hba not found\n"); goto no_nx2_route; } if (hba->netdev->mtu > hba->mtu_supported) { printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n", hba->netdev->name, hba->netdev->mtu); printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n", hba->mtu_supported); goto no_nx2_route; } return hba; no_nx2_route: return NULL; } /** * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources * @hba: pointer to adapter instance * @ep: endpoint (transport identifier) structure * * destroys cm_sock structure and on chip iscsi context */ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk) hba->cnic->cm_destroy(ep->cm_sk); if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && ep->state == EP_STATE_DISCONN_TIMEDOUT) { if (ep->conn && ep->conn->cls_conn && ep->conn->cls_conn->dd_data) { struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; /* Must suspend all rx queue activity for this ep */ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); } /* CONN_DISCONNECT timeout may or may not be an issue depending * on what transcribed in TCP layer, different targets behave * differently */ printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, " "please submit GRC Dump, NW/PCIe trace, " "driver msgs to developers for analysis\n", 
hba->netdev->name); } ep->state = EP_STATE_CLEANUP_START; timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0); ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; add_timer(&ep->ofld_timer); bnx2i_ep_destroy_list_add(hba, ep); /* destroy iSCSI context, wait for it to complete */ if (bnx2i_send_conn_destroy(hba, ep)) ep->state = EP_STATE_CLEANUP_CMPL; wait_event_interruptible(ep->ofld_wait, (ep->state != EP_STATE_CLEANUP_START)); if (signal_pending(current)) flush_signals(current); del_timer_sync(&ep->ofld_timer); bnx2i_ep_destroy_list_del(hba, ep); if (ep->state != EP_STATE_CLEANUP_CMPL) /* should never happen */ printk(KERN_ALERT "bnx2i - conn destroy failed\n"); return 0; } /** * bnx2i_ep_connect - establish TCP connection to target portal * @shost: scsi host * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * this routine initiates the TCP/IP connection by invoking Option-2 i/f * with l5_core and the CNIC. This is a multi-step process of resolving * route to target, create a iscsi connection context, handshaking with * CNIC module to create/initialize the socket struct and finally * sending down option-2 request to complete TCP 3-way handshake */ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { u32 iscsi_cid = BNX2I_CID_RESERVED; struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; struct sockaddr_in6 *desti6; struct bnx2i_endpoint *bnx2i_ep; struct bnx2i_hba *hba; struct cnic_dev *cnic; struct cnic_sockaddr saddr; struct iscsi_endpoint *ep; int rc = 0; if (shost) { /* driver is given scsi host to work with */ hba = iscsi_host_priv(shost); } else /* * check if the given destination can be reached through * a iscsi capable NetXtreme2 device */ hba = bnx2i_check_route(dst_addr); if (!hba) { rc = -EINVAL; goto nohba; } mutex_lock(&hba->net_dev_lock); if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) { rc = -EPERM; goto check_busy; } cnic = hba->cnic; ep = bnx2i_alloc_ep(hba); if (!ep) { rc = -ENOMEM; goto check_busy; } bnx2i_ep = ep->dd_data; atomic_set(&bnx2i_ep->num_active_cmds, 0); iscsi_cid = bnx2i_alloc_iscsi_cid(hba); if (iscsi_cid == -1) { printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate " "iscsi cid\n", hba->netdev->name); rc = -ENOMEM; bnx2i_free_ep(ep); goto check_busy; } bnx2i_ep->hba_age = hba->age; rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); if (rc != 0) { printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error" "\n", hba->netdev->name); rc = -ENOMEM; goto qp_resc_err; } bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; bnx2i_ep->state = EP_STATE_OFLD_START; bnx2i_ep_ofld_list_add(hba, bnx2i_ep); timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; add_timer(&bnx2i_ep->ofld_timer); if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", hba->netdev->name, bnx2i_ep->ep_iscsi_cid); rc = -EBUSY; } else rc = -ENOSPC; printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe" "\n", hba->netdev->name); bnx2i_ep_ofld_list_del(hba, bnx2i_ep); goto conn_failed; } /* Wait for CNIC hardware to setup conn context and return 'cid' */ wait_event_interruptible(bnx2i_ep->ofld_wait, bnx2i_ep->state != EP_STATE_OFLD_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_ep->ofld_timer); bnx2i_ep_ofld_list_del(hba, bnx2i_ep); if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { if 
(bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", hba->netdev->name, bnx2i_ep->ep_iscsi_cid); rc = -EBUSY; } else rc = -ENOSPC; goto conn_failed; } rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); if (rc) { rc = -EINVAL; /* Need to terminate and cleanup the connection */ goto release_ep; } bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; bnx2i_ep->cm_sk->snd_buf = 256 * 1024; clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); memset(&saddr, 0, sizeof(saddr)); if (dst_addr->sa_family == AF_INET) { desti = (struct sockaddr_in *) dst_addr; saddr.remote.v4 = *desti; saddr.local.v4.sin_family = desti->sin_family; } else if (dst_addr->sa_family == AF_INET6) { desti6 = (struct sockaddr_in6 *) dst_addr; saddr.remote.v6 = *desti6; saddr.local.v6.sin6_family = desti6->sin6_family; } bnx2i_ep->timestamp = jiffies; bnx2i_ep->state = EP_STATE_CONNECT_START; if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { rc = -EINVAL; goto conn_failed; } else rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); if (rc) goto release_ep; bnx2i_ep_active_list_add(hba, bnx2i_ep); rc = bnx2i_map_ep_dbell_regs(bnx2i_ep); if (rc) goto del_active_ep; mutex_unlock(&hba->net_dev_lock); return ep; del_active_ep: bnx2i_ep_active_list_del(hba, bnx2i_ep); release_ep: if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { mutex_unlock(&hba->net_dev_lock); return ERR_PTR(rc); } conn_failed: bnx2i_free_qp_resc(hba, bnx2i_ep); qp_resc_err: bnx2i_free_ep(ep); check_busy: mutex_unlock(&hba->net_dev_lock); nohba: return ERR_PTR(rc); } /** * bnx2i_ep_poll - polls for TCP connection establishement * @ep: TCP connection (endpoint) handle * @timeout_ms: timeout value in milli secs * * polls for TCP connect request to complete */ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct bnx2i_endpoint *bnx2i_ep; int rc = 0; bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_IDLE) || (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) return -1; if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL) return 1; rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait, ((bnx2i_ep->state == EP_STATE_OFLD_FAILED) || (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)), msecs_to_jiffies(timeout_ms)); if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) rc = -1; if (rc > 0) return 1; else if (!rc) return 0; /* timeout */ else return rc; } /** * bnx2i_ep_tcp_conn_active - check EP state transition * @bnx2i_ep: endpoint pointer * * check if underlying TCP connection is active */ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) { int ret; int cnic_dev_10g = 0; if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) cnic_dev_10g = 1; switch (bnx2i_ep->state) { case EP_STATE_CLEANUP_FAILED: case EP_STATE_OFLD_FAILED: case EP_STATE_DISCONN_TIMEDOUT: ret = 0; break; case EP_STATE_CONNECT_START: case EP_STATE_CONNECT_FAILED: case EP_STATE_CONNECT_COMPL: case EP_STATE_ULP_UPDATE_START: case EP_STATE_ULP_UPDATE_COMPL: case EP_STATE_TCP_FIN_RCVD: case EP_STATE_LOGOUT_SENT: case EP_STATE_LOGOUT_RESP_RCVD: case EP_STATE_ULP_UPDATE_FAILED: ret = 1; break; case EP_STATE_TCP_RST_RCVD: if (cnic_dev_10g) ret = 0; else ret = 1; break; default: ret = 0; } return ret; } /** * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw * @bnx2i_ep: TCP connection (bnx2i endpoint) handle * * executes TCP connection teardown process */ int 
bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) { struct bnx2i_hba *hba = bnx2i_ep->hba; struct cnic_dev *cnic; struct iscsi_session *session = NULL; struct iscsi_conn *conn = NULL; int ret = 0; int close = 0; int close_ret = 0; if (!hba) return 0; cnic = hba->cnic; if (!cnic) return 0; if (bnx2i_ep->state == EP_STATE_IDLE || bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) return 0; if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) goto destroy_conn; if (bnx2i_ep->conn) { conn = bnx2i_ep->conn->cls_conn->dd_data; session = conn->session; } timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; add_timer(&bnx2i_ep->ofld_timer); if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) goto out; if (session) { spin_lock_bh(&session->frwd_lock); if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) { if (session->state == ISCSI_STATE_LOGGING_OUT) { if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { /* Logout sent, but no resp */ printk(KERN_ALERT "bnx2i (%s): WARNING" " logout response was not " "received!\n", bnx2i_ep->hba->netdev->name); } else if (bnx2i_ep->state == EP_STATE_LOGOUT_RESP_RCVD) close = 1; } } else close = 1; spin_unlock_bh(&session->frwd_lock); } bnx2i_ep->state = EP_STATE_DISCONN_START; if (close) close_ret = cnic->cm_close(bnx2i_ep->cm_sk); else close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); if (close_ret) printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n", bnx2i_ep->hba->netdev->name, close, close_ret); else /* wait for option-2 conn teardown */ wait_event_interruptible(bnx2i_ep->ofld_wait, ((bnx2i_ep->state != EP_STATE_DISCONN_START) && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD))); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_ep->ofld_timer); destroy_conn: bnx2i_ep_active_list_del(hba, bnx2i_ep); if (bnx2i_tear_down_conn(hba, bnx2i_ep)) return -EINVAL; out: bnx2i_ep->state = EP_STATE_IDLE; return ret; } /** * bnx2i_ep_disconnect - executes TCP connection teardown process * @ep: TCP connection (iscsi endpoint) handle * * executes TCP connection teardown process */ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) { struct bnx2i_endpoint *bnx2i_ep; struct bnx2i_conn *bnx2i_conn = NULL; struct bnx2i_hba *hba; bnx2i_ep = ep->dd_data; /* driver should not attempt connection cleanup until TCP_CONNECT * completes either successfully or fails. 
Timeout is 9-secs, so * wait for it to complete */ while ((bnx2i_ep->state == EP_STATE_CONNECT_START) && !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ))) msleep(250); if (bnx2i_ep->conn) bnx2i_conn = bnx2i_ep->conn; hba = bnx2i_ep->hba; mutex_lock(&hba->net_dev_lock); if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) goto out; if (bnx2i_ep->state == EP_STATE_IDLE) goto free_resc; if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || (bnx2i_ep->hba_age != hba->age)) { bnx2i_ep_active_list_del(hba, bnx2i_ep); goto free_resc; } /* Do all chip cleanup here */ if (bnx2i_hw_ep_disconnect(bnx2i_ep)) { mutex_unlock(&hba->net_dev_lock); return; } free_resc: bnx2i_free_qp_resc(hba, bnx2i_ep); if (bnx2i_conn) bnx2i_conn->ep = NULL; bnx2i_free_ep(ep); out: mutex_unlock(&hba->net_dev_lock); wake_up_interruptible(&hba->eh_wait); } /** * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler * @shost: scsi host pointer * @params: pointer to buffer containing iscsi path message */ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) { struct bnx2i_hba *hba = iscsi_host_priv(shost); char *buf = (char *) params; u16 len = sizeof(*params); /* handled by cnic driver */ hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, len); return 0; } static umode_t bnx2i_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_PING_TMO: case ISCSI_PARAM_RECV_TMO: case ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_ERL: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_FAST_ABORT: case ISCSI_PARAM_ABORT_TMO: case ISCSI_PARAM_LU_RESET_TMO: case ISCSI_PARAM_TGT_RESET_TMO: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: case ISCSI_PARAM_BOOT_ROOT: case ISCSI_PARAM_BOOT_NIC: case ISCSI_PARAM_BOOT_TARGET: return S_IRUGO; default: return 0; } } return 0; } /* * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template * used while registering with the scsi host and iSCSI transport module. 
*/ static const struct scsi_host_template bnx2i_host_template = { .module = THIS_MODULE, .name = "QLogic Offload iSCSI Initiator", .proc_name = "bnx2i", .queuecommand = iscsi_queuecommand, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .change_queue_depth = scsi_change_queue_depth, .target_alloc = iscsi_target_alloc, .can_queue = 2048, .max_sectors = 127, .cmd_per_lun = 128, .this_id = -1, .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, .shost_groups = bnx2i_dev_groups, .track_queue_depth = 1, .cmd_size = sizeof(struct iscsi_cmd), }; struct iscsi_transport bnx2i_iscsi_transport = { .owner = THIS_MODULE, .name = "bnx2i", .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, .create_session = bnx2i_session_create, .destroy_session = bnx2i_session_destroy, .create_conn = bnx2i_conn_create, .bind_conn = bnx2i_conn_bind, .unbind_conn = iscsi_conn_unbind, .destroy_conn = bnx2i_conn_destroy, .attr_is_visible = bnx2i_attr_is_visible, .set_param = iscsi_set_param, .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = bnx2i_host_get_param, .start_conn = bnx2i_conn_start, .stop_conn = iscsi_conn_stop, .send_pdu = iscsi_conn_send_pdu, .xmit_task = bnx2i_task_xmit, .get_stats = bnx2i_conn_get_stats, /* TCP connect - disconnect - option-2 interface calls */ .get_ep_param = bnx2i_ep_get_param, .ep_connect = bnx2i_ep_connect, .ep_poll = bnx2i_ep_poll, .ep_disconnect = bnx2i_ep_disconnect, .set_path = bnx2i_nl_set_path, /* Error recovery timeout call */ .session_recovery_timedout = iscsi_session_recovery_timedout, .cleanup_task = bnx2i_cleanup_task, };
linux-master
drivers/scsi/bnx2i/bnx2i_iscsi.c
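The BD-table setup paths in the file above (bnx2i_setup_mp_bdt, bnx2i_iscsi_prep_generic_pdu_bd, bnx2i_task_xmit) all split a 64-bit DMA address into 32-bit buffer_addr_lo/buffer_addr_hi halves before handing it to the firmware. The following is a minimal user-space C sketch of that split only; struct fake_bd, fill_bd and the sample address are illustrative stand-ins, not the driver's real types or values.

/* Sketch: split a 64-bit DMA address into the lo/hi halves used by the
 * driver's BD-table code. Everything here is a stand-in for illustration. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_bd {			/* stand-in for struct iscsi_bd */
	uint32_t buffer_addr_lo;
	uint32_t buffer_addr_hi;
	uint32_t buffer_length;
};

static void fill_bd(struct fake_bd *bd, uint64_t dma_addr, uint32_t len)
{
	bd->buffer_addr_lo = (uint32_t)(dma_addr & 0xffffffff);	/* low 32 bits */
	bd->buffer_addr_hi = (uint32_t)(dma_addr >> 32);		/* high 32 bits */
	bd->buffer_length  = len;
}

int main(void)
{
	struct fake_bd bd;

	fill_bd(&bd, 0x123456789abcdef0ULL, 4096);
	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 " len=%" PRIu32 "\n",
	       bd.buffer_addr_lo, bd.buffer_addr_hi, bd.buffer_length);
	return 0;
}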
/* bnx2i.c: QLogic NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa ([email protected]) * Previously Maintained by: Eddie Wai ([email protected]) * Maintained by: [email protected] */ #include "bnx2i.h" static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" #define DRV_MODULE_VERSION "2.7.10.1" #define DRV_MODULE_RELDATE "Jul 16, 2014" static char version[] = "QLogic NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Anil Veerabhadrappa <[email protected]> and " "Eddie Wai <[email protected]>"); MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/57710/57711/57712" "/57800/57810/57840 iSCSI Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static DEFINE_MUTEX(bnx2i_dev_lock); unsigned int event_coal_min = 24; module_param(event_coal_min, int, 0664); MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands"); unsigned int event_coal_div = 2; module_param(event_coal_div, int, 0664); MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); unsigned int en_tcp_dack = 1; module_param(en_tcp_dack, int, 0664); MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); unsigned int error_mask1 = 0x00; module_param(error_mask1, uint, 0664); MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); unsigned int error_mask2 = 0x00; module_param(error_mask2, uint, 0664); MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); unsigned int sq_size; module_param(sq_size, int, 0664); MODULE_PARM_DESC(sq_size, "Configure SQ size"); unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT; module_param(rq_size, int, 0664); MODULE_PARM_DESC(rq_size, "Configure RQ size"); u64 iscsi_error_mask = 0x00; DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); /** * bnx2i_identify_device - identifies NetXtreme II device type * @hba: Adapter structure pointer * @dev: Corresponding cnic device * * This function identifies the NX2 device type and sets appropriate * queue mailbox register access method, 5709 requires driver to * access MBOX regs using *bin* mode */ void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev) { hba->cnic_dev_type = 0; if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 || hba->pci_did == PCI_DEVICE_ID_NX2_5706S) { set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 || hba->pci_did == PCI_DEVICE_ID_NX2_5708S) { set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 || hba->pci_did == PCI_DEVICE_ID_NX2_5709S) { set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); hba->mail_queue_access = BNX2I_MQ_BIN_MODE; } } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); } else { printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n", hba->pci_did); } } /** * get_adapter_list_head - returns head of adapter list */ struct bnx2i_hba *get_adapter_list_head(void) { struct bnx2i_hba *hba = NULL; struct bnx2i_hba *tmp_hba; if (!adapter_count) goto hba_not_found; 
mutex_lock(&bnx2i_dev_lock); list_for_each_entry(tmp_hba, &adapter_list, link) { if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { hba = tmp_hba; break; } } mutex_unlock(&bnx2i_dev_lock); hba_not_found: return hba; } /** * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance * @cnic: pointer to cnic device instance * */ struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic) { struct bnx2i_hba *hba, *temp; mutex_lock(&bnx2i_dev_lock); list_for_each_entry_safe(hba, temp, &adapter_list, link) { if (hba->cnic == cnic) { mutex_unlock(&bnx2i_dev_lock); return hba; } } mutex_unlock(&bnx2i_dev_lock); return NULL; } /** * bnx2i_start - cnic callback to initialize & start adapter instance * @handle: transparent handle pointing to adapter structure * * This function maps adapter structure to pcidev structure and initiates * firmware handshake to enable/initialize on chip iscsi components * This bnx2i - cnic interface api callback is issued after following * 2 conditions are met - * a) underlying network interface is up (marked by event 'NETDEV_UP' * from netdev * b) bnx2i adapter instance is registered */ void bnx2i_start(void *handle) { #define BNX2I_INIT_POLL_TIME (1000 / HZ) struct bnx2i_hba *hba = handle; int i = HZ; /* On some bnx2x devices, it is possible that iSCSI is no * longer supported after firmware is downloaded. In that * case, the iscsi_init_msg will return failure. */ bnx2i_send_fw_iscsi_init_msg(hba); while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); } /** * bnx2i_chip_cleanup - local routine to handle chip cleanup * @hba: Adapter instance to register * * Driver checks if adapter still has any active connections before * executing the cleanup process */ static void bnx2i_chip_cleanup(struct bnx2i_hba *hba) { struct bnx2i_endpoint *bnx2i_ep; struct list_head *pos, *tmp; if (hba->ofld_conns_active) { /* Stage to force the disconnection * This is the case where the daemon is either slow or * not present */ printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active " "connections\n", hba->netdev->name, hba->ofld_conns_active); mutex_lock(&hba->net_dev_lock); list_for_each_safe(pos, tmp, &hba->ep_active_list) { bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link); /* Clean up the chip only */ bnx2i_hw_ep_disconnect(bnx2i_ep); bnx2i_ep->cm_sk = NULL; } mutex_unlock(&hba->net_dev_lock); } } /** * bnx2i_stop - cnic callback to shutdown adapter instance * @handle: transparent handle pointing to adapter structure * * driver checks if adapter is already in shutdown mode, if not start * the shutdown process */ void bnx2i_stop(void *handle) { struct bnx2i_hba *hba = handle; int conns_active; int wait_delay = 1 * HZ; /* check if cleanup happened in GOING_DOWN context */ if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { iscsi_host_for_each_session(hba->shost, bnx2i_drop_session); wait_delay = hba->hba_shutdown_tmo; } /* Wait for inflight offload connection tasks to complete before * proceeding. Forcefully terminate all connection recovery in * progress at the earliest, either in bind(), send_pdu(LOGIN), * or conn_start() */ wait_event_interruptible_timeout(hba->eh_wait, (list_empty(&hba->ep_ofld_list) && list_empty(&hba->ep_destroy_list)), 2 * HZ); /* Wait for all endpoints to be torn down, Chip will be reset once * control returns to network driver. 
So it is required to cleanup and * release all connection resources before returning from this routine. */ while (hba->ofld_conns_active) { conns_active = hba->ofld_conns_active; wait_event_interruptible_timeout(hba->eh_wait, (hba->ofld_conns_active != conns_active), wait_delay); if (hba->ofld_conns_active == conns_active) break; } bnx2i_chip_cleanup(hba); /* This flag should be cleared last so that ep_disconnect() gracefully * cleans up connection context */ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); } /** * bnx2i_init_one - initialize an adapter instance and allocate memory resources * @hba: bnx2i adapter instance * @cnic: cnic device handle * * Global resource lock is held during critical sections below. This routine is * called from either cnic_register_driver() or device hot plug context and * and does majority of device specific initialization */ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) { int rc; mutex_lock(&bnx2i_dev_lock); if (!cnic->max_iscsi_conn) { printk(KERN_ALERT "bnx2i: dev %s does not support " "iSCSI\n", hba->netdev->name); rc = -EOPNOTSUPP; goto out; } hba->cnic = cnic; rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); if (!rc) { hba->age++; set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); list_add_tail(&hba->link, &adapter_list); adapter_count++; } else if (rc == -EBUSY) /* duplicate registration */ printk(KERN_ALERT "bnx2i, duplicate registration" "hba=%p, cnic=%p\n", hba, cnic); else if (rc == -EAGAIN) printk(KERN_ERR "bnx2i, driver not registered\n"); else if (rc == -EINVAL) printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI); else printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); out: mutex_unlock(&bnx2i_dev_lock); return rc; } /** * bnx2i_ulp_init - initialize an adapter instance * @dev: cnic device handle * * Called from cnic_register_driver() context to initialize all enumerated * cnic devices. This routine allocate adapter structure and other * device specific resources. */ void bnx2i_ulp_init(struct cnic_dev *dev) { struct bnx2i_hba *hba; /* Allocate a HBA structure for this device */ hba = bnx2i_alloc_hba(dev); if (!hba) { printk(KERN_ERR "bnx2i init: hba initialization failed\n"); return; } /* Get PCI related information and update hba struct members */ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); if (bnx2i_init_one(hba, dev)) { printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); bnx2i_free_hba(hba); } } /** * bnx2i_ulp_exit - shuts down adapter instance and frees all resources * @dev: cnic device handle * */ void bnx2i_ulp_exit(struct cnic_dev *dev) { struct bnx2i_hba *hba; hba = bnx2i_find_hba_for_cnic(dev); if (!hba) { printk(KERN_INFO "bnx2i_ulp_exit: hba not " "found, dev 0x%p\n", dev); return; } mutex_lock(&bnx2i_dev_lock); list_del_init(&hba->link); adapter_count--; if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } mutex_unlock(&bnx2i_dev_lock); bnx2i_free_hba(hba); } /** * bnx2i_get_stats - Retrieve various statistic from iSCSI offload * @handle: bnx2i_hba * * function callback exported via bnx2i - cnic driver interface to * retrieve various iSCSI offload related statistics. 
*/ int bnx2i_get_stats(void *handle) { struct bnx2i_hba *hba = handle; struct iscsi_stats_info *stats; if (!hba) return -EINVAL; stats = (struct iscsi_stats_info *)hba->cnic->stats_addr; if (!stats) return -ENOMEM; strscpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version)); memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN); stats->max_frame_size = hba->netdev->mtu; stats->txq_size = hba->max_sqes; stats->rxq_size = hba->max_cqes; stats->txq_avg_depth = 0; stats->rxq_avg_depth = 0; GET_STATS_64(hba, stats, rx_pdus); GET_STATS_64(hba, stats, rx_bytes); GET_STATS_64(hba, stats, tx_pdus); GET_STATS_64(hba, stats, tx_bytes); return 0; } /** * bnx2i_cpu_online - Create a receive thread for an online CPU * * @cpu: cpu index for the online cpu */ static int bnx2i_cpu_online(unsigned int cpu) { struct bnx2i_percpu_s *p; struct task_struct *thread; p = &per_cpu(bnx2i_percpu, cpu); thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, cpu_to_node(cpu), "bnx2i_thread/%d", cpu); if (IS_ERR(thread)) return PTR_ERR(thread); /* bind thread to the cpu */ kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); return 0; } static int bnx2i_cpu_offline(unsigned int cpu) { struct bnx2i_percpu_s *p; struct task_struct *thread; struct bnx2i_work *work, *tmp; /* Prevent any new work from being queued for this CPU */ p = &per_cpu(bnx2i_percpu, cpu); spin_lock_bh(&p->p_work_lock); thread = p->iothread; p->iothread = NULL; /* Free all work in the list */ list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); bnx2i_process_scsi_cmd_resp(work->session, work->bnx2i_conn, &work->cqe); kfree(work); } spin_unlock_bh(&p->p_work_lock); if (thread) kthread_stop(thread); return 0; } static enum cpuhp_state bnx2i_online_state; /** * bnx2i_mod_init - module init entry point * * initialize any driver wide global data structures such as endpoint pool, * tcp port manager/queue, sysfs. finally driver will register itself * with the cnic module */ static int __init bnx2i_mod_init(void) { int err; unsigned cpu = 0; struct bnx2i_percpu_s *p; printk(KERN_INFO "%s", version); if (sq_size && !is_power_of_2(sq_size)) sq_size = roundup_pow_of_two(sq_size); bnx2i_scsi_xport_template = iscsi_register_transport(&bnx2i_iscsi_transport); if (!bnx2i_scsi_xport_template) { printk(KERN_ERR "Could not register bnx2i transport.\n"); err = -ENOMEM; goto out; } err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); if (err) { printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); goto unreg_xport; } /* Create percpu kernel threads to handle iSCSI I/O completions */ for_each_possible_cpu(cpu) { p = &per_cpu(bnx2i_percpu, cpu); INIT_LIST_HEAD(&p->work_list); spin_lock_init(&p->p_work_lock); p->iothread = NULL; } err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", bnx2i_cpu_online, bnx2i_cpu_offline); if (err < 0) goto unreg_driver; bnx2i_online_state = err; return 0; unreg_driver: cnic_unregister_driver(CNIC_ULP_ISCSI); unreg_xport: iscsi_unregister_transport(&bnx2i_iscsi_transport); out: return err; } /** * bnx2i_mod_exit - module cleanup/exit entry point * * Global resource lock and host adapter lock is held during critical sections * in this function. 
The driver walks through the adapter list, cleans up * each instance, unregisters the iscsi transport name and finally * unregisters itself with the cnic module */ static void __exit bnx2i_mod_exit(void) { struct bnx2i_hba *hba; mutex_lock(&bnx2i_dev_lock); while (!list_empty(&adapter_list)) { hba = list_entry(adapter_list.next, struct bnx2i_hba, link); list_del(&hba->link); adapter_count--; if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { bnx2i_chip_cleanup(hba); hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } bnx2i_free_hba(hba); } mutex_unlock(&bnx2i_dev_lock); cpuhp_remove_state(bnx2i_online_state); iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); } module_init(bnx2i_mod_init); module_exit(bnx2i_mod_exit);
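bnx2i_mod_init() and bnx2i_mod_exit() above pair cpuhp_setup_state() with cpuhp_remove_state(): when the requested state is CPUHP_AP_ONLINE_DYN, the setup call picks a free slot in the dynamic range and returns its number (kept in bnx2i_online_state), and exactly that number must be handed back at module exit. The sketch below is a minimal, hedged illustration of that pairing; the demo_* names are placeholders, only the cpu-hotplug API itself is real.

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_online_state;	/* dynamic state returned by setup */

/* Placeholder callbacks; a real driver would start/stop per-CPU workers here. */
static int demo_cpu_online(unsigned int cpu)
{
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/*
	 * For CPUHP_AP_ONLINE_DYN a free dynamic state is allocated and its
	 * number is returned (>= 0); negative values are errors. The online
	 * callback is also invoked for CPUs that are already up.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;

	demo_online_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	/* Runs the offline callback on all online CPUs, then releases the state. */
	cpuhp_remove_state(demo_online_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");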
linux-master
drivers/scsi/bnx2i/bnx2i_init.c
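The hotplug callbacks registered in bnx2i_init.c above each manage one I/O kthread bound to the CPU that is coming up or going down, and bnx2i_cpu_offline() detaches the thread under the per-CPU lock and drains queued work before stopping it. Below is a hedged sketch of that create/teardown pattern; the structure, lock and thread names are illustrative stand-ins (and lock/list initialisation is omitted), only the kthread and per-CPU APIs are real.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/topology.h>

struct demo_percpu {
	struct task_struct *iothread;	/* bound worker, NULL while offline */
	spinlock_t lock;		/* protects iothread (and any work list) */
};

static DEFINE_PER_CPU(struct demo_percpu, demo_percpu);

static int demo_io_thread(void *arg)
{
	/* A real worker loops draining a per-CPU work list; elided here. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int demo_cpu_up(unsigned int cpu)
{
	struct demo_percpu *p = &per_cpu(demo_percpu, cpu);
	struct task_struct *thread;

	/* Create the thread on the CPU's own NUMA node, then pin it there. */
	thread = kthread_create_on_node(demo_io_thread, p, cpu_to_node(cpu),
					"demo_io/%d", cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_bind(thread, cpu);
	p->iothread = thread;
	wake_up_process(thread);
	return 0;
}

static int demo_cpu_down(unsigned int cpu)
{
	struct demo_percpu *p = &per_cpu(demo_percpu, cpu);
	struct task_struct *thread;

	/* Detach under the lock so no new work is queued to a dying thread;
	 * a real driver also drains its pending work list here. */
	spin_lock_bh(&p->lock);
	thread = p->iothread;
	p->iothread = NULL;
	spin_unlock_bh(&p->lock);

	if (thread)
		kthread_stop(thread);
	return 0;
}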
/* bnx2fc_debug.c: QLogic Linux FCoE offload driver. * Provides the debug logging helpers (BNX2FC_IO_DBG, BNX2FC_TGT_DBG and * BNX2FC_HBA_DBG) that gate their output on bnx2fc_debug_level. * * Copyright (c) 2008-2013 Broadcom Corporation * Copyright (c) 2014-2016 QLogic Corporation * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * */ #include "bnx2fc.h" void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) { struct va_format vaf; va_list args; if (likely(!(bnx2fc_debug_level & LOG_IO))) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (io_req && io_req->port && io_req->port->lport && io_req->port->lport->host) shost_printk(KERN_INFO, io_req->port->lport->host, PFX "xid:0x%x %pV", io_req->xid, &vaf); else pr_info("NULL %pV", &vaf); va_end(args); } void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...) { struct va_format vaf; va_list args; if (likely(!(bnx2fc_debug_level & LOG_TGT))) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host && tgt->rport) shost_printk(KERN_INFO, tgt->port->lport->host, PFX "port:%x %pV", tgt->rport->port_id, &vaf); else pr_info("NULL %pV", &vaf); va_end(args); } void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...) { struct va_format vaf; va_list args; if (likely(!(bnx2fc_debug_level & LOG_HBA))) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (lport && lport->host) shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf); else pr_info("NULL %pV", &vaf); va_end(args); }
linux-master
drivers/scsi/bnx2fc/bnx2fc_debug.c
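All three helpers in bnx2fc_debug.c rely on the same printk idiom: the caller's format string and va_list are wrapped in a struct va_format and printed through the %pV specifier, so the variadic message is expanded once, inside a single log line, after the level/prefix checks. A stripped-down, hedged version of the pattern (generic helper name and "demo:" prefix, no driver state) looks like this:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Minimal sketch of the struct va_format / %pV idiom; "demo_dbg" and the
 * "demo:" prefix are illustrative, not part of the driver. */
static __printf(1, 2) void demo_dbg(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the wrapped format and arguments in place, exactly once. */
	pr_info("demo: %pV", &vaf);
	va_end(args);
}

A call such as demo_dbg("xid 0x%x\n", 0x10) then emits one "demo: xid 0x10" line, no matter how many format arguments are passed through.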
/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver. * This file contains the low level functions that interact * with the 57712 FCoE firmware. * * Copyright (c) 2008-2013 Broadcom Corporation * Copyright (c) 2014-2016 QLogic Corporation * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi ([email protected]) */ #include "bnx2fc.h" DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, struct fcoe_kcqe *new_cqe_kcqe); static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe); static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe); static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *destroy_kcqe); int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) { struct fcoe_kwqe_stat stat_req; struct kwqe *kwqe_arr[2]; int num_kwqes = 1; int rc = 0; memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; stat_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); kwqe_arr[0] = (struct kwqe *) &stat_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w * * @hba: adapter structure pointer * * Send down the FCoE firmware init KWQEs which initiate the initial handshake * with the f/w.
* */ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) { struct fcoe_kwqe_init1 fcoe_init1; struct fcoe_kwqe_init2 fcoe_init2; struct fcoe_kwqe_init3 fcoe_init3; struct kwqe *kwqe_arr[3]; int num_kwqes = 3; int rc = 0; if (!hba->cnic) { printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); return -ENODEV; } /* fill init1 KWQE */ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); fcoe_init1.num_tasks = hba->max_tasks; fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; fcoe_init1.task_list_pbl_addr_hi = (u32) ((u64) hba->task_ctx_bd_dma >> 32); fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; fcoe_init1.flags = (PAGE_SHIFT << FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; /* fill init2 KWQE */ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; fcoe_init2.hash_tbl_pbl_addr_hi = (u32) ((u64) hba->hash_tbl_pbl_dma >> 32); fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; fcoe_init2.t2_hash_tbl_addr_hi = (u32) ((u64) hba->t2_hash_tbl_dma >> 32); fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) ((u64) hba->t2_hash_tbl_ptr_dma >> 32); fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; /* fill init3 KWQE */ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; /* * enable both cached connection and cached tasks * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both */ fcoe_init3.perf_config = 3; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; kwqe_arr[2] = (struct kwqe *) &fcoe_init3; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) { struct fcoe_kwqe_destroy fcoe_destroy; struct kwqe *kwqe_arr[2]; int num_kwqes = 1; int rc = -1; /* fill destroy KWQE */ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct fc_lport *lport = port->lport; struct bnx2fc_interface *interface = 
port->priv; struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct bnx2fc_hba *hba = interface->hba; struct kwqe *kwqe_arr[4]; struct fcoe_kwqe_conn_offload1 ofld_req1; struct fcoe_kwqe_conn_offload2 ofld_req2; struct fcoe_kwqe_conn_offload3 ofld_req3; struct fcoe_kwqe_conn_offload4 ofld_req4; struct fc_rport_priv *rdata = tgt->rdata; struct fc_rport *rport = tgt->rport; int num_kwqes = 4; u32 port_id; int rc = 0; u16 conn_id; /* Initialize offload request 1 structure */ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; ofld_req1.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); conn_id = (u16)tgt->fcoe_conn_id; ofld_req1.fcoe_conn_id = conn_id; ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; ofld_req1.rq_first_pbe_addr_hi = (u32)((u64) tgt->rq_dma >> 32); ofld_req1.rq_prod = 0x8000; /* Initialize offload request 2 structure */ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; ofld_req2.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); /* Initialize offload request 3 structure */ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; ofld_req3.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req3.vlan_tag = interface->vlan_id << FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; port_id = fc_host_port_id(lport->host); if (port_id == 0) { BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); return -EINVAL; } /* * Store s_id of the initiator for further reference. This will * be used during disable/destroy during linkdown processing as * when the lport is reset, the port_id also is reset to 0 */ tgt->sid = port_id; ofld_req3.s_id[0] = (port_id & 0x000000FF); ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; port_id = rport->port_id; ofld_req3.d_id[0] = (port_id & 0x000000FF); ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; ofld_req3.tx_total_conc_seqs = rdata->max_seq; ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; ofld_req3.rx_max_fc_pay_len = lport->mfs; ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; ofld_req3.rx_open_seqs_exch_c3 = 1; ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); /* set mul_n_port_ids supported flag to 0, until it is supported */ ofld_req3.flags = 0; /* ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); */ /* Info from PLOGI response */ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 
1 : 0) << FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); /* * Info from PRLI response, this info is used for sequence level error * recovery support */ if (tgt->dev_type == TYPE_TAPE) { ofld_req3.flags |= 1 << FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT; ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED) ? 1 : 0) << FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT); } /* vlan flag */ ofld_req3.flags |= (interface->vlan_enabled << FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); /* C2_VALID and ACK flags are not set as they are not supported */ /* Initialize offload request 4 structure */ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; ofld_req4.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; /* local mac */ ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; /* fcf mac */ ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; ofld_req4.confq_pbl_base_addr_hi = (u32)((u64) tgt->confq_pbl_dma >> 32); kwqe_arr[0] = (struct kwqe *) &ofld_req1; kwqe_arr[1] = (struct kwqe *) &ofld_req2; kwqe_arr[2] = (struct kwqe *) &ofld_req3; kwqe_arr[3] = (struct kwqe *) &ofld_req4; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_enable_req - initiates FCoE Session enablement * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_enable_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct kwqe *kwqe_arr[2]; struct bnx2fc_interface *interface = port->priv; struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct bnx2fc_hba *hba = interface->hba; struct fcoe_kwqe_conn_enable_disable enbl_req; struct fc_lport *lport = port->lport; struct fc_rport *rport = tgt->rport; int num_kwqes = 1; int rc = 0; u32 port_id; memset(&enbl_req, 0x00, sizeof(struct fcoe_kwqe_conn_enable_disable)); enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; enbl_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; /* local mac */ enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; 
enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; port_id = fc_host_port_id(lport->host); if (port_id != tgt->sid) { printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x," "sid = 0x%x\n", port_id, tgt->sid); port_id = tgt->sid; } enbl_req.s_id[0] = (port_id & 0x000000FF); enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; port_id = rport->port_id; enbl_req.d_id[0] = (port_id & 0x000000FF); enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; enbl_req.vlan_tag = interface->vlan_id << FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; enbl_req.vlan_flag = interface->vlan_enabled; enbl_req.context_id = tgt->context_id; enbl_req.conn_id = tgt->fcoe_conn_id; kwqe_arr[0] = (struct kwqe *) &enbl_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_disable_req - initiates FCoE Session disable * * @port: port structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_disable_req(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct bnx2fc_interface *interface = port->priv; struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct bnx2fc_hba *hba = interface->hba; struct fcoe_kwqe_conn_enable_disable disable_req; struct kwqe *kwqe_arr[2]; struct fc_rport *rport = tgt->rport; int num_kwqes = 1; int rc = 0; u32 port_id; memset(&disable_req, 0x00, sizeof(struct fcoe_kwqe_conn_enable_disable)); disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; disable_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; port_id = tgt->sid; disable_req.s_id[0] = (port_id & 0x000000FF); disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; port_id = rport->port_id; disable_req.d_id[0] = (port_id & 0x000000FF); disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; disable_req.context_id = tgt->context_id; disable_req.conn_id = tgt->fcoe_conn_id; disable_req.vlan_tag = interface->vlan_id << FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; disable_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; disable_req.vlan_flag = interface->vlan_enabled; kwqe_arr[0] = (struct kwqe *) &disable_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } /** * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy * * @hba: adapter structure pointer * @tgt: bnx2fc_rport structure pointer */ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt) { struct fcoe_kwqe_conn_destroy destroy_req; struct kwqe *kwqe_arr[2]; int num_kwqes = 1; int rc = 0; 
memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; destroy_req.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); destroy_req.context_id = tgt->context_id; destroy_req.conn_id = tgt->fcoe_conn_id; kwqe_arr[0] = (struct kwqe *) &destroy_req; if (hba->cnic && hba->cnic->submit_kwqes) rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); return rc; } static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport) { struct bnx2fc_lport *blport; spin_lock_bh(&hba->hba_lock); list_for_each_entry(blport, &hba->vports, list) { if (blport->lport == lport) { spin_unlock_bh(&hba->hba_lock); return true; } } spin_unlock_bh(&hba->hba_lock); return false; } static void bnx2fc_unsol_els_work(struct work_struct *work) { struct bnx2fc_unsol_els *unsol_els; struct fc_lport *lport; struct bnx2fc_hba *hba; struct fc_frame *fp; unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); lport = unsol_els->lport; fp = unsol_els->fp; hba = unsol_els->hba; if (is_valid_lport(hba, lport)) fc_exch_recv(lport, fp); kfree(unsol_els); } void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, unsigned char *buf, u32 frame_len, u16 l2_oxid) { struct fcoe_port *port = tgt->port; struct fc_lport *lport = port->lport; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_unsol_els *unsol_els; struct fc_frame_header *fh; struct fc_frame *fp; struct sk_buff *skb; u32 payload_len; u32 crc; u8 op; unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); if (!unsol_els) { BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); return; } BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", l2_oxid, frame_len); payload_len = frame_len - sizeof(struct fc_frame_header); fp = fc_frame_alloc(lport, payload_len); if (!fp) { printk(KERN_ERR PFX "fc_frame_alloc failure\n"); kfree(unsol_els); return; } fh = (struct fc_frame_header *) fc_frame_header_get(fp); /* Copy FC Frame header and payload into the frame */ memcpy(fh, buf, frame_len); if (l2_oxid != FC_XID_UNKNOWN) fh->fh_ox_id = htons(l2_oxid); skb = fp_skb(fp); if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { if (fh->fh_type == FC_TYPE_ELS) { op = fc_frame_payload_op(fp); if ((op == ELS_TEST) || (op == ELS_ESTC) || (op == ELS_FAN) || (op == ELS_CSU)) { /* * No need to reply for these * ELS requests */ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); kfree_skb(skb); kfree(unsol_els); return; } } crc = fcoe_fc_crc(fp); fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; fr_crc(fp) = cpu_to_le32(~crc); unsol_els->lport = lport; unsol_els->hba = interface->hba; unsol_els->fp = fp; INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); } else { BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); kfree_skb(skb); kfree(unsol_els); } } static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) { u8 num_rq; struct fcoe_err_report_entry *err_entry; unsigned char *rq_data; unsigned char *buf = NULL, *buf1; int i; u16 xid; u32 frame_len, len; struct bnx2fc_cmd *io_req = NULL; struct bnx2fc_interface *interface = tgt->port->priv; struct bnx2fc_hba *hba = interface->hba; int rc = 0; u64 err_warn_bit_map; u8 err_warn = 0xff; BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { case FCOE_UNSOLICITED_FRAME_CQE_TYPE: frame_len = (wqe & 
FCOE_UNSOLICITED_CQE_PKT_LEN) >> FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; spin_lock_bh(&tgt->tgt_lock); rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); spin_unlock_bh(&tgt->tgt_lock); if (rq_data) { buf = rq_data; } else { buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), GFP_ATOMIC); if (!buf1) { BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); break; } for (i = 0; i < num_rq; i++) { spin_lock_bh(&tgt->tgt_lock); rq_data = (unsigned char *) bnx2fc_get_next_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); len = BNX2FC_RQ_BUF_SZ; memcpy(buf1, rq_data, len); buf1 += len; } } bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, FC_XID_UNKNOWN); if (buf != rq_data) kfree(buf); spin_lock_bh(&tgt->tgt_lock); bnx2fc_return_rqe(tgt, num_rq); spin_unlock_bh(&tgt->tgt_lock); break; case FCOE_ERROR_DETECTION_CQE_TYPE: /* * In case of error reporting CQE a single RQ entry * is consumed. */ spin_lock_bh(&tgt->tgt_lock); num_rq = 1; err_entry = (struct fcoe_err_report_entry *) bnx2fc_get_next_rqe(tgt, 1); xid = err_entry->fc_hdr.ox_id; BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", err_entry->data.err_warn_bitmap_hi, err_entry->data.err_warn_bitmap_lo); BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); if (xid > hba->max_xid) { BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); goto ret_err_rqe; } io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; if (!io_req) goto ret_err_rqe; if (io_req->cmd_type != BNX2FC_SCSI_CMD) { printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); goto ret_err_rqe; } if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " "progress.. ignore unsol err\n"); goto ret_err_rqe; } err_warn_bit_map = (u64) ((u64)err_entry->data.err_warn_bitmap_hi << 32) | (u64)err_entry->data.err_warn_bitmap_lo; for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { if (err_warn_bit_map & (u64)((u64)1 << i)) { err_warn = i; break; } } /* * If ABTS is already in progress, and FW error is * received after that, do not cancel the timeout_work * and let the error recovery continue by explicitly * logging out the target, when the ABTS eventually * times out. */ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " "in ABTS processing\n", xid); goto ret_err_rqe; } BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn); if (tgt->dev_type != TYPE_TAPE) goto skip_rec; switch (err_warn) { case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION: case FCOE_ERROR_CODE_DATA_OOO_RO: case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT: case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET: case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ: case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET: BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n", xid); memcpy(&io_req->err_entry, err_entry, sizeof(struct fcoe_err_report_entry)); if (!test_bit(BNX2FC_FLAG_SRR_SENT, &io_req->req_flags)) { spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_send_rec(io_req); spin_lock_bh(&tgt->tgt_lock); if (rc) goto skip_rec; } else printk(KERN_ERR PFX "SRR in progress\n"); goto ret_err_rqe; default: break; } skip_rec: set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags); /* * Cancel the timeout_work, as we received IO * completion with FW error. 
*/ if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); rc = bnx2fc_initiate_abts(io_req); if (rc != SUCCESS) { printk(KERN_ERR PFX "err_warn: initiate_abts " "failed xid = 0x%x. issue cleanup\n", io_req->xid); bnx2fc_initiate_cleanup(io_req); } ret_err_rqe: bnx2fc_return_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); break; case FCOE_WARNING_DETECTION_CQE_TYPE: /* *In case of warning reporting CQE a single RQ entry * is consumes. */ spin_lock_bh(&tgt->tgt_lock); num_rq = 1; err_entry = (struct fcoe_err_report_entry *) bnx2fc_get_next_rqe(tgt, 1); xid = cpu_to_be16(err_entry->fc_hdr.ox_id); BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", err_entry->data.err_warn_bitmap_hi, err_entry->data.err_warn_bitmap_lo); BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); if (xid > hba->max_xid) { BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); goto ret_warn_rqe; } err_warn_bit_map = (u64) ((u64)err_entry->data.err_warn_bitmap_hi << 32) | (u64)err_entry->data.err_warn_bitmap_lo; for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { if (err_warn_bit_map & ((u64)1 << i)) { err_warn = i; break; } } BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn); io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; if (!io_req) goto ret_warn_rqe; if (io_req->cmd_type != BNX2FC_SCSI_CMD) { printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); goto ret_warn_rqe; } memcpy(&io_req->err_entry, err_entry, sizeof(struct fcoe_err_report_entry)); if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION) /* REC_TOV is not a warning code */ BUG_ON(1); else BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n"); ret_warn_rqe: bnx2fc_return_rqe(tgt, 1); spin_unlock_bh(&tgt->tgt_lock); break; default: printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); break; } } void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, unsigned char *rq_data, u8 num_rq, struct fcoe_task_ctx_entry *task) { struct fcoe_port *port = tgt->port; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_cmd *io_req; u16 xid; u8 cmd_type; u8 rx_state = 0; spin_lock_bh(&tgt->tgt_lock); xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; if (io_req == NULL) { printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); spin_unlock_bh(&tgt->tgt_lock); return; } /* Timestamp IO completion time */ cmd_type = io_req->cmd_type; rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); /* Process other IO completion types */ switch (cmd_type) { case BNX2FC_SCSI_CMD: if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq, rq_data); spin_unlock_bh(&tgt->tgt_lock); return; } if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) bnx2fc_process_abts_compl(io_req, task, num_rq); else if (rx_state == FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) bnx2fc_process_cleanup_compl(io_req, task, num_rq); else printk(KERN_ERR PFX "Invalid rx state - %d\n", rx_state); break; case BNX2FC_TASK_MGMT_CMD: BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data); break; case BNX2FC_ABTS: /* * ABTS request received by firmware. 
ABTS response * will be delivered to the task belonging to the IO * that was aborted */ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); break; case BNX2FC_ELS: if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) bnx2fc_process_els_compl(io_req, task, num_rq); else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) bnx2fc_process_abts_compl(io_req, task, num_rq); else if (rx_state == FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) bnx2fc_process_cleanup_compl(io_req, task, num_rq); else printk(KERN_ERR PFX "Invalid rx state = %d\n", rx_state); break; case BNX2FC_CLEANUP: BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); break; case BNX2FC_SEQ_CLEANUP: BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n", io_req->xid); bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); kref_put(&io_req->refcount, bnx2fc_cmd_release); break; default: printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); break; } spin_unlock_bh(&tgt->tgt_lock); } void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) { struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; u32 msg; wmb(); rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << FCOE_CQE_TOGGLE_BIT_SHIFT); msg = *((u32 *)rx_db); writel(cpu_to_le32(msg), tgt->ctx_base); } static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe, unsigned char *rq_data, u8 num_rq, struct fcoe_task_ctx_entry *task) { struct bnx2fc_work *work; work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); if (!work) return NULL; INIT_LIST_HEAD(&work->list); work->tgt = tgt; work->wqe = wqe; work->num_rq = num_rq; work->task = task; if (rq_data) memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ); return work; } /* Pending work request completion */ static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) { unsigned int cpu = wqe % num_possible_cpus(); struct bnx2fc_percpu_s *fps; struct bnx2fc_work *work; struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct fcoe_port *port = tgt->port; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; unsigned char *rq_data = NULL; unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ]; int task_idx, index; u16 xid; u8 num_rq; int i; xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; if (xid >= hba->max_tasks) { pr_err(PFX "ERROR:xid out of range\n"); return false; } task_idx = xid / BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; task = &task_page[index]; num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ); if (!num_rq) goto num_rq_zero; rq_data = bnx2fc_get_next_rqe(tgt, 1); if (num_rq > 1) { /* We do not need extra sense data */ for (i = 1; i < num_rq; i++) bnx2fc_get_next_rqe(tgt, 1); } if (rq_data) memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ); /* return RQ entries */ for (i = 0; i < num_rq; i++) bnx2fc_return_rqe(tgt, 1); num_rq_zero: fps = &per_cpu(bnx2fc_percpu, cpu); spin_lock_bh(&fps->fp_work_lock); if (fps->iothread) { work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff, num_rq, task); if (work) { list_add_tail(&work->list, &fps->work_list); wake_up_process(fps->iothread); spin_unlock_bh(&fps->fp_work_lock); return true; } } spin_unlock_bh(&fps->fp_work_lock); bnx2fc_process_cq_compl(tgt, wqe, rq_data_buff, num_rq, task); return true; } int 
bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) { struct fcoe_cqe *cq; u32 cq_cons; struct fcoe_cqe *cqe; u32 num_free_sqes = 0; u32 num_cqes = 0; u16 wqe; /* * cq_lock is a low contention lock used to protect * the CQ data structure from being freed up during * the upload operation */ spin_lock_bh(&tgt->cq_lock); if (!tgt->cq) { printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); spin_unlock_bh(&tgt->cq_lock); return 0; } cq = tgt->cq; cq_cons = tgt->cq_cons_idx; cqe = &cq[cq_cons]; while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == (tgt->cq_curr_toggle_bit << FCOE_CQE_TOGGLE_BIT_SHIFT)) { /* new entry on the cq */ if (wqe & FCOE_CQE_CQE_TYPE) { /* Unsolicited event notification */ bnx2fc_process_unsol_compl(tgt, wqe); } else { if (bnx2fc_pending_work(tgt, wqe)) num_free_sqes++; } cqe++; tgt->cq_cons_idx++; num_cqes++; if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { tgt->cq_cons_idx = 0; cqe = cq; tgt->cq_curr_toggle_bit = 1 - tgt->cq_curr_toggle_bit; } } if (num_cqes) { /* Arm CQ only if doorbell is mapped */ if (tgt->ctx_base) bnx2fc_arm_cq(tgt); atomic_add(num_free_sqes, &tgt->free_sqes); } spin_unlock_bh(&tgt->cq_lock); return 0; } /** * bnx2fc_fastpath_notification - process global event queue (KCQ) * * @hba: adapter structure pointer * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry * * Fast path event notification handler */ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, struct fcoe_kcqe *new_cqe_kcqe) { u32 conn_id = new_cqe_kcqe->fcoe_conn_id; struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id); return; } bnx2fc_process_new_cqes(tgt); } /** * bnx2fc_process_ofld_cmpl - process FCoE session offload completion * * @hba: adapter structure pointer * @ofld_kcqe: connection offload kcqe pointer * * handle session offload completion, enable the session if offload is * successful. */ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe) { struct bnx2fc_rport *tgt; struct bnx2fc_interface *interface; u32 conn_id; u32 context_id; conn_id = ofld_kcqe->fcoe_conn_id; context_id = ofld_kcqe->fcoe_conn_context_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); return; } BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", ofld_kcqe->fcoe_conn_context_id); interface = tgt->port->priv; if (hba != interface->hba) { printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n"); goto ofld_cmpl_err; } /* * cnic has allocated a context_id for this session; use this * while enabling the session. 
*/ tgt->context_id = context_id; if (ofld_kcqe->completion_status) { if (ofld_kcqe->completion_status == FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { printk(KERN_ERR PFX "unable to allocate FCoE context " "resources\n"); set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); } } else { /* FW offload request successfully completed */ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); } ofld_cmpl_err: set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); wake_up_interruptible(&tgt->ofld_wait); } /** * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion * * @hba: adapter structure pointer * @ofld_kcqe: connection offload kcqe pointer * * handle session enable completion, mark the rport as ready */ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *ofld_kcqe) { struct bnx2fc_rport *tgt; struct bnx2fc_interface *interface; u32 conn_id; u32 context_id; context_id = ofld_kcqe->fcoe_conn_context_id; conn_id = ofld_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n"); return; } BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", ofld_kcqe->fcoe_conn_context_id); /* * context_id should be the same for this target during offload * and enable */ if (tgt->context_id != context_id) { printk(KERN_ERR PFX "context id mismatch\n"); return; } interface = tgt->port->priv; if (hba != interface->hba) { printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n"); goto enbl_cmpl_err; } if (!ofld_kcqe->completion_status) /* enable successful - rport ready for issuing IOs */ set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); enbl_cmpl_err: set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); wake_up_interruptible(&tgt->ofld_wait); } static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *disable_kcqe) { struct bnx2fc_rport *tgt; u32 conn_id; conn_id = disable_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n"); return; } BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); if (disable_kcqe->completion_status) { printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", disable_kcqe->completion_status); set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags); set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); wake_up_interruptible(&tgt->upld_wait); } else { /* disable successful */ BNX2FC_TGT_DBG(tgt, "disable successful\n"); clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); wake_up_interruptible(&tgt->upld_wait); } } static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, struct fcoe_kcqe *destroy_kcqe) { struct bnx2fc_rport *tgt; u32 conn_id; conn_id = destroy_kcqe->fcoe_conn_id; tgt = hba->tgt_ofld_list[conn_id]; if (!tgt) { printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n"); return; } BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); if (destroy_kcqe->completion_status) { printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n", destroy_kcqe->completion_status); return; } else { /* destroy successful */ BNX2FC_TGT_DBG(tgt, "upload successful\n"); clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags); set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); wake_up_interruptible(&tgt->upld_wait); } } static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) { switch (err_code) { case 
FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE: printk(KERN_ERR PFX "init_failure due to invalid opcode\n"); break; case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE: printk(KERN_ERR PFX "init failed due to ctx alloc failure\n"); break; case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: printk(KERN_ERR PFX "init_failure due to NIC error\n"); break; case FCOE_KCQE_COMPLETION_STATUS_ERROR: printk(KERN_ERR PFX "init failure due to compl status err\n"); break; case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); break; default: printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); } } /** * bnx2fc_indicate_kcqe() - process KCQE * * @context: adapter structure pointer * @kcq: kcqe pointer * @num_cqe: Number of completion queue elements * * Generic KCQ event handler */ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], u32 num_cqe) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; int i = 0; struct fcoe_kcqe *kcqe = NULL; while (i < num_cqe) { kcqe = (struct fcoe_kcqe *) kcq[i++]; switch (kcqe->op_code) { case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: bnx2fc_fastpath_notification(hba, kcqe); break; case FCOE_KCQE_OPCODE_OFFLOAD_CONN: bnx2fc_process_ofld_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_ENABLE_CONN: bnx2fc_process_enable_conn_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_INIT_FUNC: if (kcqe->completion_status != FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { bnx2fc_init_failure(hba, kcqe->completion_status); } else { set_bit(ADAPTER_STATE_UP, &hba->adapter_state); bnx2fc_get_link_state(hba); printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", (u8)hba->pcidev->bus->number); } break; case FCOE_KCQE_OPCODE_DESTROY_FUNC: if (kcqe->completion_status != FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { printk(KERN_ERR PFX "DESTROY failed\n"); } else { printk(KERN_ERR PFX "DESTROY success\n"); } set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); break; case FCOE_KCQE_OPCODE_DISABLE_CONN: bnx2fc_process_conn_disable_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_DESTROY_CONN: bnx2fc_process_conn_destroy_cmpl(hba, kcqe); break; case FCOE_KCQE_OPCODE_STAT_FUNC: if (kcqe->completion_status != FCOE_KCQE_COMPLETION_STATUS_SUCCESS) printk(KERN_ERR PFX "STAT failed\n"); complete(&hba->stat_req_done); break; case FCOE_KCQE_OPCODE_FCOE_ERROR: default: printk(KERN_ERR PFX "unknown opcode 0x%x\n", kcqe->op_code); } } } void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) { struct fcoe_sqe *sqe; sqe = &tgt->sq[tgt->sq_prod_idx]; /* Fill SQ WQE */ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; /* Advance SQ Prod Idx */ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { tgt->sq_prod_idx = 0; tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; } } void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) { struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; u32 msg; wmb(); sq_db->prod = tgt->sq_prod_idx | (tgt->sq_curr_toggle_bit << 15); msg = *((u32 *)sq_db); writel(cpu_to_le32(msg), tgt->ctx_base); } int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) { u32 context_id = tgt->context_id; struct fcoe_port *port = tgt->port; u32 reg_off; resource_size_t reg_base; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); tgt->ctx_base = ioremap(reg_base + reg_off, 4); if (!tgt->ctx_base) return -ENOMEM; return 
0; } char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) { char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) return NULL; tgt->rq_cons_idx += num_items; if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; return buf; } void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) { /* return the rq buffer */ u32 next_prod_idx = tgt->rq_prod_idx + num_items; if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { /* Wrap around RQ */ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; } tgt->rq_prod_idx = next_prod_idx; tgt->conn_db->rq_prod = tgt->rq_prod_idx; } void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, struct fcoe_task_ctx_entry *task, struct bnx2fc_cmd *orig_io_req, u32 offset) { struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd; struct bnx2fc_rport *tgt = seq_clnp_req->tgt; struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl; struct fcoe_ext_mul_sges_ctx *sgl; u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP; u8 orig_task_type; u16 orig_xid = orig_io_req->xid; u32 context_id = tgt->context_id; u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma; u32 orig_offset = offset; int bd_count; int i; memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) orig_task_type = FCOE_TASK_TYPE_WRITE; else orig_task_type = FCOE_TASK_TYPE_READ; /* Tx flags */ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP << FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* init flags */ task->txwr_rxrd.const_ctx.init_flags = task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; task->rxwr_txrd.const_ctx.init_flags = context_id << FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; task->rxwr_txrd.const_ctx.init_flags = context_id << FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; bd_count = orig_io_req->bd_tbl->bd_valid; /* obtain the appropriate bd entry from relative offset */ for (i = 0; i < bd_count; i++) { if (offset < bd[i].buf_len) break; offset -= bd[i].buf_len; } phys_addr += (i * sizeof(struct fcoe_bd_ctx)); if (orig_task_type == FCOE_TASK_TYPE_WRITE) { task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = (u32)phys_addr; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = bd_count; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = offset; /* adjusted offset */ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; } else { /* Multiple SGEs were used for this IO */ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); sgl->mul_sgl.sgl_size = bd_count; sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */ sgl->mul_sgl.cur_sge_idx = i; memset(&task->rxwr_only.rx_seq_ctx, 0, sizeof(struct fcoe_rx_seq_ctx)); task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; } } void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u16 orig_xid) { u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; struct bnx2fc_rport *tgt = io_req->tgt; u32 context_id = tgt->context_id; 
memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Tx Write Rx Read */ /* init flags */ task->txwr_rxrd.const_ctx.init_flags = task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; if (tgt->dev_type == TYPE_TAPE) task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_TAPE << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; else task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_DISK << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; /* Tx flags */ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* Rx Read Tx Write */ task->rxwr_txrd.const_ctx.init_flags = context_id << FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; task->rxwr_txrd.var_ctx.rx_flags |= 1 << FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; } void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) { struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); struct bnx2fc_rport *tgt = io_req->tgt; struct fc_frame_header *fc_hdr; struct fcoe_ext_mul_sges_ctx *sgl; u8 task_type = 0; u64 *hdr; u64 temp_hdr[3]; u32 context_id; /* Obtain task_type */ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || (io_req->cmd_type == BNX2FC_ELS)) { task_type = FCOE_TASK_TYPE_MIDPATH; } else if (io_req->cmd_type == BNX2FC_ABTS) { task_type = FCOE_TASK_TYPE_ABTS; } memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Setup the task from io_req for easy reference */ io_req->task = task; BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", io_req->cmd_type, task_type); /* Tx only */ if ((task_type == FCOE_TASK_TYPE_MIDPATH) || (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_req_bd_dma; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = (u32)((u64)mp_req->mp_req_bd_dma >> 32); task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; } /* Tx Write Rx Read */ /* init flags */ task->txwr_rxrd.const_ctx.init_flags = task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; if (tgt->dev_type == TYPE_TAPE) task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_TAPE << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; else task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_DISK << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; /* tx flags */ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* Rx Write Tx Read */ task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; /* rx flags */ task->rxwr_txrd.var_ctx.rx_flags |= 1 << FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; context_id = tgt->context_id; task->rxwr_txrd.const_ctx.init_flags = context_id << FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; fc_hdr = &(mp_req->req_fc_hdr); if (task_type == FCOE_TASK_TYPE_MIDPATH) { fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); fc_hdr->fh_rx_id = htons(0xffff); task->rxwr_txrd.var_ctx.rx_id = 0xffff; } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); } /* Fill FC Header into middle path buffer */ hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); hdr[2] = cpu_to_be64(temp_hdr[2]); /* Rx Only */ 
if (task_type == FCOE_TASK_TYPE_MIDPATH) { sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)mp_req->mp_resp_bd_dma >> 32); sgl->mul_sgl.sgl_size = 1; } } void bnx2fc_init_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) { u8 task_type; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct io_bdt *bd_tbl = io_req->bd_tbl; struct bnx2fc_rport *tgt = io_req->tgt; struct fcoe_cached_sge_ctx *cached_sge; struct fcoe_ext_mul_sges_ctx *sgl; int dev_type = tgt->dev_type; u64 *fcp_cmnd; u64 tmp_fcp_cmnd[4]; u32 context_id; int cnt, i; int bd_count; memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Setup the task from io_req for easy reference */ io_req->task = task; if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) task_type = FCOE_TASK_TYPE_WRITE; else task_type = FCOE_TASK_TYPE_READ; /* Tx only */ bd_count = bd_tbl->bd_valid; cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; if (task_type == FCOE_TASK_TYPE_WRITE) { if ((dev_type == TYPE_DISK) && (bd_count == 1)) { struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; task->txwr_rxrd.const_ctx.init_flags |= 1 << FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; } else { task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = bd_tbl->bd_valid; } } /*Tx Write Rx Read */ /* Init state to NORMAL */ task->txwr_rxrd.const_ctx.init_flags |= task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; if (dev_type == TYPE_TAPE) { task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_TAPE << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; io_req->rec_retry = 0; io_req->rec_retry = 0; } else task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_DISK << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; /* tx flags */ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* Set initial seq counter */ task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; /* Fill FCP_CMND IU */ fcp_cmnd = (u64 *) task->txwr_rxrd.union_ctx.fcp_cmd.opaque; bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); /* swap fcp_cmnd */ cnt = sizeof(struct fcp_cmnd) / sizeof(u64); for (i = 0; i < cnt; i++) { *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); fcp_cmnd++; } /* Rx Write Tx Read */ task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; context_id = tgt->context_id; task->rxwr_txrd.const_ctx.init_flags = context_id << FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; /* rx flags */ /* Set state to "waiting for the first packet" */ task->rxwr_txrd.var_ctx.rx_flags |= 1 << FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; task->rxwr_txrd.var_ctx.rx_id = 0xffff; /* Rx Only */ if (task_type != FCOE_TASK_TYPE_READ) return; sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; bd_count = bd_tbl->bd_valid; if (dev_type == TYPE_DISK) { if (bd_count == 1) { struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; 
cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; task->txwr_rxrd.const_ctx.init_flags |= 1 << FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; } else if (bd_count == 2) { struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; fcoe_bd_tbl++; cached_sge->second_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; cached_sge->second_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; task->txwr_rxrd.const_ctx.init_flags |= 1 << FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; } else { sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); sgl->mul_sgl.sgl_size = bd_count; } } else { sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); sgl->mul_sgl.sgl_size = bd_count; } } /** * bnx2fc_setup_task_ctx - allocate and map task context * * @hba: pointer to adapter structure * * allocate memory for task context, and associated BD table to be used * by firmware * */ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) { int rc = 0; struct regpair *task_ctx_bdt; dma_addr_t addr; int task_ctx_arr_sz; int i; /* * Allocate task context bd table. A page size of bd table * can map 256 buffers. Each buffer contains 32 task context * entries. Hence the limit with one page is 8192 task context * entries. */ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->task_ctx_bd_dma, GFP_KERNEL); if (!hba->task_ctx_bd_tbl) { printk(KERN_ERR PFX "unable to allocate task context BDT\n"); rc = -1; goto out; } /* * Allocate task_ctx which is an array of pointers pointing to * a page containing 32 task contexts */ task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)), GFP_KERNEL); if (!hba->task_ctx) { printk(KERN_ERR PFX "unable to allocate task context array\n"); rc = -1; goto out1; } /* * Allocate task_ctx_dma which is an array of dma addresses */ hba->task_ctx_dma = kmalloc((task_ctx_arr_sz * sizeof(dma_addr_t)), GFP_KERNEL); if (!hba->task_ctx_dma) { printk(KERN_ERR PFX "unable to alloc context mapping array\n"); rc = -1; goto out2; } task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; for (i = 0; i < task_ctx_arr_sz; i++) { hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->task_ctx_dma[i], GFP_KERNEL); if (!hba->task_ctx[i]) { printk(KERN_ERR PFX "unable to alloc task context\n"); rc = -1; goto out3; } addr = (u64)hba->task_ctx_dma[i]; task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); task_ctx_bdt->lo = cpu_to_le32((u32)addr); task_ctx_bdt++; } return 0; out3: for (i = 0; i < task_ctx_arr_sz; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], hba->task_ctx_dma[i]); hba->task_ctx[i] = NULL; } } kfree(hba->task_ctx_dma); hba->task_ctx_dma = NULL; out2: kfree(hba->task_ctx); hba->task_ctx = NULL; out1: dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); hba->task_ctx_bd_tbl = NULL; out: return rc; } void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) { int task_ctx_arr_sz; int i; if (hba->task_ctx_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); 
hba->task_ctx_bd_tbl = NULL; } task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); if (hba->task_ctx) { for (i = 0; i < task_ctx_arr_sz; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], hba->task_ctx_dma[i]); hba->task_ctx[i] = NULL; } } kfree(hba->task_ctx); hba->task_ctx = NULL; } kfree(hba->task_ctx_dma); hba->task_ctx_dma = NULL; } static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) { int i; int segment_count; u32 *pbl; if (hba->hash_tbl_segments) { pbl = hba->hash_tbl_pbl; if (pbl) { segment_count = hba->hash_tbl_segment_count; for (i = 0; i < segment_count; ++i) { dma_addr_t dma_address; dma_address = le32_to_cpu(*pbl); ++pbl; dma_address += ((u64)le32_to_cpu(*pbl)) << 32; ++pbl; dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, hba->hash_tbl_segments[i], dma_address); } } kfree(hba->hash_tbl_segments); hba->hash_tbl_segments = NULL; } if (hba->hash_tbl_pbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->hash_tbl_pbl, hba->hash_tbl_pbl_dma); hba->hash_tbl_pbl = NULL; } } static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) { int i; int hash_table_size; int segment_count; int segment_array_size; int dma_segment_array_size; dma_addr_t *dma_segment_array; u32 *pbl; hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * sizeof(struct fcoe_hash_table_entry); segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; hba->hash_tbl_segment_count = segment_count; segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); if (!hba->hash_tbl_segments) { printk(KERN_ERR PFX "hash table pointers alloc failed\n"); return -ENOMEM; } dma_segment_array_size = segment_count * sizeof(*dma_segment_array); dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); if (!dma_segment_array) { printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); goto cleanup_ht; } for (i = 0; i < segment_count; ++i) { hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, &dma_segment_array[i], GFP_KERNEL); if (!hba->hash_tbl_segments[i]) { printk(KERN_ERR PFX "hash segment alloc failed\n"); goto cleanup_dma; } } hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->hash_tbl_pbl_dma, GFP_KERNEL); if (!hba->hash_tbl_pbl) { printk(KERN_ERR PFX "hash table pbl alloc failed\n"); goto cleanup_dma; } pbl = hba->hash_tbl_pbl; for (i = 0; i < segment_count; ++i) { u64 paddr = dma_segment_array[i]; *pbl = cpu_to_le32((u32) paddr); ++pbl; *pbl = cpu_to_le32((u32) (paddr >> 32)); ++pbl; } pbl = hba->hash_tbl_pbl; i = 0; while (*pbl && *(pbl + 1)) { ++pbl; ++pbl; ++i; } kfree(dma_segment_array); return 0; cleanup_dma: for (i = 0; i < segment_count; ++i) { if (hba->hash_tbl_segments[i]) dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, hba->hash_tbl_segments[i], dma_segment_array[i]); } kfree(dma_segment_array); cleanup_ht: kfree(hba->hash_tbl_segments); hba->hash_tbl_segments = NULL; return -ENOMEM; } /** * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer * * @hba: Pointer to adapter structure * */ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) { u64 addr; u32 mem_size; int i; if (bnx2fc_allocate_hash_table(hba)) return -ENOMEM; mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, &hba->t2_hash_tbl_ptr_dma, 
GFP_KERNEL); if (!hba->t2_hash_tbl_ptr) { printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, &hba->t2_hash_tbl_dma, GFP_KERNEL); if (!hba->t2_hash_tbl) { printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { addr = (unsigned long) hba->t2_hash_tbl_dma + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; hba->t2_hash_tbl[i].next.hi = addr >> 32; } hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->stats_buf_dma, GFP_KERNEL); if (!hba->stats_buffer) { printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } return 0; } void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) { u32 mem_size; if (hba->stats_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->stats_buffer, hba->stats_buf_dma); hba->stats_buffer = NULL; } if (hba->dummy_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } if (hba->t2_hash_tbl_ptr) { mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); dma_free_coherent(&hba->pcidev->dev, mem_size, hba->t2_hash_tbl_ptr, hba->t2_hash_tbl_ptr_dma); hba->t2_hash_tbl_ptr = NULL; } if (hba->t2_hash_tbl) { mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); dma_free_coherent(&hba->pcidev->dev, mem_size, hba->t2_hash_tbl, hba->t2_hash_tbl_dma); hba->t2_hash_tbl = NULL; } bnx2fc_free_hash_table(hba); }
linux-master
drivers/scsi/bnx2fc/bnx2fc_hwi.c
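The bnx2fc_setup_task_ctx() path above fills a one-page BD table in which every regpair entry carries the DMA address of a task-context page split into 32-bit halves. Below is a minimal user-space sketch of just that hi/lo split, not driver code: struct regpair_demo, fill_bd_entry and the sample address are invented for illustration, and the kernel's cpu_to_le32() byte-swapping is omitted (a little-endian host is assumed).

/* Stand-alone sketch of the address-split pattern used when the task-context
 * BD table is populated: each per-page DMA address becomes a {hi, lo} pair. */
#include <stdint.h>
#include <stdio.h>

struct regpair_demo {
	uint32_t hi;
	uint32_t lo;
};

/* Split a 64-bit bus address into the hi/lo halves stored in the BD table. */
static void fill_bd_entry(struct regpair_demo *bd, uint64_t dma_addr)
{
	bd->hi = (uint32_t)(dma_addr >> 32);	/* upper 32 bits */
	bd->lo = (uint32_t)dma_addr;		/* lower 32 bits */
}

int main(void)
{
	struct regpair_demo bd;
	uint64_t fake_dma = 0x0000001234567000ULL;	/* pretend page address */

	fill_bd_entry(&bd, fake_dma);
	printf("hi=0x%08x lo=0x%08x\n", (unsigned int)bd.hi, (unsigned int)bd.lo);
	return 0;
}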
/* bnx2fc_tgt.c: QLogic Linux FCoE offload driver. * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources. * * Copyright (c) 2008-2013 Broadcom Corporation * Copyright (c) 2014-2016 QLogic Corporation * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi ([email protected]) */ #include "bnx2fc.h" static void bnx2fc_upld_timer(struct timer_list *t); static void bnx2fc_ofld_timer(struct timer_list *t); static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, struct fcoe_port *port, struct fc_rport_priv *rdata); static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt); static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt); static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt); static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); static void bnx2fc_upld_timer(struct timer_list *t) { struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer); BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n"); /* fake upload completion */ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); wake_up_interruptible(&tgt->upld_wait); } static void bnx2fc_ofld_timer(struct timer_list *t) { struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer); BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n"); /* NOTE: This function should never be called, as * offload should never timeout */ /* * If the timer has expired, this session is dead * Clear offloaded flag and logout of this device. * Since OFFLOADED flag is cleared, this case * will be considered as offload error and the * port will be logged off, and conn_id, session * resources are freed up in bnx2fc_offload_session */ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); wake_up_interruptible(&tgt->ofld_wait); } static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt) { timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0); mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT); wait_event_interruptible(tgt->ofld_wait, (test_bit( BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags))); if (signal_pending(current)) flush_signals(current); del_timer_sync(&tgt->ofld_timer); } static void bnx2fc_offload_session(struct fcoe_port *port, struct bnx2fc_rport *tgt, struct fc_rport_priv *rdata) { struct fc_rport *rport = rdata->rport; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; int rval; int i = 0; /* Initialize bnx2fc_rport */ /* NOTE: tgt is already bzero'd */ rval = bnx2fc_init_tgt(tgt, port, rdata); if (rval) { printk(KERN_ERR PFX "Failed to allocate conn id for " "port_id (%6x)\n", rport->port_id); goto tgt_init_err; } /* Allocate session resources */ rval = bnx2fc_alloc_session_resc(hba, tgt); if (rval) { printk(KERN_ERR PFX "Failed to allocate resources\n"); goto ofld_err; } /* * Initialize FCoE session offload process. 
* Upon completion of offload process add * rport to list of rports */ retry_ofld: clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); rval = bnx2fc_send_session_ofld_req(port, tgt); if (rval) { printk(KERN_ERR PFX "ofld_req failed\n"); goto ofld_err; } /* * wait for the session is offloaded and enabled. 3 Secs * should be ample time for this process to complete. */ bnx2fc_ofld_wait(tgt); if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags)) { BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " "retry ofld..%d\n", i++); msleep_interruptible(1000); if (i > 3) { i = 0; goto ofld_err; } goto retry_ofld; } goto ofld_err; } if (bnx2fc_map_doorbell(tgt)) { printk(KERN_ERR PFX "map doorbell failed - no mem\n"); goto ofld_err; } clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); rval = bnx2fc_send_session_enable_req(port, tgt); if (rval) { pr_err(PFX "enable session failed\n"); goto ofld_err; } bnx2fc_ofld_wait(tgt); if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) goto ofld_err; return; ofld_err: /* couldn't offload the session. log off from this rport */ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); /* Free session resources */ bnx2fc_free_session_resc(hba, tgt); tgt_init_err: if (tgt->fcoe_conn_id != -1) bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); fc_rport_logoff(rdata); } void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) { struct bnx2fc_cmd *io_req; struct bnx2fc_cmd *tmp; int rc; int i = 0; BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", tgt->num_active_ios.counter); spin_lock_bh(&tgt->tgt_lock); tgt->flush_in_prog = 1; list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) { i++; list_del_init(&io_req->link); io_req->on_active_queue = 0; BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); if (cancel_delayed_work(&io_req->timeout_work)) { if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags)) { /* Handle eh_abort timeout */ BNX2FC_IO_DBG(io_req, "eh_abort for IO " "cleaned up\n"); complete(&io_req->abts_done); } kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ } set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); /* Do not issue cleanup when disable request failed */ if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); else { rc = bnx2fc_initiate_cleanup(io_req); BUG_ON(rc); } } list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) { i++; list_del_init(&io_req->link); io_req->on_tmf_queue = 0; BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n"); if (io_req->wait_for_abts_comp) complete(&io_req->abts_done); } list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) { i++; list_del_init(&io_req->link); io_req->on_active_queue = 0; BNX2FC_IO_DBG(io_req, "els_queue cleanup\n"); if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ if ((io_req->cb_func) && (io_req->cb_arg)) { io_req->cb_func(io_req->cb_arg); io_req->cb_arg = NULL; } /* Do not issue cleanup when disable request failed */ if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); else { rc = bnx2fc_initiate_cleanup(io_req); BUG_ON(rc); } } list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) { i++; list_del_init(&io_req->link); BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); if 
(cancel_delayed_work(&io_req->timeout_work)) { if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags)) { /* Handle eh_abort timeout */ BNX2FC_IO_DBG(io_req, "eh_abort for IO " "in retire_q\n"); if (io_req->wait_for_abts_comp) complete(&io_req->abts_done); } kref_put(&io_req->refcount, bnx2fc_cmd_release); } clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); } BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i); i = 0; spin_unlock_bh(&tgt->tgt_lock); /* wait for active_ios to go to 0 */ while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT)) msleep(25); if (tgt->num_active_ios.counter != 0) printk(KERN_ERR PFX "CLEANUP on port 0x%x:" " active_ios = %d\n", tgt->rdata->ids.port_id, tgt->num_active_ios.counter); spin_lock_bh(&tgt->tgt_lock); tgt->flush_in_prog = 0; spin_unlock_bh(&tgt->tgt_lock); } static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt) { timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0); mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT); wait_event_interruptible(tgt->upld_wait, (test_bit( BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags))); if (signal_pending(current)) flush_signals(current); del_timer_sync(&tgt->upld_timer); } static void bnx2fc_upload_session(struct fcoe_port *port, struct bnx2fc_rport *tgt) { struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", tgt->num_active_ios.counter); /* * Called with hba->hba_mutex held. * This is a blocking call */ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); bnx2fc_send_session_disable_req(port, tgt); /* * wait for upload to complete. 3 Secs * should be sufficient time for this process to complete. */ BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n"); bnx2fc_upld_wait(tgt); /* * traverse thru the active_q and tmf_q and cleanup * IOs in these lists */ BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n", tgt->flags); bnx2fc_flush_active_ios(tgt); /* Issue destroy KWQE */ if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) { BNX2FC_TGT_DBG(tgt, "send destroy req\n"); clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); bnx2fc_send_session_destroy_req(hba, tgt); /* wait for destroy to complete */ bnx2fc_upld_wait(tgt); if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags))) printk(KERN_ERR PFX "ERROR!! destroy timed out\n"); BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n", tgt->flags); } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) { printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy" " not sent to FW\n"); } else { printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy" " not sent to FW\n"); } /* Free session resources */ bnx2fc_free_session_resc(hba, tgt); bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); } static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, struct fcoe_port *port, struct fc_rport_priv *rdata) { struct fc_rport *rport = rdata->rport; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; tgt->rport = rport; tgt->rdata = rdata; tgt->port = port; if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) { BNX2FC_TGT_DBG(tgt, "exceeded max sessions. 
logoff this tgt\n"); tgt->fcoe_conn_id = -1; return -1; } tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt); if (tgt->fcoe_conn_id == -1) return -1; BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id); tgt->max_sqes = BNX2FC_SQ_WQES_MAX; tgt->max_rqes = BNX2FC_RQ_WQES_MAX; tgt->max_cqes = BNX2FC_CQ_WQES_MAX; atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX); /* Initialize the toggle bit */ tgt->sq_curr_toggle_bit = 1; tgt->cq_curr_toggle_bit = 1; tgt->sq_prod_idx = 0; tgt->cq_cons_idx = 0; tgt->rq_prod_idx = 0x8000; tgt->rq_cons_idx = 0; atomic_set(&tgt->num_active_ios, 0); tgt->retry_delay_timestamp = 0; if (rdata->flags & FC_RP_FLAGS_RETRY && rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { tgt->dev_type = TYPE_TAPE; tgt->io_timeout = 0; /* use default ULP timeout */ } else { tgt->dev_type = TYPE_DISK; tgt->io_timeout = BNX2FC_IO_TIMEOUT; } /* initialize sq doorbell */ sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; /* initialize rx doorbell */ rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) | (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) | (B577XX_FCOE_CONNECTION_TYPE << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT)); rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) | (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT); spin_lock_init(&tgt->tgt_lock); spin_lock_init(&tgt->cq_lock); /* Initialize active_cmd_queue list */ INIT_LIST_HEAD(&tgt->active_cmd_queue); /* Initialize IO retire queue */ INIT_LIST_HEAD(&tgt->io_retire_queue); INIT_LIST_HEAD(&tgt->els_queue); /* Initialize active_tm_queue list */ INIT_LIST_HEAD(&tgt->active_tm_queue); init_waitqueue_head(&tgt->ofld_wait); init_waitqueue_head(&tgt->upld_wait); return 0; } /* * This event_callback is called after successful completion of libfc * initiated target login. bnx2fc can proceed with initiating the session * establishment. */ void bnx2fc_rport_event_handler(struct fc_lport *lport, struct fc_rport_priv *rdata, enum fc_rport_event event) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct fc_rport *rport = rdata->rport; struct fc_rport_libfc_priv *rp; struct bnx2fc_rport *tgt; u32 port_id; BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n", event, rdata->ids.port_id); switch (event) { case RPORT_EV_READY: if (!rport) { printk(KERN_ERR PFX "rport is NULL: ERROR!\n"); break; } rp = rport->dd_data; if (rport->port_id == FC_FID_DIR_SERV) { /* * bnx2fc_rport structure doesn't exist for * directory server. * We should not come here, as lport will * take care of fabric login */ printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n", rdata->ids.port_id); break; } if (rdata->spp_type != FC_TYPE_FCP) { BNX2FC_HBA_DBG(lport, "not FCP type target." " not offloading\n"); break; } if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { BNX2FC_HBA_DBG(lport, "not FCP_TARGET" " not offloading\n"); break; } /* * Offload process is protected with hba mutex. * Use the same mutex_lock for upload process too */ mutex_lock(&hba->hba_mutex); tgt = (struct bnx2fc_rport *)&rp[1]; /* This can happen when ADISC finds the same target */ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { BNX2FC_TGT_DBG(tgt, "already offloaded\n"); mutex_unlock(&hba->hba_mutex); return; } /* * Offload the session. This is a blocking call, and will * wait until the session is offloaded. 
*/ bnx2fc_offload_session(port, tgt, rdata); BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n", hba->num_ofld_sess); if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { /* Session is offloaded and enabled. */ BNX2FC_TGT_DBG(tgt, "sess offloaded\n"); /* This counter is protected with hba mutex */ hba->num_ofld_sess++; set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); } else { /* * Offload or enable would have failed. * In offload/enable completion path, the * rport would have already been removed */ BNX2FC_TGT_DBG(tgt, "Port is being logged off as " "offloaded flag not set\n"); } mutex_unlock(&hba->hba_mutex); break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: port_id = rdata->ids.port_id; if (port_id == FC_FID_DIR_SERV) break; if (!rport) { printk(KERN_INFO PFX "%x - rport not created Yet!!\n", port_id); break; } rp = rport->dd_data; mutex_lock(&hba->hba_mutex); /* * Perform session upload. Note that rdata->peers is already * removed from disc->rports list before we get this event. */ tgt = (struct bnx2fc_rport *)&rp[1]; if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) { mutex_unlock(&hba->hba_mutex); break; } clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); bnx2fc_upload_session(port, tgt); hba->num_ofld_sess--; BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n", hba->num_ofld_sess); /* * Try to wake up the linkdown wait thread. If num_ofld_sess * is 0, the waiting therad wakes up */ if ((hba->wait_for_link_down) && (hba->num_ofld_sess == 0)) { wake_up_interruptible(&hba->shutdown_wait); } mutex_unlock(&hba->hba_mutex); break; case RPORT_EV_NONE: break; } } /** * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id * * @port: fcoe_port struct to lookup the target port on * @port_id: The remote port ID to look up */ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, u32 port_id) { struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_rport *tgt; struct fc_rport_priv *rdata; int i; for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { tgt = hba->tgt_ofld_list[i]; if ((tgt) && (tgt->port == port)) { rdata = tgt->rdata; if (rdata->ids.port_id == port_id) { if (rdata->rp_state != RPORT_ST_DELETE) { BNX2FC_TGT_DBG(tgt, "rport " "obtained\n"); return tgt; } else { BNX2FC_TGT_DBG(tgt, "rport 0x%x " "is in DELETED state\n", rdata->ids.port_id); return NULL; } } } } return NULL; } /** * bnx2fc_alloc_conn_id - allocates FCOE Connection id * * @hba: pointer to adapter structure * @tgt: pointer to bnx2fc_rport structure */ static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt) { u32 conn_id, next; /* called with hba mutex held */ /* * tgt_ofld_list access is synchronized using * both hba mutex and hba lock. Atleast hba mutex or * hba lock needs to be held for read access. 
*/ spin_lock_bh(&hba->hba_lock); next = hba->next_conn_id; conn_id = hba->next_conn_id++; if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) hba->next_conn_id = 0; while (hba->tgt_ofld_list[conn_id] != NULL) { conn_id++; if (conn_id == BNX2FC_NUM_MAX_SESS) conn_id = 0; if (conn_id == next) { /* No free conn_ids are available */ spin_unlock_bh(&hba->hba_lock); return -1; } } hba->tgt_ofld_list[conn_id] = tgt; tgt->fcoe_conn_id = conn_id; spin_unlock_bh(&hba->hba_lock); return conn_id; } static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) { /* called with hba mutex held */ spin_lock_bh(&hba->hba_lock); hba->tgt_ofld_list[conn_id] = NULL; spin_unlock_bh(&hba->hba_lock); } /* * bnx2fc_alloc_session_resc - Allocate qp resources for the session */ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt) { dma_addr_t page; int num_pages; u32 *pbl; /* Allocate and map SQ */ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, &tgt->sq_dma, GFP_KERNEL); if (!tgt->sq) { printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", tgt->sq_mem_size); goto mem_alloc_failure; } /* Allocate and map CQ */ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, &tgt->cq_dma, GFP_KERNEL); if (!tgt->cq) { printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", tgt->cq_mem_size); goto mem_alloc_failure; } /* Allocate and map RQ and RQ PBL */ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, &tgt->rq_dma, GFP_KERNEL); if (!tgt->rq) { printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", tgt->rq_mem_size); goto mem_alloc_failure; } tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, &tgt->rq_pbl_dma, GFP_KERNEL); if (!tgt->rq_pbl) { printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", tgt->rq_pbl_size); goto mem_alloc_failure; } num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; page = tgt->rq_dma; pbl = (u32 *)tgt->rq_pbl; while (num_pages--) { *pbl = (u32)page; pbl++; *pbl = (u32)((u64)page >> 32); pbl++; page += CNIC_PAGE_SIZE; } /* Allocate and map XFERQ */ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, &tgt->xferq_dma, GFP_KERNEL); if (!tgt->xferq) { printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", tgt->xferq_mem_size); goto mem_alloc_failure; } /* Allocate and map CONFQ & CONFQ PBL */ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, &tgt->confq_dma, GFP_KERNEL); if (!tgt->confq) { printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", tgt->confq_mem_size); goto mem_alloc_failure; } tgt->confq_pbl_size = (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); tgt->confq_pbl_size = (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 
CNIC_PAGE_MASK; tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, &tgt->confq_pbl_dma, GFP_KERNEL); if (!tgt->confq_pbl) { printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", tgt->confq_pbl_size); goto mem_alloc_failure; } num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; page = tgt->confq_dma; pbl = (u32 *)tgt->confq_pbl; while (num_pages--) { *pbl = (u32)page; pbl++; *pbl = (u32)((u64)page >> 32); pbl++; page += CNIC_PAGE_SIZE; } /* Allocate and map ConnDB */ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, &tgt->conn_db_dma, GFP_KERNEL); if (!tgt->conn_db) { printk(KERN_ERR PFX "unable to allocate conn_db %d\n", tgt->conn_db_mem_size); goto mem_alloc_failure; } /* Allocate and map LCQ */ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, &tgt->lcq_dma, GFP_KERNEL); if (!tgt->lcq) { printk(KERN_ERR PFX "unable to allocate lcq %d\n", tgt->lcq_mem_size); goto mem_alloc_failure; } tgt->conn_db->rq_prod = 0x8000; return 0; mem_alloc_failure: return -ENOMEM; } /** * bnx2fc_free_session_resc - free qp resources for the session * * @hba: adapter structure pointer * @tgt: bnx2fc_rport structure pointer * * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL */ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt) { void __iomem *ctx_base_ptr; BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); spin_lock_bh(&tgt->cq_lock); ctx_base_ptr = tgt->ctx_base; tgt->ctx_base = NULL; /* Free LCQ */ if (tgt->lcq) { dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, tgt->lcq, tgt->lcq_dma); tgt->lcq = NULL; } /* Free connDB */ if (tgt->conn_db) { dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, tgt->conn_db, tgt->conn_db_dma); tgt->conn_db = NULL; } /* Free confq and confq pbl */ if (tgt->confq_pbl) { dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, tgt->confq_pbl, tgt->confq_pbl_dma); tgt->confq_pbl = NULL; } if (tgt->confq) { dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size, tgt->confq, tgt->confq_dma); tgt->confq = NULL; } /* Free XFERQ */ if (tgt->xferq) { dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, tgt->xferq, tgt->xferq_dma); tgt->xferq = NULL; } /* Free RQ PBL and RQ */ if (tgt->rq_pbl) { dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, tgt->rq_pbl, tgt->rq_pbl_dma); tgt->rq_pbl = NULL; } if (tgt->rq) { dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size, tgt->rq, tgt->rq_dma); tgt->rq = NULL; } /* Free CQ */ if (tgt->cq) { dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, tgt->cq, tgt->cq_dma); tgt->cq = NULL; } /* Free SQ */ if (tgt->sq) { dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, tgt->sq, tgt->sq_dma); tgt->sq = NULL; } spin_unlock_bh(&tgt->cq_lock); if (ctx_base_ptr) iounmap(ctx_base_ptr); }
linux-master
drivers/scsi/bnx2fc/bnx2fc_tgt.c
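bnx2fc_alloc_conn_id() above picks a free connection id by scanning tgt_ofld_list circularly from a cached starting index and giving up once the scan wraps back to where it began. The stand-alone sketch below reproduces only that search; the _demo names, the table size and the absence of locking are simplifications for illustration, not the driver's definitions.

/* Circular free-slot search, mirroring the conn_id allocation above. */
#include <stdio.h>

#define MAX_SESS_DEMO 8

static void *ofld_list[MAX_SESS_DEMO];	/* NULL == slot free */
static unsigned int next_conn_id;

static int alloc_conn_id_demo(void *tgt)
{
	unsigned int start = next_conn_id;	/* where the scan began */
	unsigned int id = next_conn_id++;

	if (next_conn_id == MAX_SESS_DEMO)
		next_conn_id = 0;

	while (ofld_list[id] != NULL) {
		id = (id + 1) % MAX_SESS_DEMO;
		if (id == start)
			return -1;	/* wrapped around: no free conn_id */
	}
	ofld_list[id] = tgt;
	return (int)id;
}

int main(void)
{
	int dummy[3];

	for (int i = 0; i < 3; i++)
		printf("conn_id=%d\n", alloc_conn_id_demo(&dummy[i]));
	return 0;
}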
/* * bnx2fc_els.c: QLogic Linux FCoE offload driver. * This file contains helper routines that handle ELS requests * and responses. * * Copyright (c) 2008-2013 Broadcom Corporation * Copyright (c) 2014-2016 QLogic Corporation * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi ([email protected]) */ #include "bnx2fc.h" static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg); static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg); static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, void *data, u32 data_len, void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec); static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg) { struct bnx2fc_cmd *orig_io_req; struct bnx2fc_cmd *rrq_req; int rc = 0; BUG_ON(!cb_arg); rrq_req = cb_arg->io_req; orig_io_req = cb_arg->aborted_io_req; BUG_ON(!orig_io_req); BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n", orig_io_req->xid, rrq_req->xid); kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) { /* * els req is timed out. cleanup the IO with FW and * drop the completion. Remove from active_cmd_queue. */ BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n", rrq_req->xid); if (rrq_req->on_active_queue) { list_del_init(&rrq_req->link); rrq_req->on_active_queue = 0; rc = bnx2fc_initiate_cleanup(rrq_req); BUG_ON(rc); } } kfree(cb_arg); } int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) { struct fc_els_rrq rrq; struct bnx2fc_rport *tgt = aborted_io_req->tgt; struct fc_lport *lport = NULL; struct bnx2fc_els_cb_arg *cb_arg = NULL; u32 sid = 0; u32 r_a_tov = 0; unsigned long start = jiffies; int rc; if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) return -EINVAL; lport = tgt->rdata->local_port; sid = tgt->sid; r_a_tov = lport->r_a_tov; BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n", aborted_io_req->xid); memset(&rrq, 0, sizeof(rrq)); cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO); if (!cb_arg) { printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n"); rc = -ENOMEM; goto rrq_err; } cb_arg->aborted_io_req = aborted_io_req; rrq.rrq_cmd = ELS_RRQ; hton24(rrq.rrq_s_id, sid); rrq.rrq_ox_id = htons(aborted_io_req->xid); rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id); retry_rrq: rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), bnx2fc_rrq_compl, cb_arg, r_a_tov); if (rc == -ENOMEM) { if (time_after(jiffies, start + (10 * HZ))) { BNX2FC_ELS_DBG("rrq Failed\n"); rc = FAILED; goto rrq_err; } msleep(20); goto retry_rrq; } rrq_err: if (rc) { BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n", aborted_io_req->xid); kfree(cb_arg); spin_lock_bh(&tgt->tgt_lock); kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); } return rc; } static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg) { struct bnx2fc_cmd *els_req; struct bnx2fc_rport *tgt; struct bnx2fc_mp_req *mp_req; struct fc_frame_header *fc_hdr; unsigned char *buf; void *resp_buf; u32 resp_len, hdr_len; u16 l2_oxid; int frame_len; int rc = 0; l2_oxid = cb_arg->l2_oxid; BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid); els_req = cb_arg->io_req; if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, 
&els_req->req_flags)) { /* * els req is timed out. cleanup the IO with FW and * drop the completion. libfc will handle the els timeout */ if (els_req->on_active_queue) { list_del_init(&els_req->link); els_req->on_active_queue = 0; rc = bnx2fc_initiate_cleanup(els_req); BUG_ON(rc); } goto free_arg; } tgt = els_req->tgt; mp_req = &(els_req->mp_req); fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; resp_buf = mp_req->resp_buf; buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); if (!buf) { printk(KERN_ERR PFX "Unable to alloc mp buf\n"); goto free_arg; } hdr_len = sizeof(*fc_hdr); if (hdr_len + resp_len > PAGE_SIZE) { printk(KERN_ERR PFX "l2_els_compl: resp len is " "beyond page size\n"); goto free_buf; } memcpy(buf, fc_hdr, hdr_len); memcpy(buf + hdr_len, resp_buf, resp_len); frame_len = hdr_len + resp_len; bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid); free_buf: kfree(buf); free_arg: kfree(cb_arg); } int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp) { struct fc_els_adisc *adisc; struct fc_frame_header *fh; struct bnx2fc_els_cb_arg *cb_arg; struct fc_lport *lport = tgt->rdata->local_port; u32 r_a_tov = lport->r_a_tov; int rc; fh = fc_frame_header_get(fp); cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); if (!cb_arg) { printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n"); return -ENOMEM; } cb_arg->l2_oxid = ntohs(fh->fh_ox_id); BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid); adisc = fc_frame_payload_get(fp, sizeof(*adisc)); /* adisc is initialized by libfc */ rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc), bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); if (rc) kfree(cb_arg); return rc; } int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp) { struct fc_els_logo *logo; struct fc_frame_header *fh; struct bnx2fc_els_cb_arg *cb_arg; struct fc_lport *lport = tgt->rdata->local_port; u32 r_a_tov = lport->r_a_tov; int rc; fh = fc_frame_header_get(fp); cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); if (!cb_arg) { printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); return -ENOMEM; } cb_arg->l2_oxid = ntohs(fh->fh_ox_id); BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid); logo = fc_frame_payload_get(fp, sizeof(*logo)); /* logo is initialized by libfc */ rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo), bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); if (rc) kfree(cb_arg); return rc; } int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp) { struct fc_els_rls *rls; struct fc_frame_header *fh; struct bnx2fc_els_cb_arg *cb_arg; struct fc_lport *lport = tgt->rdata->local_port; u32 r_a_tov = lport->r_a_tov; int rc; fh = fc_frame_header_get(fp); cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); if (!cb_arg) { printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); return -ENOMEM; } cb_arg->l2_oxid = ntohs(fh->fh_ox_id); rls = fc_frame_payload_get(fp, sizeof(*rls)); /* rls is initialized by libfc */ rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls), bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); if (rc) kfree(cb_arg); return rc; } static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg) { struct bnx2fc_mp_req *mp_req; struct fc_frame_header *fc_hdr, *fh; struct bnx2fc_cmd *srr_req; struct bnx2fc_cmd *orig_io_req; struct fc_frame *fp; unsigned char *buf; void *resp_buf; u32 resp_len, hdr_len; u8 opcode; int rc = 0; orig_io_req = cb_arg->aborted_io_req; srr_req = cb_arg->io_req; if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, 
&srr_req->req_flags)) { /* SRR timedout */ BNX2FC_IO_DBG(srr_req, "srr timed out, abort " "orig_io - 0x%x\n", orig_io_req->xid); rc = bnx2fc_initiate_abts(srr_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " "failed. issue cleanup\n"); bnx2fc_initiate_cleanup(srr_req); } if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) || test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx", orig_io_req->xid, orig_io_req->req_flags); goto srr_compl_done; } orig_io_req->srr_retry++; if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) { struct bnx2fc_rport *tgt = orig_io_req->tgt; spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_send_srr(orig_io_req, orig_io_req->srr_offset, orig_io_req->srr_rctl); spin_lock_bh(&tgt->tgt_lock); if (!rc) goto srr_compl_done; } rc = bnx2fc_initiate_abts(orig_io_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " "failed xid = 0x%x. issue cleanup\n", orig_io_req->xid); bnx2fc_initiate_cleanup(orig_io_req); } goto srr_compl_done; } if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) || test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx", orig_io_req->xid, orig_io_req->req_flags); goto srr_compl_done; } mp_req = &(srr_req->mp_req); fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; resp_buf = mp_req->resp_buf; hdr_len = sizeof(*fc_hdr); buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); if (!buf) { printk(KERN_ERR PFX "srr buf: mem alloc failure\n"); goto srr_compl_done; } memcpy(buf, fc_hdr, hdr_len); memcpy(buf + hdr_len, resp_buf, resp_len); fp = fc_frame_alloc(NULL, resp_len); if (!fp) { printk(KERN_ERR PFX "fc_frame_alloc failure\n"); goto free_buf; } fh = (struct fc_frame_header *) fc_frame_header_get(fp); /* Copy FC Frame header and payload into the frame */ memcpy(fh, buf, hdr_len + resp_len); opcode = fc_frame_payload_op(fp); switch (opcode) { case ELS_LS_ACC: BNX2FC_IO_DBG(srr_req, "SRR success\n"); break; case ELS_LS_RJT: BNX2FC_IO_DBG(srr_req, "SRR rejected\n"); rc = bnx2fc_initiate_abts(orig_io_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " "failed xid = 0x%x. issue cleanup\n", orig_io_req->xid); bnx2fc_initiate_cleanup(orig_io_req); } break; default: BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n", opcode); break; } fc_frame_free(fp); free_buf: kfree(buf); srr_compl_done: kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); } static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) { struct bnx2fc_cmd *orig_io_req, *new_io_req; struct bnx2fc_cmd *rec_req; struct bnx2fc_mp_req *mp_req; struct fc_frame_header *fc_hdr, *fh; struct fc_els_ls_rjt *rjt; struct fc_els_rec_acc *acc; struct bnx2fc_rport *tgt; struct fcoe_err_report_entry *err_entry; struct scsi_cmnd *sc_cmd; enum fc_rctl r_ctl; unsigned char *buf; void *resp_buf; struct fc_frame *fp; u8 opcode; u32 offset; u32 e_stat; u32 resp_len, hdr_len; int rc = 0; bool send_seq_clnp = false; bool abort_io = false; BNX2FC_MISC_DBG("Entered rec_compl callback\n"); rec_req = cb_arg->io_req; orig_io_req = cb_arg->aborted_io_req; BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid); tgt = orig_io_req->tgt; /* Handle REC timeout case */ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) { BNX2FC_IO_DBG(rec_req, "timed out, abort " "orig_io - 0x%x\n", orig_io_req->xid); /* els req is timed out. 
send abts for els */ rc = bnx2fc_initiate_abts(rec_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " "failed. issue cleanup\n"); bnx2fc_initiate_cleanup(rec_req); } orig_io_req->rec_retry++; /* REC timedout. send ABTS to the orig IO req */ if (orig_io_req->rec_retry <= REC_RETRY_COUNT) { spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_send_rec(orig_io_req); spin_lock_bh(&tgt->tgt_lock); if (!rc) goto rec_compl_done; } rc = bnx2fc_initiate_abts(orig_io_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " "failed xid = 0x%x. issue cleanup\n", orig_io_req->xid); bnx2fc_initiate_cleanup(orig_io_req); } goto rec_compl_done; } if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { BNX2FC_IO_DBG(rec_req, "completed" "orig_io - 0x%x\n", orig_io_req->xid); goto rec_compl_done; } if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { BNX2FC_IO_DBG(rec_req, "abts in prog " "orig_io - 0x%x\n", orig_io_req->xid); goto rec_compl_done; } mp_req = &(rec_req->mp_req); fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; acc = resp_buf = mp_req->resp_buf; hdr_len = sizeof(*fc_hdr); buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); if (!buf) { printk(KERN_ERR PFX "rec buf: mem alloc failure\n"); goto rec_compl_done; } memcpy(buf, fc_hdr, hdr_len); memcpy(buf + hdr_len, resp_buf, resp_len); fp = fc_frame_alloc(NULL, resp_len); if (!fp) { printk(KERN_ERR PFX "fc_frame_alloc failure\n"); goto free_buf; } fh = (struct fc_frame_header *) fc_frame_header_get(fp); /* Copy FC Frame header and payload into the frame */ memcpy(fh, buf, hdr_len + resp_len); opcode = fc_frame_payload_op(fp); if (opcode == ELS_LS_RJT) { BNX2FC_IO_DBG(rec_req, "opcode is RJT\n"); rjt = fc_frame_payload_get(fp, sizeof(*rjt)); if ((rjt->er_reason == ELS_RJT_LOGIC || rjt->er_reason == ELS_RJT_UNAB) && rjt->er_explan == ELS_EXPL_OXID_RXID) { BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n"); new_io_req = bnx2fc_cmd_alloc(tgt); if (!new_io_req) goto abort_io; new_io_req->sc_cmd = orig_io_req->sc_cmd; /* cleanup orig_io_req that is with the FW */ set_bit(BNX2FC_FLAG_CMD_LOST, &orig_io_req->req_flags); bnx2fc_initiate_cleanup(orig_io_req); /* Post a new IO req with the same sc_cmd */ BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); rc = bnx2fc_post_io_req(tgt, new_io_req); if (!rc) goto free_frame; BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); } abort_io: rc = bnx2fc_initiate_abts(orig_io_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " "failed. 
issue cleanup\n"); bnx2fc_initiate_cleanup(orig_io_req); } } else if (opcode == ELS_LS_ACC) { /* REVISIT: Check if the exchange is already aborted */ offset = ntohl(acc->reca_fc4value); e_stat = ntohl(acc->reca_e_stat); if (e_stat & ESB_ST_SEQ_INIT) { BNX2FC_IO_DBG(rec_req, "target has the seq init\n"); goto free_frame; } BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n", e_stat, offset); /* Seq initiative is with us */ err_entry = (struct fcoe_err_report_entry *) &orig_io_req->err_entry; sc_cmd = orig_io_req->sc_cmd; if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { /* SCSI WRITE command */ if (offset == orig_io_req->data_xfer_len) { BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n"); /* FCP_RSP lost */ r_ctl = FC_RCTL_DD_CMD_STATUS; offset = 0; } else { /* start transmitting from offset */ BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n"); send_seq_clnp = true; r_ctl = FC_RCTL_DD_DATA_DESC; if (bnx2fc_initiate_seq_cleanup(orig_io_req, offset, r_ctl)) abort_io = true; /* XFER_RDY */ } } else { /* SCSI READ command */ if (err_entry->data.rx_buf_off == orig_io_req->data_xfer_len) { /* FCP_RSP lost */ BNX2FC_IO_DBG(rec_req, "READ - resp lost\n"); r_ctl = FC_RCTL_DD_CMD_STATUS; offset = 0; } else { /* request retransmission from this offset */ send_seq_clnp = true; offset = err_entry->data.rx_buf_off; BNX2FC_IO_DBG(rec_req, "RD DATA lost\n"); /* FCP_DATA lost */ r_ctl = FC_RCTL_DD_SOL_DATA; if (bnx2fc_initiate_seq_cleanup(orig_io_req, offset, r_ctl)) abort_io = true; } } if (abort_io) { rc = bnx2fc_initiate_abts(orig_io_req); if (rc != SUCCESS) { BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts" " failed. issue cleanup\n"); bnx2fc_initiate_cleanup(orig_io_req); } } else if (!send_seq_clnp) { BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n"); spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); spin_lock_bh(&tgt->tgt_lock); if (rc) { BNX2FC_IO_DBG(rec_req, "Unable to send SRR" " IO will abort\n"); } } } free_frame: fc_frame_free(fp); free_buf: kfree(buf); rec_compl_done: kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); kfree(cb_arg); } int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req) { struct fc_els_rec rec; struct bnx2fc_rport *tgt = orig_io_req->tgt; struct fc_lport *lport = tgt->rdata->local_port; struct bnx2fc_els_cb_arg *cb_arg = NULL; u32 sid = tgt->sid; u32 r_a_tov = lport->r_a_tov; int rc; BNX2FC_IO_DBG(orig_io_req, "Sending REC\n"); memset(&rec, 0, sizeof(rec)); cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); if (!cb_arg) { printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n"); rc = -ENOMEM; goto rec_err; } kref_get(&orig_io_req->refcount); cb_arg->aborted_io_req = orig_io_req; rec.rec_cmd = ELS_REC; hton24(rec.rec_s_id, sid); rec.rec_ox_id = htons(orig_io_req->xid); rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec), bnx2fc_rec_compl, cb_arg, r_a_tov); if (rc) { BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n"); spin_lock_bh(&tgt->tgt_lock); kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); kfree(cb_arg); } rec_err: return rc; } int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl) { struct fcp_srr srr; struct bnx2fc_rport *tgt = orig_io_req->tgt; struct fc_lport *lport = tgt->rdata->local_port; struct bnx2fc_els_cb_arg *cb_arg = NULL; u32 r_a_tov = lport->r_a_tov; int rc; BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n"); memset(&srr, 0, sizeof(srr)); cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), 
GFP_ATOMIC); if (!cb_arg) { printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n"); rc = -ENOMEM; goto srr_err; } kref_get(&orig_io_req->refcount); cb_arg->aborted_io_req = orig_io_req; srr.srr_op = ELS_SRR; srr.srr_ox_id = htons(orig_io_req->xid); srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); srr.srr_rel_off = htonl(offset); srr.srr_r_ctl = r_ctl; orig_io_req->srr_offset = offset; orig_io_req->srr_rctl = r_ctl; rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr), bnx2fc_srr_compl, cb_arg, r_a_tov); if (rc) { BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n"); spin_lock_bh(&tgt->tgt_lock); kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); kfree(cb_arg); } else set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags); srr_err: return rc; } static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, void *data, u32 data_len, void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) { struct fcoe_port *port = tgt->port; struct bnx2fc_interface *interface = port->priv; struct fc_rport *rport = tgt->rport; struct fc_lport *lport = port->lport; struct bnx2fc_cmd *els_req; struct bnx2fc_mp_req *mp_req; struct fc_frame_header *fc_hdr; struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; int rc = 0; int task_idx, index; u32 did, sid; u16 xid; rc = fc_remote_port_chkready(rport); if (rc) { printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op); rc = -EINVAL; goto els_err; } if (lport->state != LPORT_ST_READY || !(lport->link_up)) { printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op); rc = -EINVAL; goto els_err; } if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op); rc = -EINVAL; goto els_err; } els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS); if (!els_req) { rc = -ENOMEM; goto els_err; } els_req->sc_cmd = NULL; els_req->port = port; els_req->tgt = tgt; els_req->cb_func = cb_func; cb_arg->io_req = els_req; els_req->cb_arg = cb_arg; els_req->data_xfer_len = data_len; mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); rc = bnx2fc_init_mp_req(els_req); if (rc == FAILED) { printk(KERN_ERR PFX "ELS MP request init failed\n"); spin_lock_bh(&tgt->tgt_lock); kref_put(&els_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); rc = -ENOMEM; goto els_err; } else { /* rc SUCCESS */ rc = 0; } /* Set the data_xfer_len to the size of ELS payload */ mp_req->req_len = data_len; els_req->data_xfer_len = mp_req->req_len; /* Fill ELS Payload */ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { memcpy(mp_req->req_buf, data, data_len); } else { printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op); els_req->cb_func = NULL; els_req->cb_arg = NULL; spin_lock_bh(&tgt->tgt_lock); kref_put(&els_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); rc = -EINVAL; } if (rc) goto els_err; /* Fill FC header */ fc_hdr = &(mp_req->req_fc_hdr); did = tgt->rport->port_id; sid = tgt->sid; if (op == ELS_SRR) __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid, FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); else __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); /* Obtain exchange id */ xid = els_req->xid; task_idx = xid/BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ task_page = (struct fcoe_task_ctx_entry *) interface->hba->task_ctx[task_idx]; task = 
&(task_page[index]); bnx2fc_init_mp_task(els_req, task); spin_lock_bh(&tgt->tgt_lock); if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { printk(KERN_ERR PFX "initiate_els.. session not ready\n"); els_req->cb_func = NULL; els_req->cb_arg = NULL; kref_put(&els_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); return -EINVAL; } if (timer_msec) bnx2fc_cmd_timer_set(els_req, timer_msec); bnx2fc_add_2_sq(tgt, xid); els_req->on_active_queue = 1; list_add_tail(&els_req->link, &tgt->els_queue); /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); spin_unlock_bh(&tgt->tgt_lock); els_err: return rc; } void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, struct fcoe_task_ctx_entry *task, u8 num_rq) { struct bnx2fc_mp_req *mp_req; struct fc_frame_header *fc_hdr; u64 *hdr; u64 *temp_hdr; BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x" "cmd_type = %d\n", els_req->xid, els_req->cmd_type); if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, &els_req->req_flags)) { BNX2FC_ELS_DBG("Timer context finished processing this " "els - 0x%x\n", els_req->xid); /* This IO doesn't receive cleanup completion */ kref_put(&els_req->refcount, bnx2fc_cmd_release); return; } /* Cancel the timeout_work, as we received the response */ if (cancel_delayed_work(&els_req->timeout_work)) kref_put(&els_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ if (els_req->on_active_queue) { list_del_init(&els_req->link); els_req->on_active_queue = 0; } mp_req = &(els_req->mp_req); fc_hdr = &(mp_req->resp_fc_hdr); hdr = (u64 *)fc_hdr; temp_hdr = (u64 *) &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); hdr[2] = cpu_to_be64(temp_hdr[2]); mp_req->resp_len = task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; /* Parse ELS response */ if ((els_req->cb_func) && (els_req->cb_arg)) { els_req->cb_func(els_req->cb_arg); els_req->cb_arg = NULL; } kref_put(&els_req->refcount, bnx2fc_cmd_release); } #define BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC 1 #define BNX2FC_FCOE_MAC_METHOD_FCF_MAP 2 #define BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC 3 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { struct fcoe_ctlr *fip = arg; struct fc_exch *exch = fc_seq_exch(seq); struct fc_lport *lport = exch->lp; struct fc_frame_header *fh; u8 *granted_mac; u8 fcoe_mac[6]; u8 fc_map[3]; int method; if (IS_ERR(fp)) goto done; fh = fc_frame_header_get(fp); granted_mac = fr_cb(fp)->granted_mac; /* * We set the source MAC for FCoE traffic based on the Granted MAC * address from the switch. * * If granted_mac is non-zero, we use that. * If the granted_mac is zeroed out, create the FCoE MAC based on * the sel_fcf->fc_map and the d_id fo the FLOGI frame. * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the * d_id of the FLOGI frame. 
*/ if (!is_zero_ether_addr(granted_mac)) { ether_addr_copy(fcoe_mac, granted_mac); method = BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC; } else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) { hton24(fc_map, fip->sel_fcf->fc_map); fcoe_mac[0] = fc_map[0]; fcoe_mac[1] = fc_map[1]; fcoe_mac[2] = fc_map[2]; fcoe_mac[3] = fh->fh_d_id[0]; fcoe_mac[4] = fh->fh_d_id[1]; fcoe_mac[5] = fh->fh_d_id[2]; method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP; } else { fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id); method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC; } BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method); fip->update_mac(lport, fcoe_mac); done: fc_lport_flogi_resp(seq, fp, lport); } static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { struct fcoe_ctlr *fip = arg; struct fc_exch *exch = fc_seq_exch(seq); struct fc_lport *lport = exch->lp; static u8 zero_mac[ETH_ALEN] = { 0 }; if (!IS_ERR(fp)) fip->update_mac(lport, zero_mac); fc_lport_logo_resp(seq, fp, lport); } struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, struct fc_frame *, void *), void *arg, u32 timeout) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface); struct fc_frame_header *fh = fc_frame_header_get(fp); switch (op) { case ELS_FLOGI: case ELS_FDISC: return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp, fip, timeout); case ELS_LOGO: /* only hook onto fabric logouts, not port logouts */ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) break; return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp, fip, timeout); } return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); }
linux-master
drivers/scsi/bnx2fc/bnx2fc_els.c
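bnx2fc_flogi_resp() above chooses the FCoE source MAC from the switch-granted MAC when one is present, otherwise from the selected FCF's FC-MAP combined with the D_ID of the FLOGI response, and finally falls back to fc_fcoe_set_mac(). The sketch below illustrates only the FC-MAP + D_ID composition; build_fcoe_mac_demo and the sample values are invented for the example, and 0x0EFC00 is used here as the conventional default FC-MAP.

/* Compose a 6-byte FCoE MAC: three FC-MAP bytes followed by the 24-bit D_ID. */
#include <stdint.h>
#include <stdio.h>

static void build_fcoe_mac_demo(uint8_t mac[6], uint32_t fc_map,
				const uint8_t d_id[3])
{
	mac[0] = (uint8_t)(fc_map >> 16);	/* FC-MAP, most significant byte first */
	mac[1] = (uint8_t)(fc_map >> 8);
	mac[2] = (uint8_t)fc_map;
	mac[3] = d_id[0];			/* 24-bit FC address of the port */
	mac[4] = d_id[1];
	mac[5] = d_id[2];
}

int main(void)
{
	uint8_t mac[6];
	const uint8_t d_id[3] = { 0x01, 0x0a, 0x00 };	/* example FC_ID */

	build_fcoe_mac_demo(mac, 0x0efc00, d_id);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}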
/* bnx2fc_io.c: QLogic Linux FCoE offload driver. * IO manager and SCSI IO processing. * * Copyright (c) 2008-2013 Broadcom Corporation * Copyright (c) 2014-2016 QLogic Corporation * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi ([email protected]) */ #include "bnx2fc.h" #define RESERVE_FREE_LIST_INDEX num_possible_cpus() static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, int bd_index); static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, struct fcoe_fcp_rsp_payload *fcp_rsp, u8 num_rq, unsigned char *rq_data); void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, unsigned int timer_msec) { struct bnx2fc_interface *interface = io_req->port->priv; if (queue_delayed_work(interface->timer_work_queue, &io_req->timeout_work, msecs_to_jiffies(timer_msec))) kref_get(&io_req->refcount); } static void bnx2fc_cmd_timeout(struct work_struct *work) { struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, timeout_work.work); u8 cmd_type = io_req->cmd_type; struct bnx2fc_rport *tgt = io_req->tgt; int rc; BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d," "req_flags = %lx\n", cmd_type, io_req->req_flags); spin_lock_bh(&tgt->tgt_lock); if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) { clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); /* * ideally we should hold the io_req until RRQ complets, * and release io_req from timeout hold. */ spin_unlock_bh(&tgt->tgt_lock); bnx2fc_send_rrq(io_req); return; } if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n"); goto done; } switch (cmd_type) { case BNX2FC_SCSI_CMD: if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags)) { /* Handle eh_abort timeout */ BNX2FC_IO_DBG(io_req, "eh_abort timed out\n"); complete(&io_req->abts_done); } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { /* Handle internally generated ABTS timeout */ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", kref_read(&io_req->refcount)); if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags))) { /* * Cleanup and return original command to * mid-layer. */ bnx2fc_initiate_cleanup(io_req); kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); return; } } else { /* Hanlde IO timeout */ BNX2FC_IO_DBG(io_req, "IO timed out. 
issue ABTS\n"); if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "IO completed before " " timer expiry\n"); goto done; } if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { rc = bnx2fc_initiate_abts(io_req); if (rc == SUCCESS) goto done; kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); return; } else { BNX2FC_IO_DBG(io_req, "IO already in " "ABTS processing\n"); } } break; case BNX2FC_ELS: if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n"); if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) { kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); return; } } else { /* * Handle ELS timeout. * tgt_lock is used to sync compl path and timeout * path. If els compl path is processing this IO, we * have nothing to do here, just release the timer hold */ BNX2FC_IO_DBG(io_req, "ELS timed out\n"); if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, &io_req->req_flags)) goto done; /* Indicate the cb_func that this ELS is timed out */ set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); if ((io_req->cb_func) && (io_req->cb_arg)) { io_req->cb_func(io_req->cb_arg); io_req->cb_arg = NULL; } } break; default: printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", cmd_type); break; } done: /* release the cmd that was held when timer was set */ kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); } static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) { /* Called with host lock held */ struct scsi_cmnd *sc_cmd = io_req->sc_cmd; /* * active_cmd_queue may have other command types as well, * and during flush operation, we want to error back only * scsi commands. */ if (io_req->cmd_type != BNX2FC_SCSI_CMD) return; BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) { /* Do not call scsi done for this IO */ return; } bnx2fc_unmap_sg_list(io_req); io_req->sc_cmd = NULL; /* Sanity checks before returning command to mid-layer */ if (!sc_cmd) { printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. 
" "IO(0x%x) already cleaned up\n", io_req->xid); return; } if (!sc_cmd->device) { pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); return; } if (!sc_cmd->device->host) { pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", io_req->xid); return; } sc_cmd->result = err_code << 16; BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, sc_cmd->allowed); scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); bnx2fc_priv(sc_cmd)->io_req = NULL; scsi_done(sc_cmd); } struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) { struct bnx2fc_cmd_mgr *cmgr; struct io_bdt *bdt_info; struct bnx2fc_cmd *io_req; size_t len; u32 mem_size; u16 xid; int i; int num_ios, num_pri_ios; size_t bd_tbl_sz; int arr_sz = num_possible_cpus() + 1; u16 min_xid = BNX2FC_MIN_XID; u16 max_xid = hba->max_xid; if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ and max_xid 0x%x\n", min_xid, max_xid); return NULL; } BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); num_ios = max_xid - min_xid + 1; len = (num_ios * (sizeof(struct bnx2fc_cmd *))); len += sizeof(struct bnx2fc_cmd_mgr); cmgr = kzalloc(len, GFP_KERNEL); if (!cmgr) { printk(KERN_ERR PFX "failed to alloc cmgr\n"); return NULL; } cmgr->hba = hba; cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), GFP_KERNEL); if (!cmgr->free_list) { printk(KERN_ERR PFX "failed to alloc free_list\n"); goto mem_err; } cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock), GFP_KERNEL); if (!cmgr->free_list_lock) { printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); kfree(cmgr->free_list); cmgr->free_list = NULL; goto mem_err; } cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); for (i = 0; i < arr_sz; i++) { INIT_LIST_HEAD(&cmgr->free_list[i]); spin_lock_init(&cmgr->free_list_lock[i]); } /* * Pre-allocated pool of bnx2fc_cmds. * Last entry in the free list array is the free list * of slow path requests. 
 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - hba->elstm_xids;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				&cmgr->free_list[io_req->xid %
						 num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				&cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
				"io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
				"bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			"ios(%d):sqes(%d)\n",
tgt->num_active_ios.counter, tgt->max_sqes); if (list_empty(&(cmd_mgr->free_list[index]))) printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); spin_unlock_bh(&cmd_mgr->free_list_lock[index]); return NULL; } listp = (struct list_head *) cmd_mgr->free_list[index].next; list_del_init(listp); io_req = (struct bnx2fc_cmd *) listp; xid = io_req->xid; cmd_mgr->cmds[xid] = io_req; atomic_inc(&tgt->num_active_ios); atomic_dec(&tgt->free_sqes); spin_unlock_bh(&cmd_mgr->free_list_lock[index]); INIT_LIST_HEAD(&io_req->link); io_req->port = port; io_req->cmd_mgr = cmd_mgr; io_req->req_flags = 0; io_req->cmd_type = type; /* Bind io_bdt for this io_req */ /* Have a static link between io_req and io_bdt_pool */ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; bd_tbl->io_req = io_req; /* Hold the io_req against deletion */ kref_init(&io_req->refcount); return io_req; } struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) { struct fcoe_port *port = tgt->port; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; struct bnx2fc_cmd *io_req; struct list_head *listp; struct io_bdt *bd_tbl; u32 free_sqes; u32 max_sqes; u16 xid; int index = raw_smp_processor_id(); max_sqes = BNX2FC_SCSI_MAX_SQES; /* * NOTE: Free list insertions and deletions are protected with * cmgr lock */ spin_lock_bh(&cmd_mgr->free_list_lock[index]); free_sqes = atomic_read(&tgt->free_sqes); if ((list_empty(&cmd_mgr->free_list[index])) || (tgt->num_active_ios.counter >= max_sqes) || (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { spin_unlock_bh(&cmd_mgr->free_list_lock[index]); return NULL; } listp = (struct list_head *) cmd_mgr->free_list[index].next; list_del_init(listp); io_req = (struct bnx2fc_cmd *) listp; xid = io_req->xid; cmd_mgr->cmds[xid] = io_req; atomic_inc(&tgt->num_active_ios); atomic_dec(&tgt->free_sqes); spin_unlock_bh(&cmd_mgr->free_list_lock[index]); INIT_LIST_HEAD(&io_req->link); io_req->port = port; io_req->cmd_mgr = cmd_mgr; io_req->req_flags = 0; /* Bind io_bdt for this io_req */ /* Have a static link between io_req and io_bdt_pool */ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; bd_tbl->io_req = io_req; /* Hold the io_req against deletion */ kref_init(&io_req->refcount); return io_req; } void bnx2fc_cmd_release(struct kref *ref) { struct bnx2fc_cmd *io_req = container_of(ref, struct bnx2fc_cmd, refcount); struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; int index; if (io_req->cmd_type == BNX2FC_SCSI_CMD) index = io_req->xid % num_possible_cpus(); else index = RESERVE_FREE_LIST_INDEX; spin_lock_bh(&cmd_mgr->free_list_lock[index]); if (io_req->cmd_type != BNX2FC_SCSI_CMD) bnx2fc_free_mp_resc(io_req); cmd_mgr->cmds[io_req->xid] = NULL; /* Delete IO from retire queue */ list_del_init(&io_req->link); /* Add it to the free list */ list_add(&io_req->link, &cmd_mgr->free_list[index]); atomic_dec(&io_req->tgt->num_active_ios); spin_unlock_bh(&cmd_mgr->free_list_lock[index]); } static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) { struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); struct bnx2fc_interface *interface = io_req->port->priv; struct bnx2fc_hba *hba = interface->hba; size_t sz = sizeof(struct fcoe_bd_ctx); /* clear tm flags */ mp_req->tm_flags = 0; if (mp_req->mp_req_bd) { dma_free_coherent(&hba->pcidev->dev, sz, mp_req->mp_req_bd, mp_req->mp_req_bd_dma); mp_req->mp_req_bd = NULL; } if (mp_req->mp_resp_bd) { dma_free_coherent(&hba->pcidev->dev, sz, mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma); mp_req->mp_resp_bd = NULL; } if (mp_req->req_buf) { 
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, mp_req->req_buf, mp_req->req_buf_dma); mp_req->req_buf = NULL; } if (mp_req->resp_buf) { dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, mp_req->resp_buf, mp_req->resp_buf_dma); mp_req->resp_buf = NULL; } } int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) { struct bnx2fc_mp_req *mp_req; struct fcoe_bd_ctx *mp_req_bd; struct fcoe_bd_ctx *mp_resp_bd; struct bnx2fc_interface *interface = io_req->port->priv; struct bnx2fc_hba *hba = interface->hba; dma_addr_t addr; size_t sz; mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); memset(mp_req, 0, sizeof(struct bnx2fc_mp_req)); if (io_req->cmd_type != BNX2FC_ELS) { mp_req->req_len = sizeof(struct fcp_cmnd); io_req->data_xfer_len = mp_req->req_len; } else mp_req->req_len = io_req->data_xfer_len; mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &mp_req->req_buf_dma, GFP_ATOMIC); if (!mp_req->req_buf) { printk(KERN_ERR PFX "unable to alloc MP req buffer\n"); bnx2fc_free_mp_resc(io_req); return FAILED; } mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_ATOMIC); if (!mp_req->resp_buf) { printk(KERN_ERR PFX "unable to alloc TM resp buffer\n"); bnx2fc_free_mp_resc(io_req); return FAILED; } memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); /* Allocate and map mp_req_bd and mp_resp_bd */ sz = sizeof(struct fcoe_bd_ctx); mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, &mp_req->mp_req_bd_dma, GFP_ATOMIC); if (!mp_req->mp_req_bd) { printk(KERN_ERR PFX "unable to alloc MP req bd\n"); bnx2fc_free_mp_resc(io_req); return FAILED; } mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, &mp_req->mp_resp_bd_dma, GFP_ATOMIC); if (!mp_req->mp_resp_bd) { printk(KERN_ERR PFX "unable to alloc MP resp bd\n"); bnx2fc_free_mp_resc(io_req); return FAILED; } /* Fill bd table */ addr = mp_req->req_buf_dma; mp_req_bd = mp_req->mp_req_bd; mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); mp_req_bd->buf_len = CNIC_PAGE_SIZE; mp_req_bd->flags = 0; /* * MP buffer is either a task mgmt command or an ELS. 
* So the assumption is that it consumes a single bd * entry in the bd table */ mp_resp_bd = mp_req->mp_resp_bd; addr = mp_req->resp_buf_dma; mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); mp_resp_bd->buf_len = CNIC_PAGE_SIZE; mp_resp_bd->flags = 0; return SUCCESS; } static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) { struct fc_lport *lport; struct fc_rport *rport; struct fc_rport_libfc_priv *rp; struct fcoe_port *port; struct bnx2fc_interface *interface; struct bnx2fc_rport *tgt; struct bnx2fc_cmd *io_req; struct bnx2fc_mp_req *tm_req; struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct Scsi_Host *host = sc_cmd->device->host; struct fc_frame_header *fc_hdr; struct fcp_cmnd *fcp_cmnd; int task_idx, index; int rc = SUCCESS; u16 xid; u32 sid, did; unsigned long start = jiffies; lport = shost_priv(host); rport = starget_to_rport(scsi_target(sc_cmd->device)); port = lport_priv(lport); interface = port->priv; if (rport == NULL) { printk(KERN_ERR PFX "device_reset: rport is NULL\n"); rc = FAILED; goto tmf_err; } rp = rport->dd_data; rc = fc_block_scsi_eh(sc_cmd); if (rc) return rc; if (lport->state != LPORT_ST_READY || !(lport->link_up)) { printk(KERN_ERR PFX "device_reset: link is not ready\n"); rc = FAILED; goto tmf_err; } /* rport and tgt are allocated together, so tgt should be non-NULL */ tgt = (struct bnx2fc_rport *)&rp[1]; if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); rc = FAILED; goto tmf_err; } retry_tmf: io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); if (!io_req) { if (time_after(jiffies, start + HZ)) { printk(KERN_ERR PFX "tmf: Failed TMF"); rc = FAILED; goto tmf_err; } msleep(20); goto retry_tmf; } /* Initialize rest of io_req fields */ io_req->sc_cmd = sc_cmd; io_req->port = port; io_req->tgt = tgt; tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); rc = bnx2fc_init_mp_req(io_req); if (rc == FAILED) { printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); spin_lock_bh(&tgt->tgt_lock); kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); goto tmf_err; } /* Set TM flags */ io_req->io_req_flags = 0; tm_req->tm_flags = tm_flags; /* Fill FCP_CMND */ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); fcp_cmnd->fc_dl = 0; /* Fill FC header */ fc_hdr = &(tm_req->req_fc_hdr); sid = tgt->sid; did = rport->port_id; __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); /* Obtain exchange id */ xid = io_req->xid; BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); task_idx = xid/BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ task_page = (struct fcoe_task_ctx_entry *) interface->hba->task_ctx[task_idx]; task = &(task_page[index]); bnx2fc_init_mp_task(io_req, task); bnx2fc_priv(sc_cmd)->io_req = io_req; /* Obtain free SQ entry */ spin_lock_bh(&tgt->tgt_lock); bnx2fc_add_2_sq(tgt, xid); /* Enqueue the io_req to active_tm_queue */ io_req->on_tmf_queue = 1; list_add_tail(&io_req->link, &tgt->active_tm_queue); init_completion(&io_req->abts_done); io_req->wait_for_abts_comp = 1; /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); spin_unlock_bh(&tgt->tgt_lock); rc = wait_for_completion_timeout(&io_req->abts_done, interface->tm_timeout * HZ); 
spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_abts_comp = 0; if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) { set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); if (io_req->on_tmf_queue) { list_del_init(&io_req->link); io_req->on_tmf_queue = 0; } io_req->wait_for_cleanup_comp = 1; init_completion(&io_req->cleanup_done); bnx2fc_initiate_cleanup(io_req); spin_unlock_bh(&tgt->tgt_lock); rc = wait_for_completion_timeout(&io_req->cleanup_done, BNX2FC_FW_TIMEOUT); spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_cleanup_comp = 0; if (!rc) kref_put(&io_req->refcount, bnx2fc_cmd_release); } spin_unlock_bh(&tgt->tgt_lock); if (!rc) { BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n"); rc = FAILED; } else { BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n"); rc = SUCCESS; } tmf_err: return rc; } int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) { struct fc_lport *lport; struct bnx2fc_rport *tgt = io_req->tgt; struct fc_rport *rport = tgt->rport; struct fc_rport_priv *rdata = tgt->rdata; struct bnx2fc_interface *interface; struct fcoe_port *port; struct bnx2fc_cmd *abts_io_req; struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct fc_frame_header *fc_hdr; struct bnx2fc_mp_req *abts_req; int task_idx, index; u32 sid, did; u16 xid; int rc = SUCCESS; u32 r_a_tov = rdata->r_a_tov; /* called with tgt_lock held */ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); port = io_req->port; interface = port->priv; lport = port->lport; if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n"); rc = FAILED; goto abts_err; } if (rport == NULL) { printk(KERN_ERR PFX "initiate_abts: rport is NULL\n"); rc = FAILED; goto abts_err; } if (lport->state != LPORT_ST_READY || !(lport->link_up)) { printk(KERN_ERR PFX "initiate_abts: link is not ready\n"); rc = FAILED; goto abts_err; } abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS); if (!abts_io_req) { printk(KERN_ERR PFX "abts: couldn't allocate cmd\n"); rc = FAILED; goto abts_err; } /* Initialize rest of io_req fields */ abts_io_req->sc_cmd = NULL; abts_io_req->port = port; abts_io_req->tgt = tgt; abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */ abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req); memset(abts_req, 0, sizeof(struct bnx2fc_mp_req)); /* Fill FC header */ fc_hdr = &(abts_req->req_fc_hdr); /* Obtain oxid and rxid for the original exchange to be aborted */ fc_hdr->fh_ox_id = htons(io_req->xid); fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); sid = tgt->sid; did = rport->port_id; __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid, FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); xid = abts_io_req->xid; BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n"); task_idx = xid/BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ task_page = (struct fcoe_task_ctx_entry *) interface->hba->task_ctx[task_idx]; task = &(task_page[index]); bnx2fc_init_mp_task(abts_io_req, task); /* * ABTS task is a temporary task that will be cleaned up * irrespective of ABTS response. We need to start the timer * for the original exchange, as the CQE is posted for the original * IO request. * * Timer for ABTS is started only when it is originated by a * TM request. For the ABTS issued as part of ULP timeout, * scsi-ml maintains the timers. 
*/ /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/ bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov); /* Obtain free SQ entry */ bnx2fc_add_2_sq(tgt, xid); /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); abts_err: return rc; } int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, enum fc_rctl r_ctl) { struct bnx2fc_rport *tgt = orig_io_req->tgt; struct bnx2fc_interface *interface; struct fcoe_port *port; struct bnx2fc_cmd *seq_clnp_req; struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct bnx2fc_els_cb_arg *cb_arg = NULL; int task_idx, index; u16 xid; int rc = 0; BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n", orig_io_req->xid); kref_get(&orig_io_req->refcount); port = orig_io_req->port; interface = port->priv; cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); if (!cb_arg) { printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n"); rc = -ENOMEM; goto cleanup_err; } seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP); if (!seq_clnp_req) { printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n"); rc = -ENOMEM; kfree(cb_arg); goto cleanup_err; } /* Initialize rest of io_req fields */ seq_clnp_req->sc_cmd = NULL; seq_clnp_req->port = port; seq_clnp_req->tgt = tgt; seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */ xid = seq_clnp_req->xid; task_idx = xid/BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ task_page = (struct fcoe_task_ctx_entry *) interface->hba->task_ctx[task_idx]; task = &(task_page[index]); cb_arg->aborted_io_req = orig_io_req; cb_arg->io_req = seq_clnp_req; cb_arg->r_ctl = r_ctl; cb_arg->offset = offset; seq_clnp_req->cb_arg = cb_arg; printk(KERN_ERR PFX "call init_seq_cleanup_task\n"); bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset); /* Obtain free SQ entry */ bnx2fc_add_2_sq(tgt, xid); /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); cleanup_err: return rc; } int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) { struct bnx2fc_rport *tgt = io_req->tgt; struct bnx2fc_interface *interface; struct fcoe_port *port; struct bnx2fc_cmd *cleanup_io_req; struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; int task_idx, index; u16 xid, orig_xid; int rc = 0; /* ASSUMPTION: called with tgt_lock held */ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); port = io_req->port; interface = port->priv; cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); if (!cleanup_io_req) { printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n"); rc = -1; goto cleanup_err; } /* Initialize rest of io_req fields */ cleanup_io_req->sc_cmd = NULL; cleanup_io_req->port = port; cleanup_io_req->tgt = tgt; cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */ xid = cleanup_io_req->xid; task_idx = xid/BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ task_page = (struct fcoe_task_ctx_entry *) interface->hba->task_ctx[task_idx]; task = &(task_page[index]); orig_xid = io_req->xid; BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid); bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); /* Obtain free SQ entry */ bnx2fc_add_2_sq(tgt, xid); /* Set flag that cleanup request is pending with the firmware */ set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags); /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); cleanup_err: return rc; } /** * bnx2fc_eh_target_reset: Reset a target * * @sc_cmd: SCSI 
command * * Set from SCSI host template to send task mgmt command to the target * and wait for the response */ int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd) { return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); } /** * bnx2fc_eh_device_reset - Reset a single LUN * * @sc_cmd: SCSI command * * Set from SCSI host template to send task mgmt command to the target * and wait for the response */ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd) { return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); } static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req) __must_hold(&tgt->tgt_lock) { struct bnx2fc_rport *tgt = io_req->tgt; unsigned int time_left; init_completion(&io_req->cleanup_done); io_req->wait_for_cleanup_comp = 1; bnx2fc_initiate_cleanup(io_req); spin_unlock_bh(&tgt->tgt_lock); /* * Can't wait forever on cleanup response lest we let the SCSI error * handler wait forever */ time_left = wait_for_completion_timeout(&io_req->cleanup_done, BNX2FC_FW_TIMEOUT); if (!time_left) { BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n", __func__); /* * Put the extra reference to the SCSI command since it would * not have been returned in this case. */ kref_put(&io_req->refcount, bnx2fc_cmd_release); } spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_cleanup_comp = 0; return SUCCESS; } /** * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding * SCSI command * * @sc_cmd: SCSI_ML command pointer * * SCSI abort request handler */ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) { struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct bnx2fc_cmd *io_req; struct fc_lport *lport; struct bnx2fc_rport *tgt; int rc; unsigned int time_left; rc = fc_block_scsi_eh(sc_cmd); if (rc) return rc; lport = shost_priv(sc_cmd->device->host); if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { printk(KERN_ERR PFX "eh_abort: link not ready\n"); return FAILED; } tgt = (struct bnx2fc_rport *)&rp[1]; BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n"); spin_lock_bh(&tgt->tgt_lock); io_req = bnx2fc_priv(sc_cmd)->io_req; if (!io_req) { /* Command might have just completed */ printk(KERN_ERR PFX "eh_abort: io_req is NULL\n"); spin_unlock_bh(&tgt->tgt_lock); return SUCCESS; } BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", kref_read(&io_req->refcount)); /* Hold IO request across abort processing */ kref_get(&io_req->refcount); BUG_ON(tgt != io_req->tgt); /* Remove the io_req from the active_q. */ /* * Task Mgmt functions (LUN RESET & TGT RESET) will not * issue an ABTS on this particular IO req, as the * io_req is no longer in the active_q. */ if (tgt->flush_in_prog) { printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "flush in progress\n", io_req->xid); kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); return SUCCESS; } if (io_req->on_active_queue == 0) { printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "not on active_q\n", io_req->xid); /* * The IO is still with the FW. * Return failure and let SCSI-ml retry eh_abort. */ spin_unlock_bh(&tgt->tgt_lock); return FAILED; } /* * Only eh_abort processing will remove the IO from * active_cmd_q before processing the request. 
this is * done to avoid race conditions between IOs aborted * as part of task management completion and eh_abort * processing */ list_del_init(&io_req->link); io_req->on_active_queue = 0; /* Move IO req to retire queue */ list_add_tail(&io_req->link, &tgt->io_retire_queue); init_completion(&io_req->abts_done); init_completion(&io_req->cleanup_done); if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " "already in abts processing\n", io_req->xid); if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ /* * We don't want to hold off the upper layer timer so simply * cleanup the command and return that I/O was successfully * aborted. */ bnx2fc_abts_cleanup(io_req); /* This only occurs when an task abort was requested while ABTS is in progress. Setting the IO_CLEANUP flag will skip the RRQ process in the case when the fw generated SCSI_CMD cmpl was a result from the ABTS request rather than the CLEANUP request */ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); rc = FAILED; goto done; } /* Cancel the current timer running on this io_req */ if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); io_req->wait_for_abts_comp = 1; rc = bnx2fc_initiate_abts(io_req); if (rc == FAILED) { io_req->wait_for_cleanup_comp = 1; bnx2fc_initiate_cleanup(io_req); spin_unlock_bh(&tgt->tgt_lock); wait_for_completion(&io_req->cleanup_done); spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_cleanup_comp = 0; goto done; } spin_unlock_bh(&tgt->tgt_lock); /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */ time_left = wait_for_completion_timeout(&io_req->abts_done, msecs_to_jiffies(2 * rp->r_a_tov + 1)); if (time_left) BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for abts_done"); spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_abts_comp = 0; if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); rc = SUCCESS; } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags))) { /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); /* * Cleanup firmware residuals before returning control back * to SCSI ML. */ rc = bnx2fc_abts_cleanup(io_req); goto done; } else { /* * We come here even when there was a race condition * between timeout and abts completion, and abts * completion happens just in time. 
*/ BNX2FC_IO_DBG(io_req, "abort succeeded\n"); rc = SUCCESS; bnx2fc_scsi_done(io_req, DID_ABORT); kref_put(&io_req->refcount, bnx2fc_cmd_release); } done: /* release the reference taken in eh_abort */ kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_unlock_bh(&tgt->tgt_lock); return rc; } void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req, struct fcoe_task_ctx_entry *task, u8 rx_state) { struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg; struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req; u32 offset = cb_arg->offset; enum fc_rctl r_ctl = cb_arg->r_ctl; int rc = 0; struct bnx2fc_rport *tgt = orig_io_req->tgt; BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x" "cmd_type = %d\n", seq_clnp_req->xid, seq_clnp_req->cmd_type); if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) { printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n", seq_clnp_req->xid); goto free_cb_arg; } spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); spin_lock_bh(&tgt->tgt_lock); if (rc) printk(KERN_ERR PFX "clnup_compl: Unable to send SRR" " IO will abort\n"); seq_clnp_req->cb_arg = NULL; kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); free_cb_arg: kfree(cb_arg); return; } void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq) { BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl " "refcnt = %d, cmd_type = %d\n", kref_read(&io_req->refcount), io_req->cmd_type); /* * Test whether there is a cleanup request pending. If not just * exit. */ if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) return; /* * If we receive a cleanup completion for this request then the * firmware will not give us an abort completion for this request * so clear any ABTS pending flags. */ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) && !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) { set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags); if (io_req->wait_for_abts_comp) complete(&io_req->abts_done); } bnx2fc_scsi_done(io_req, DID_ERROR); kref_put(&io_req->refcount, bnx2fc_cmd_release); if (io_req->wait_for_cleanup_comp) complete(&io_req->cleanup_done); } void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq) { u32 r_ctl; u32 r_a_tov = FC_DEF_R_A_TOV; u8 issue_rrq = 0; struct bnx2fc_rport *tgt = io_req->tgt; BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x" "refcnt = %d, cmd_type = %d\n", io_req->xid, kref_read(&io_req->refcount), io_req->cmd_type); if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "Timer context finished processing" " this io\n"); return; } /* * If we receive an ABTS completion here then we will not receive * a cleanup completion so clear any cleanup pending flags. */ if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) { clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags); if (io_req->wait_for_cleanup_comp) complete(&io_req->cleanup_done); } /* Do not issue RRQ as this IO is already cleanedup */ if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags)) goto io_compl; /* * For ABTS issued due to SCSI eh_abort_handler, timeout * values are maintained by scsi-ml itself. Cancel timeout * in case ABTS issued as part of task management function * or due to FW error. 
 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_abts_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->abts_done);
	} else {
		/*
		 * We end up here when ABTS is issued in an
		 * asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	u64 tm_lun = sc_cmd->device->lun;
	u64 lun;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");

	/*
	 * Walk through the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
					" progress for this IO 0x%x\n",
					cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");

	/*
	 * Walk through the active_ios queue and ABORT every IO,
	 * since the whole target was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
				" for this IO 0x%x\n", cmd->xid);
	}
}

void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq,
			     unsigned char *rq_data)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct
scsi_cmnd *sc_cmd = io_req->sc_cmd; u64 *hdr; u64 *temp_hdr; void *rsp_buf; /* Called with tgt_lock held */ BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags))) set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); else { /* TM has already timed out and we got * delayed completion. Ignore completion * processing. */ return; } tm_req = &(io_req->mp_req); fc_hdr = &(tm_req->resp_fc_hdr); hdr = (u64 *)fc_hdr; temp_hdr = (u64 *) &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; hdr[0] = cpu_to_be64(temp_hdr[0]); hdr[1] = cpu_to_be64(temp_hdr[1]); hdr[2] = cpu_to_be64(temp_hdr[2]); tm_req->resp_len = task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; rsp_buf = tm_req->resp_buf; if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { bnx2fc_parse_fcp_rsp(io_req, (struct fcoe_fcp_rsp_payload *) rsp_buf, num_rq, rq_data); if (io_req->fcp_rsp_code == 0) { /* TM successful */ if (tm_req->tm_flags & FCP_TMF_LUN_RESET) bnx2fc_lun_reset_cmpl(io_req); else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) bnx2fc_tgt_reset_cmpl(io_req); } } else { printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", fc_hdr->fh_r_ctl); } if (!bnx2fc_priv(sc_cmd)->io_req) { printk(KERN_ERR PFX "tm_compl: io_req is NULL\n"); return; } switch (io_req->fcp_status) { case FC_GOOD: if (io_req->cdb_status == 0) { /* Good IO completion */ sc_cmd->result = DID_OK << 16; } else { /* Transport status is good, SCSI status not good */ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; } if (io_req->fcp_resid) scsi_set_resid(sc_cmd, io_req->fcp_resid); break; default: BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", io_req->fcp_status); break; } sc_cmd = io_req->sc_cmd; io_req->sc_cmd = NULL; /* check if the io_req exists in tgt's tmf_q */ if (io_req->on_tmf_queue) { list_del_init(&io_req->link); io_req->on_tmf_queue = 0; } else { printk(KERN_ERR PFX "Command not on active_cmd_queue!\n"); return; } bnx2fc_priv(sc_cmd)->io_req = NULL; scsi_done(sc_cmd); kref_put(&io_req->refcount, bnx2fc_cmd_release); if (io_req->wait_for_abts_comp) { BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); complete(&io_req->abts_done); } } static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, int bd_index) { struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; int frag_size, sg_frags; sg_frags = 0; while (sg_len) { if (sg_len >= BNX2FC_BD_SPLIT_SZ) frag_size = BNX2FC_BD_SPLIT_SZ; else frag_size = sg_len; bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; bd[bd_index + sg_frags].buf_len = (u16)frag_size; bd[bd_index + sg_frags].flags = 0; addr += (u64) frag_size; sg_frags++; sg_len -= frag_size; } return sg_frags; } static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) { struct bnx2fc_interface *interface = io_req->port->priv; struct bnx2fc_hba *hba = interface->hba; struct scsi_cmnd *sc = io_req->sc_cmd; struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; struct scatterlist *sg; int byte_count = 0; int sg_count = 0; int bd_count = 0; int sg_frags; unsigned int sg_len; u64 addr; int i; WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD); /* * Use dma_map_sg directly to ensure we're using the correct * dev struct off of pcidev. 
*/ sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), scsi_sg_count(sc), sc->sc_data_direction); scsi_for_each_sg(sc, sg, sg_count, i) { sg_len = sg_dma_len(sg); addr = sg_dma_address(sg); if (sg_len > BNX2FC_MAX_BD_LEN) { sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, bd_count); } else { sg_frags = 1; bd[bd_count].buf_addr_lo = addr & 0xffffffff; bd[bd_count].buf_addr_hi = addr >> 32; bd[bd_count].buf_len = (u16)sg_len; bd[bd_count].flags = 0; } bd_count += sg_frags; byte_count += sg_len; } if (byte_count != scsi_bufflen(sc)) printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), io_req->xid); return bd_count; } static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; int bd_count; if (scsi_sg_count(sc)) { bd_count = bnx2fc_map_sg(io_req); if (bd_count == 0) return -ENOMEM; } else { bd_count = 0; bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; bd[0].buf_len = bd[0].flags = 0; } io_req->bd_tbl->bd_valid = bd_count; /* * Return the command to ML if BD count exceeds the max number * that can be handled by FW. */ if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) { pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n", bd_count, io_req->xid); return -ENOMEM; } return 0; } static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; struct bnx2fc_interface *interface = io_req->port->priv; struct bnx2fc_hba *hba = interface->hba; /* * Use dma_unmap_sg directly to ensure we're using the correct * dev struct off of pcidev. */ if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc), scsi_sg_count(sc), sc->sc_data_direction); io_req->bd_tbl->bd_valid = 0; } } void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, struct fcp_cmnd *fcp_cmnd) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun); fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); fcp_cmnd->fc_cmdref = 0; fcp_cmnd->fc_pri_ta = 0; fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; fcp_cmnd->fc_flags = io_req->io_req_flags; fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; } static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, struct fcoe_fcp_rsp_payload *fcp_rsp, u8 num_rq, unsigned char *rq_data) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; u8 rsp_flags = fcp_rsp->fcp_flags.flags; u32 rq_buff_len = 0; int fcp_sns_len = 0; int fcp_rsp_len = 0; io_req->fcp_status = FC_GOOD; io_req->fcp_resid = 0; if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) io_req->fcp_resid = fcp_rsp->fcp_resid; io_req->scsi_comp_flags = rsp_flags; io_req->cdb_status = fcp_rsp->scsi_status_code; /* Fetch fcp_rsp_info and fcp_sns_info if available */ if (num_rq) { /* * We do not anticipate num_rq >1, as the linux defined * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO * 256 bytes of single rq buffer is good enough to hold this. */ if (rsp_flags & FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { fcp_rsp_len = rq_buff_len = fcp_rsp->fcp_rsp_len; } if (rsp_flags & FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { fcp_sns_len = fcp_rsp->fcp_sns_len; rq_buff_len += fcp_rsp->fcp_sns_len; } io_req->fcp_rsp_len = fcp_rsp_len; io_req->fcp_sns_len = fcp_sns_len; if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { /* Invalid sense sense length. 
*/ printk(KERN_ERR PFX "invalid sns length %d\n", rq_buff_len); /* reset rq_buff_len */ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; } /* fetch fcp_rsp_code */ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { /* Only for task management function */ io_req->fcp_rsp_code = rq_data[3]; BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); } /* fetch sense data */ rq_data += fcp_rsp_len; if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { printk(KERN_ERR PFX "Truncating sense buffer\n"); fcp_sns_len = SCSI_SENSE_BUFFERSIZE; } memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (fcp_sns_len) memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); } } /** * bnx2fc_queuecommand - Queuecommand function of the scsi template * * @host: The Scsi_Host the command was issued to * @sc_cmd: struct scsi_cmnd to be executed * * This is the IO strategy routine, called by SCSI-ML **/ int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) { struct fc_lport *lport = shost_priv(host); struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct bnx2fc_rport *tgt; struct bnx2fc_cmd *io_req; int rc = 0; int rval; rval = fc_remote_port_chkready(rport); if (rval) { sc_cmd->result = rval; scsi_done(sc_cmd); return 0; } if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd; } /* rport and tgt are allocated together, so tgt should be non-NULL */ tgt = (struct bnx2fc_rport *)&rp[1]; if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { /* * Session is not offloaded yet. Let SCSI-ml retry * the command. */ rc = SCSI_MLQUEUE_TARGET_BUSY; goto exit_qcmd; } if (tgt->retry_delay_timestamp) { if (time_after(jiffies, tgt->retry_delay_timestamp)) { tgt->retry_delay_timestamp = 0; } else { /* If retry_delay timer is active, flow off the ML */ rc = SCSI_MLQUEUE_TARGET_BUSY; goto exit_qcmd; } } spin_lock_bh(&tgt->tgt_lock); io_req = bnx2fc_cmd_alloc(tgt); if (!io_req) { rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd_tgtlock; } io_req->sc_cmd = sc_cmd; if (bnx2fc_post_io_req(tgt, io_req)) { printk(KERN_ERR PFX "Unable to post io_req\n"); rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd_tgtlock; } exit_qcmd_tgtlock: spin_unlock_bh(&tgt->tgt_lock); exit_qcmd: return rc; } void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq, unsigned char *rq_data) { struct fcoe_fcp_rsp_payload *fcp_rsp; struct bnx2fc_rport *tgt = io_req->tgt; struct scsi_cmnd *sc_cmd; u16 scope = 0, qualifier = 0; /* scsi_cmd_cmpl is called with tgt lock held */ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { /* we will not receive ABTS response for this IO */ BNX2FC_IO_DBG(io_req, "Timer context finished processing " "this scsi cmd\n"); if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags)) { BNX2FC_IO_DBG(io_req, "Actual completion after cleanup request cleaning up\n"); bnx2fc_process_cleanup_compl(io_req, task, num_rq); } return; } /* Cancel the timeout_work, as we received IO completion */ if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ sc_cmd = io_req->sc_cmd; if (sc_cmd == NULL) { printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n"); return; } /* Fetch fcp_rsp from task context and perform cmd completion */ fcp_rsp = (struct fcoe_fcp_rsp_payload *) &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); /* parse fcp_rsp and obtain sense data from RQ if available */ 
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data); if (!bnx2fc_priv(sc_cmd)->io_req) { printk(KERN_ERR PFX "io_req is NULL\n"); return; } if (io_req->on_active_queue) { list_del_init(&io_req->link); io_req->on_active_queue = 0; /* Move IO req to retire queue */ list_add_tail(&io_req->link, &tgt->io_retire_queue); } else { /* This should not happen, but could have been pulled * by bnx2fc_flush_active_ios(), or during a race * between command abort and (late) completion. */ BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); if (io_req->wait_for_abts_comp) if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags)) complete(&io_req->abts_done); } bnx2fc_unmap_sg_list(io_req); io_req->sc_cmd = NULL; switch (io_req->fcp_status) { case FC_GOOD: if (io_req->cdb_status == 0) { /* Good IO completion */ sc_cmd->result = DID_OK << 16; } else { /* Transport status is good, SCSI status not good */ BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" " fcp_resid = 0x%x\n", io_req->cdb_status, io_req->fcp_resid); sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || io_req->cdb_status == SAM_STAT_BUSY) { /* Newer array firmware with BUSY or * TASK_SET_FULL may return a status that needs * the scope bits masked. * Or a huge delay timestamp up to 27 minutes * can result. */ if (fcp_rsp->retry_delay_timer) { /* Upper 2 bits */ scope = fcp_rsp->retry_delay_timer & 0xC000; /* Lower 14 bits */ qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; } if (scope > 0 && qualifier > 0 && qualifier <= 0x3FEF) { /* Set the jiffies + * retry_delay_timer * 100ms * for the rport/tgt */ tgt->retry_delay_timestamp = jiffies + (qualifier * HZ / 10); } } } if (io_req->fcp_resid) scsi_set_resid(sc_cmd, io_req->fcp_resid); break; default: printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n", io_req->fcp_status); break; } bnx2fc_priv(sc_cmd)->io_req = NULL; scsi_done(sc_cmd); kref_put(&io_req->refcount, bnx2fc_cmd_release); } int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req) { struct fcoe_task_ctx_entry *task; struct fcoe_task_ctx_entry *task_page; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct fcoe_port *port = tgt->port; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct fc_lport *lport = port->lport; int task_idx, index; u16 xid; /* bnx2fc_post_io_req() is called with the tgt_lock held */ /* Initialize rest of io_req fields */ io_req->cmd_type = BNX2FC_SCSI_CMD; io_req->port = port; io_req->tgt = tgt; io_req->data_xfer_len = scsi_bufflen(sc_cmd); bnx2fc_priv(sc_cmd)->io_req = io_req; if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { io_req->io_req_flags = BNX2FC_READ; this_cpu_inc(lport->stats->InputRequests); this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len); } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { io_req->io_req_flags = BNX2FC_WRITE; this_cpu_inc(lport->stats->OutputRequests); this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len); } else { io_req->io_req_flags = 0; this_cpu_inc(lport->stats->ControlRequests); } xid = io_req->xid; /* Build buffer descriptor list for firmware from sg list */ if (bnx2fc_build_bd_list_from_sg(io_req)) { printk(KERN_ERR PFX "BD list creation failed\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); return -EAGAIN; } task_idx = xid / BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; /* Initialize task context for this IO request */ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; task = 
&(task_page[index]); bnx2fc_init_task(io_req, task); if (tgt->flush_in_prog) { printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); return -EAGAIN; } if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { printk(KERN_ERR PFX "Session not ready...post_io\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); return -EAGAIN; } /* Time IO req */ if (tgt->io_timeout) bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); /* Obtain free SQ entry */ bnx2fc_add_2_sq(tgt, xid); /* Enqueue the io_req to active_cmd_queue */ io_req->on_active_queue = 1; /* move io_req from pending_queue to active_queue */ list_add_tail(&io_req->link, &tgt->active_cmd_queue); /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); return 0; }
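/*
 * Editor's note (illustrative material, not part of the bnx2fc driver):
 * bnx2fc_cmd_mgr_alloc()/bnx2fc_cmd_alloc()/bnx2fc_cmd_release() above
 * implement a pre-allocated command pool with one free list per possible
 * CPU plus a reserved slow-path list (RESERVE_FREE_LIST_INDEX) for
 * ELS/task-management requests, and commands are returned to their list
 * through a kref release callback. The stand-alone user-space sketch
 * below only illustrates that data-structure layout under simplified
 * assumptions: no locking, a plain integer instead of struct kref, and
 * hypothetical names (cmd_pool, pool_cmd, pool_alloc, pool_put,
 * NR_CPUS_DEMO as a stand-in for num_possible_cpus()).
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS_DEMO   4               /* stand-in for num_possible_cpus() */
#define RESERVE_INDEX  NR_CPUS_DEMO    /* slow-path list, like RESERVE_FREE_LIST_INDEX */
#define POOL_SIZE      16              /* total cmds, like max_xid - min_xid + 1 */

struct pool_cmd {
	unsigned short xid;             /* exchange id, assigned once at init */
	int refcount;                   /* simplified stand-in for struct kref */
	struct pool_cmd *next;          /* free-list linkage */
};

struct cmd_pool {
	struct pool_cmd *free_list[NR_CPUS_DEMO + 1];   /* per CPU + reserved */
	struct pool_cmd cmds[POOL_SIZE];
};

static void pool_init(struct cmd_pool *pool, int nr_reserved)
{
	int i;

	for (i = 0; i <= NR_CPUS_DEMO; i++)
		pool->free_list[i] = NULL;

	for (i = 0; i < POOL_SIZE; i++) {
		struct pool_cmd *cmd = &pool->cmds[i];
		/* the last nr_reserved xids feed the reserved slow-path list */
		int idx = (i < POOL_SIZE - nr_reserved) ?
			  i % NR_CPUS_DEMO : RESERVE_INDEX;

		cmd->xid = (unsigned short)i;
		cmd->refcount = 0;
		cmd->next = pool->free_list[idx];
		pool->free_list[idx] = cmd;
	}
}

static struct pool_cmd *pool_alloc(struct cmd_pool *pool, int idx)
{
	struct pool_cmd *cmd = pool->free_list[idx];

	if (!cmd)
		return NULL;            /* caller backs off, cf. SCSI_MLQUEUE_HOST_BUSY */
	pool->free_list[idx] = cmd->next;
	cmd->refcount = 1;              /* cf. kref_init() in bnx2fc_cmd_alloc() */
	return cmd;
}

static void pool_put(struct cmd_pool *pool, struct pool_cmd *cmd, int idx)
{
	if (--cmd->refcount)            /* cf. kref_put(); recycle only on last ref */
		return;
	cmd->next = pool->free_list[idx];
	pool->free_list[idx] = cmd;     /* back onto the list it was drawn from */
}

int main(void)
{
	static struct cmd_pool pool;
	struct pool_cmd *fast, *slow;

	pool_init(&pool, 4);            /* reserve 4 xids for ELS/TM-style requests */

	fast = pool_alloc(&pool, 2);             /* fast path, "CPU 2" list */
	slow = pool_alloc(&pool, RESERVE_INDEX); /* slow path, reserved list */
	printf("fast xid=0x%x slow xid=0x%x\n",
	       fast ? fast->xid : 0xffff, slow ? slow->xid : 0xffff);

	pool_put(&pool, fast, 2);
	pool_put(&pool, slow, RESERVE_INDEX);
	return 0;
}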
linux-master
drivers/scsi/bnx2fc/bnx2fc_io.c
/* bnx2fc_fcoe.c: QLogic Linux FCoE offload driver. * This file contains the code that interacts with libfc, libfcoe, * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * * Copyright (c) 2008-2013 Broadcom Corporation * Copyright (c) 2014-2016 QLogic Corporation * Copyright (c) 2016-2017 Cavium Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi ([email protected]) */ #include "bnx2fc.h" #include <linux/ethtool.h> static struct list_head adapter_list; static struct list_head if_list; static u32 adapter_count; static DEFINE_MUTEX(bnx2fc_dev_lock); DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION #define DRV_MODULE_RELDATE "October 15, 2015" static char version[] = "QLogic FCoE Driver " DRV_MODULE_NAME \ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Bhanu Prakash Gollapudi <[email protected]>"); MODULE_DESCRIPTION("QLogic FCoE Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); #define BNX2FC_MAX_QUEUE_DEPTH 256 #define BNX2FC_MIN_QUEUE_DEPTH 32 #define FCOE_WORD_TO_BYTE 4 static struct scsi_transport_template *bnx2fc_transport_template; static struct scsi_transport_template *bnx2fc_vport_xport_template; struct workqueue_struct *bnx2fc_wq; /* bnx2fc structure needs only one instance of the fcoe_percpu_s structure. * Here the io threads are per cpu but the l2 thread is just one */ struct fcoe_percpu_s bnx2fc_global; static DEFINE_SPINLOCK(bnx2fc_global_lock); static struct cnic_ulp_ops bnx2fc_cnic_cb; static struct libfc_function_template bnx2fc_libfc_fcn_templ; static struct scsi_host_template bnx2fc_shost_template; static struct fc_function_template bnx2fc_transport_function; static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ; static struct fc_function_template bnx2fc_vport_xport_function; static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode); static void __bnx2fc_destroy(struct bnx2fc_interface *interface); static int bnx2fc_destroy(struct net_device *net_device); static int bnx2fc_enable(struct net_device *netdev); static int bnx2fc_disable(struct net_device *netdev); /* fcoe_syfs control interface handlers */ static int bnx2fc_ctlr_alloc(struct net_device *netdev); static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev); static void bnx2fc_recv_frame(struct sk_buff *skb); static void bnx2fc_start_disc(struct bnx2fc_interface *interface); static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); static int bnx2fc_lport_config(struct fc_lport *lport); static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba); static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); static void bnx2fc_port_destroy(struct fcoe_port *port); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device *phys_dev); static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface); static struct 
bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);

static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);

unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging,
		"Option to enable extended logging,\n"
		"\t\tDefault is 0 - no logging.\n"
		"\t\t0x01 - SCSI cmd error, cleanup.\n"
		"\t\t0x02 - Session setup, cleanup, etc.\n"
		"\t\t0x04 - lport events, link, mtu, etc.\n"
		"\t\t0x08 - ELS logs.\n"
		"\t\t0x10 - fcoe L2 frame related logs.\n"
		"\t\t0xff - LOG all messages.");

static uint bnx2fc_devloss_tmo;
module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
	"attached via bnx2fc.");

static uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
	"0xffff.");

static uint bnx2fc_queue_depth;
module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
	"attached via bnx2fc.");

static uint bnx2fc_log_fka;
module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
	"initiating a FIP keep alive when debug logging is enabled.");

static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
{
	return ((struct bnx2fc_interface *)
		((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}

static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
	struct fcoe_ctlr_device *ctlr_dev =
		fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
	struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);

	fcf_dev->vlan_id = fcoe->vlan_id;
}

static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
{
	struct fcoe_percpu_s *bg;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;

	bg = &bnx2fc_global;
	spin_lock_bh(&bg->fcoe_rx_list.lock);
	list = &bg->fcoe_rx_list;
	skb_queue_walk_safe(list, skb, next) {
		fr = fcoe_dev_from_skb(skb);
		if (fr->fr_dev == lp) {
			__skb_unlink(skb, list);
			kfree_skb(skb);
		}
	}
	spin_unlock_bh(&bg->fcoe_rx_list.lock);
}

int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	int rc;

	spin_lock(&bnx2fc_global_lock);
	rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
	spin_unlock(&bnx2fc_global_lock);

	return rc;
}

static void bnx2fc_abort_io(struct fc_lport *lport)
{
	/*
	 * This function is a no-op for bnx2fc, but we do
	 * not want to leave it as NULL either, as libfc
	 * can call the default function which is
	 * fc_fcp_abort_io.
*/ } static void bnx2fc_cleanup(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_rport *tgt; int i; BNX2FC_MISC_DBG("Entered %s\n", __func__); mutex_lock(&hba->hba_mutex); spin_lock_bh(&hba->hba_lock); for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { tgt = hba->tgt_ofld_list[i]; if (tgt) { /* Cleanup IOs belonging to requested vport */ if (tgt->port == port) { spin_unlock_bh(&hba->hba_lock); BNX2FC_TGT_DBG(tgt, "flush/cleanup\n"); bnx2fc_flush_active_ios(tgt); spin_lock_bh(&hba->hba_lock); } } } spin_unlock_bh(&hba->hba_lock); mutex_unlock(&hba->hba_mutex); } static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt, struct fc_frame *fp) { struct fc_rport_priv *rdata = tgt->rdata; struct fc_frame_header *fh; int rc = 0; fh = fc_frame_header_get(fp); BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, " "r_ctl = 0x%x\n", rdata->ids.port_id, ntohs(fh->fh_ox_id), fh->fh_r_ctl); if ((fh->fh_type == FC_TYPE_ELS) && (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { switch (fc_frame_payload_op(fp)) { case ELS_ADISC: rc = bnx2fc_send_adisc(tgt, fp); break; case ELS_LOGO: rc = bnx2fc_send_logo(tgt, fp); break; case ELS_RLS: rc = bnx2fc_send_rls(tgt, fp); break; default: break; } } else if ((fh->fh_type == FC_TYPE_BLS) && (fh->fh_r_ctl == FC_RCTL_BA_ABTS)) BNX2FC_TGT_DBG(tgt, "ABTS frame\n"); else { BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x " "rctl 0x%x thru non-offload path\n", fh->fh_type, fh->fh_r_ctl); return -ENODEV; } if (rc) return -ENOMEM; else return 0; } /** * bnx2fc_xmit - bnx2fc's FCoE frame transmit function * * @lport: the associated local port * @fp: the fc_frame to be transmitted */ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) { struct ethhdr *eh; struct fcoe_crc_eof *cp; struct sk_buff *skb; struct fc_frame_header *fh; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct bnx2fc_hba *hba; struct fcoe_port *port; struct fcoe_hdr *hp; struct bnx2fc_rport *tgt; u8 sof, eof; u32 crc; unsigned int hlen, tlen, elen; int wlen, rc = 0; port = (struct fcoe_port *)lport_priv(lport); interface = port->priv; ctlr = bnx2fc_to_ctlr(interface); hba = interface->hba; fh = fc_frame_header_get(fp); skb = fp_skb(fp); if (!lport->link_up) { BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n"); kfree_skb(skb); return 0; } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { if (!ctlr->sel_fcf) { BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); kfree_skb(skb); return -EINVAL; } if (fcoe_ctlr_els_send(ctlr, lport, skb)) return 0; } sof = fr_sof(fp); eof = fr_eof(fp); /* * Snoop the frame header to check if the frame is for * an offloaded session */ /* * tgt_ofld_list access is synchronized using * both hba mutex and hba lock. Atleast hba mutex or * hba lock needs to be held for read access. 
*/ spin_lock_bh(&hba->hba_lock); tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id)); if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { /* This frame is for offloaded session */ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session " "port_id = 0x%x\n", ntoh24(fh->fh_d_id)); spin_unlock_bh(&hba->hba_lock); rc = bnx2fc_xmit_l2_frame(tgt, fp); if (rc != -ENODEV) { kfree_skb(skb); return rc; } } else { spin_unlock_bh(&hba->hba_lock); } elen = sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; skb->ip_summed = CHECKSUM_NONE; crc = fcoe_fc_crc(fp); /* copy port crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; if (bnx2fc_get_paged_crc_eof(skb, tlen)) { kfree_skb(skb); return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } memset(cp, 0, sizeof(*cp)); cp->fcoe_eof = eof; cp->fcoe_crc32 = cpu_to_le32(~crc); if (skb_is_nonlinear(skb)) { kunmap_atomic(cp); cp = NULL; } /* adjust skb network/transport offsets to match mac/fcoe/port */ skb_push(skb, elen + hlen); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); skb->dev = interface->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); eh->h_proto = htons(ETH_P_FCOE); if (ctlr->map_dest) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN); else memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); if (FC_FCOE_VER) FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); hp->fcoe_sof = sof; /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ if (lport->seq_offload && fr_max_payload(fp)) { skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; skb_shinfo(skb)->gso_size = fr_max_payload(fp); } else { skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; } /*update tx stats */ this_cpu_inc(lport->stats->TxFrames); this_cpu_add(lport->stats->TxWords, wlen); /* send down to lld */ fr_dev(fp) = lport; if (port->fcoe_pending_queue.qlen) fcoe_check_wait_queue(lport, skb); else if (fcoe_start_io(skb)) fcoe_check_wait_queue(lport, skb); return 0; } /** * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ * * @skb: the receive socket buffer * @dev: associated net device * @ptype: context * @olddev: last device * * This function receives the packet and builds FC frame and passes it up */ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *olddev) { struct fc_lport *lport; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; struct sk_buff *tmp_skb; interface = container_of(ptype, struct bnx2fc_interface, fcoe_packet_type); ctlr = bnx2fc_to_ctlr(interface); lport = ctlr->lp; if (unlikely(lport == NULL)) { printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); goto err; } tmp_skb = skb_share_check(skb, GFP_ATOMIC); if (!tmp_skb) goto err; skb = tmp_skb; if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); goto err; } /* * Check for minimum frame length, and make sure required FCoE * and FC headers are pulled into 
the linear data area. */ if (unlikely((skb->len < FCOE_MIN_FRAME) || !pskb_may_pull(skb, FCOE_HEADER_LEN))) goto err; skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); fr = fcoe_dev_from_skb(skb); fr->fr_dev = lport; bg = &bnx2fc_global; spin_lock(&bg->fcoe_rx_list.lock); __skb_queue_tail(&bg->fcoe_rx_list, skb); if (bg->fcoe_rx_list.qlen == 1) wake_up_process(bg->kthread); spin_unlock(&bg->fcoe_rx_list.lock); return 0; err: kfree_skb(skb); return -1; } static int bnx2fc_l2_rcv_thread(void *arg) { struct fcoe_percpu_s *bg = arg; struct sk_buff *skb; set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); spin_lock_bh(&bg->fcoe_rx_list.lock); while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { spin_unlock_bh(&bg->fcoe_rx_list.lock); bnx2fc_recv_frame(skb); spin_lock_bh(&bg->fcoe_rx_list.lock); } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&bg->fcoe_rx_list.lock); } __set_current_state(TASK_RUNNING); return 0; } static void bnx2fc_recv_frame(struct sk_buff *skb) { u64 crc_err; u32 fr_len, fr_crc; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_frame_header *fh; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; struct fc_lport *vn_port; struct fcoe_port *port, *phys_port; u8 *mac = NULL; u8 *dest_mac = NULL; struct fcoe_hdr *hp; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; if (unlikely(lport == NULL)) { printk(KERN_ERR PFX "Invalid lport struct\n"); kfree_skb(skb); return; } if (skb_is_nonlinear(skb)) skb_linearize(skb); mac = eth_hdr(skb)->h_source; dest_mac = eth_hdr(skb)->h_dest; /* Pull the header */ hp = (struct fcoe_hdr *) skb_network_header(skb); fh = (struct fc_frame_header *) skb_transport_header(skb); skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); this_cpu_inc(lport->stats->RxFrames); this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE); fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { kfree_skb(skb); return; } phys_port = lport_priv(lport); interface = phys_port->priv; ctlr = bnx2fc_to_ctlr(interface); fh = fc_frame_header_get(fp); if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n", dest_mac); kfree_skb(skb); return; } vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); if (vn_port) { port = lport_priv(vn_port); if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); kfree_skb(skb); return; } } if (ctlr->state) { if (!ether_addr_equal(mac, ctlr->dest_addr)) { BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n", mac, ctlr->dest_addr); kfree_skb(skb); return; } } if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
We dont this in L2 path */ kfree_skb(skb); return; } if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) { switch (fc_frame_payload_op(fp)) { case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ kfree_skb(skb); return; } break; } } if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ kfree_skb(skb); return; } /* * If the destination ID from the frame header does not match what we * have on record for lport and the search for a NPIV port came up * empty then this is not addressed to our port so simply drop it. */ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", lport->port_id, ntoh24(fh->fh_d_id)); kfree_skb(skb); return; } fr_crc = le32_to_cpu(fr_crc(fp)); if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount); if (crc_err < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); kfree_skb(skb); return; } fc_exch_recv(lport, fp); } /** * bnx2fc_percpu_io_thread - thread per cpu for ios * * @arg: ptr to bnx2fc_percpu_info structure */ static int bnx2fc_percpu_io_thread(void *arg) { struct bnx2fc_percpu_s *p = arg; struct bnx2fc_work *work, *tmp; LIST_HEAD(work_list); set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); spin_lock_bh(&p->fp_work_lock); while (!list_empty(&p->work_list)) { list_splice_init(&p->work_list, &work_list); spin_unlock_bh(&p->fp_work_lock); list_for_each_entry_safe(work, tmp, &work_list, list) { list_del_init(&work->list); bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data, work->num_rq, work->task); kfree(work); } spin_lock_bh(&p->fp_work_lock); } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&p->fp_work_lock); } __set_current_state(TASK_RUNNING); return 0; } static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *bnx2fc_stats; struct fc_lport *lport = shost_priv(shost); struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct fcoe_statistics_params *fw_stats; int rc = 0; fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer; if (!fw_stats) return NULL; mutex_lock(&hba->hba_stats_mutex); bnx2fc_stats = fc_get_host_stats(shost); init_completion(&hba->stat_req_done); if (bnx2fc_send_stat_req(hba)) goto unlock_stats_mutex; rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); if (!rc) { BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); goto unlock_stats_mutex; } BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt); bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt; BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt); bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4); BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt); bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt; BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt); bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4); bnx2fc_stats->dumped_frames = 0; bnx2fc_stats->lip_count = 0; bnx2fc_stats->nos_count = 0; bnx2fc_stats->loss_of_sync_count = 0; bnx2fc_stats->loss_of_signal_count = 0; bnx2fc_stats->prim_seq_protocol_err_count = 0; memcpy(&hba->prev_stats, hba->stats_buffer, sizeof(struct fcoe_statistics_params)); 
unlock_stats_mutex: mutex_unlock(&hba->hba_stats_mutex); return bnx2fc_stats; } static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; shost->max_lun = bnx2fc_max_luns; shost->max_id = BNX2FC_MAX_FCP_TGT; shost->max_channel = 0; if (lport->vport) shost->transportt = bnx2fc_vport_xport_template; else shost->transportt = bnx2fc_transport_template; /* Add the new host to SCSI-ml */ rc = scsi_add_host(lport->host, dev); if (rc) { printk(KERN_ERR PFX "Error on scsi_add_host\n"); return rc; } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; snprintf(fc_host_symbolic_name(lport->host), 256, "%s (QLogic %s) v%s over %s", BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, interface->netdev->name); return 0; } static int bnx2fc_link_ok(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct net_device *dev = hba->phys_dev; int rc = 0; if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); else { set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); rc = -1; } return rc; } /** * bnx2fc_get_link_state - get network link state * * @hba: adapter instance pointer * * updates adapter structure flag based on netdev state */ void bnx2fc_get_link_state(struct bnx2fc_hba *hba) { if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state)) set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); else clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); } static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct fcoe_port *port; u64 wwnn, wwpn; port = lport_priv(lport); interface = port->priv; ctlr = bnx2fc_to_ctlr(interface); hba = interface->hba; /* require support for get_pauseparam ethtool op. */ if (!hba->phys_dev->ethtool_ops || !hba->phys_dev->ethtool_ops->get_pauseparam) return -EOPNOTSUPP; if (fc_set_mfs(lport, BNX2FC_MFS)) return -EINVAL; skb_queue_head_init(&port->fcoe_pending_queue); port->fcoe_pending_queue_active = 0; timer_setup(&port->timer, fcoe_queue_timer, 0); fcoe_link_speed_update(lport); if (!lport->vport) { if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0); BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); fc_set_wwnn(lport, wwnn); if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 2, 0); BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); fc_set_wwpn(lport, wwpn); } return 0; } static void bnx2fc_destroy_timer(struct timer_list *t) { struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer); printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " "Destroy compl not received!!\n"); set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); } /** * bnx2fc_indicate_netevent - Generic netdev event handler * * @context: adapter structure pointer * @event: event type * @vlan_id: vlan id - associated vlan id with this event * * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans. 
*/ static void bnx2fc_indicate_netevent(void *context, unsigned long event, u16 vlan_id) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; struct fcoe_ctlr_device *cdev; struct fc_lport *lport; struct fc_lport *vport; struct bnx2fc_interface *interface, *tmp; struct fcoe_ctlr *ctlr; int wait_for_upload = 0; u32 link_possible = 1; if (vlan_id != 0 && event != NETDEV_UNREGISTER) return; switch (event) { case NETDEV_UP: if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) printk(KERN_ERR "indicate_netevent: "\ "hba is not UP!!\n"); break; case NETDEV_DOWN: clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); link_possible = 0; break; case NETDEV_GOING_DOWN: set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); link_possible = 0; break; case NETDEV_CHANGE: break; case NETDEV_UNREGISTER: if (!vlan_id) return; mutex_lock(&bnx2fc_dev_lock); list_for_each_entry_safe(interface, tmp, &if_list, list) { if (interface->hba == hba && interface->vlan_id == (vlan_id & VLAN_VID_MASK)) __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); return; default: return; } mutex_lock(&bnx2fc_dev_lock); list_for_each_entry(interface, &if_list, list) { if (interface->hba != hba) continue; ctlr = bnx2fc_to_ctlr(interface); lport = ctlr->lp; BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", interface->netdev->name, event); fcoe_link_speed_update(lport); cdev = fcoe_ctlr_to_ctlr_dev(ctlr); if (link_possible && !bnx2fc_link_ok(lport)) { switch (cdev->enabled) { case FCOE_CTLR_DISABLED: pr_info("Link up while interface is disabled.\n"); break; case FCOE_CTLR_ENABLED: case FCOE_CTLR_UNUSED: /* Reset max recv frame size to default */ fc_set_mfs(lport, BNX2FC_MFS); /* * ctlr link up will only be handled during * enable to avoid sending discovery * solicitation on a stale vlan */ if (interface->enabled) fcoe_ctlr_link_up(ctlr); } } else if (fcoe_ctlr_link_down(ctlr)) { switch (cdev->enabled) { case FCOE_CTLR_DISABLED: pr_info("Link down while interface is disabled.\n"); break; case FCOE_CTLR_ENABLED: case FCOE_CTLR_UNUSED: mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; mutex_unlock(&lport->lp_mutex); fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; this_cpu_inc(lport->stats->LinkFailureCount); fcoe_clean_pending_queue(lport); wait_for_upload = 1; } } } mutex_unlock(&bnx2fc_dev_lock); if (wait_for_upload) { clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); init_waitqueue_head(&hba->shutdown_wait); BNX2FC_MISC_DBG("indicate_netevent " "num_ofld_sess = %d\n", hba->num_ofld_sess); hba->wait_for_link_down = 1; wait_event_interruptible(hba->shutdown_wait, (hba->num_ofld_sess == 0)); BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n", hba->num_ofld_sess); hba->wait_for_link_down = 0; if (signal_pending(current)) flush_signals(current); } } static int bnx2fc_libfc_config(struct fc_lport *lport) { /* Set the function pointers set by bnx2fc driver */ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ, sizeof(struct libfc_function_template)); fc_elsct_init(lport); fc_exch_init(lport); fc_disc_init(lport); fc_disc_config(lport, lport); return 0; } static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba) { int fcoe_min_xid, fcoe_max_xid; fcoe_min_xid = hba->max_xid + 1; if (nr_cpu_ids <= 2) fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET; else fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET; if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid, 
fcoe_max_xid, NULL)) { printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); return -ENOMEM; } return 0; } static int bnx2fc_lport_config(struct fc_lport *lport) { lport->link_up = 0; lport->qfull = 0; lport->max_retry_count = BNX2FC_MAX_RETRY_CNT; lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT; lport->e_d_tov = 2 * 1000; lport->r_a_tov = 10 * 1000; lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); lport->does_npiv = 1; memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA; /* alloc stats structure */ if (fc_lport_init_stats(lport)) return -ENOMEM; /* Finish fc_lport configuration */ fc_lport_config(lport); return 0; } /** * bnx2fc_fip_recv - handle a received FIP frame. * * @skb: the received skb * @dev: associated &net_device * @ptype: the &packet_type structure which was used to register this handler. * @orig_dev: original receive &net_device, in case @ dev is a bond. * * Returns: 0 for success */ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; interface = container_of(ptype, struct bnx2fc_interface, fip_packet_type); ctlr = bnx2fc_to_ctlr(interface); fcoe_ctlr_recv(ctlr, skb); return 0; } /** * bnx2fc_update_src_mac - Update Ethernet MAC filters. * * @lport: The local port * @addr: Location of data to copy * * Remove any previously-set unicast MAC filter. * Add secondary FCoE MAC address filter for our OUI. */ static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr) { struct fcoe_port *port = lport_priv(lport); memcpy(port->data_src_addr, addr, ETH_ALEN); } /** * bnx2fc_get_src_mac - return the ethernet source address for an lport * * @lport: libfc port */ static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) { struct fcoe_port *port; port = (struct fcoe_port *)lport_priv(lport); return port->data_src_addr; } /** * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame. * * @fip: FCoE controller. * @skb: FIP Packet. 
*/ static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; struct ethhdr *eth_hdr; u16 op; u8 sub; fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); eth_hdr = (struct ethhdr *)skb_mac_header(skb); op = ntohs(fiph->fip_op); sub = fiph->fip_subcode; if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka) BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n", eth_hdr->h_source, eth_hdr->h_dest); skb->dev = bnx2fc_from_ctlr(fip)->netdev; dev_queue_xmit(skb); } static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fcoe_port *port = lport_priv(n_port); struct bnx2fc_interface *interface = port->priv; struct net_device *netdev = interface->netdev; struct fc_lport *vn_port; int rc; char buf[32]; rc = fcoe_validate_vport_create(vport); if (rc) { fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); printk(KERN_ERR PFX "Failed to create vport, " "WWPN (0x%s) already exists\n", buf); return rc; } if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "vn ports cannot be created on" "this interface\n"); return -EIO; } rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); vn_port = bnx2fc_if_create(interface, &vport->dev, 1); mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); if (!vn_port) { printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", netdev->name); return -EIO; } if (bnx2fc_devloss_tmo) fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo; if (disabled) { fc_vport_set_state(vport, FC_VPORT_DISABLED); } else { vn_port->boot_time = jiffies; fc_lport_init(vn_port); fc_fabric_login(vn_port); fc_vport_setlink(vn_port); } return 0; } static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport) { struct bnx2fc_lport *blport, *tmp; spin_lock_bh(&hba->hba_lock); list_for_each_entry_safe(blport, tmp, &hba->vports, list) { if (blport->lport == lport) { list_del(&blport->list); kfree(blport); } } spin_unlock_bh(&hba->hba_lock); } static int bnx2fc_vport_destroy(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; struct fcoe_port *port = lport_priv(vn_port); struct bnx2fc_interface *interface = port->priv; struct fc_lport *v_port; bool found = false; mutex_lock(&n_port->lp_mutex); list_for_each_entry(v_port, &n_port->vports, list) if (v_port->vport == vport) { found = true; break; } if (!found) { mutex_unlock(&n_port->lp_mutex); return -ENOENT; } list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); bnx2fc_free_vport(interface->hba, port->lport); bnx2fc_port_shutdown(port->lport); bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); return 0; } static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) { struct fc_lport *lport = vport->dd_data; if (disable) { fc_vport_set_state(vport, FC_VPORT_DISABLED); fc_fabric_logoff(lport); } else { lport->boot_time = jiffies; fc_fabric_login(lport); fc_vport_setlink(lport); } return 0; } static int bnx2fc_interface_setup(struct bnx2fc_interface *interface) { struct net_device *netdev = interface->netdev; struct net_device *physdev = interface->hba->phys_dev; struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct netdev_hw_addr *ha; int sel_san_mac = 0; /* setup Source MAC Address */ rcu_read_lock(); for_each_dev_addr(physdev, ha) { BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ", ha->type); 
printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0], ha->addr[1], ha->addr[2], ha->addr[3], ha->addr[4], ha->addr[5]); if ((ha->type == NETDEV_HW_ADDR_T_SAN) && (is_valid_ether_addr(ha->addr))) { memcpy(ctlr->ctl_src_addr, ha->addr, ETH_ALEN); sel_san_mac = 1; BNX2FC_MISC_DBG("Found SAN MAC\n"); } } rcu_read_unlock(); if (!sel_san_mac) return -ENODEV; interface->fip_packet_type.func = bnx2fc_fip_recv; interface->fip_packet_type.type = htons(ETH_P_FIP); interface->fip_packet_type.dev = netdev; dev_add_pack(&interface->fip_packet_type); interface->fcoe_packet_type.func = bnx2fc_rcv; interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); interface->fcoe_packet_type.dev = netdev; dev_add_pack(&interface->fcoe_packet_type); return 0; } static int bnx2fc_attach_transport(void) { bnx2fc_transport_template = fc_attach_transport(&bnx2fc_transport_function); if (bnx2fc_transport_template == NULL) { printk(KERN_ERR PFX "Failed to attach FC transport\n"); return -ENODEV; } bnx2fc_vport_xport_template = fc_attach_transport(&bnx2fc_vport_xport_function); if (bnx2fc_vport_xport_template == NULL) { printk(KERN_ERR PFX "Failed to attach FC transport for vport\n"); fc_release_transport(bnx2fc_transport_template); bnx2fc_transport_template = NULL; return -ENODEV; } return 0; } static void bnx2fc_release_transport(void) { fc_release_transport(bnx2fc_transport_template); fc_release_transport(bnx2fc_vport_xport_template); bnx2fc_transport_template = NULL; bnx2fc_vport_xport_template = NULL; } static void bnx2fc_interface_release(struct kref *kref) { struct fcoe_ctlr_device *ctlr_dev; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct net_device *netdev; interface = container_of(kref, struct bnx2fc_interface, kref); BNX2FC_MISC_DBG("Interface is being released\n"); ctlr = bnx2fc_to_ctlr(interface); ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); netdev = interface->netdev; /* tear-down FIP controller */ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) fcoe_ctlr_destroy(ctlr); fcoe_ctlr_device_delete(ctlr_dev); dev_put(netdev); module_put(THIS_MODULE); } static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface) { kref_get(&interface->kref); } static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface) { kref_put(&interface->kref, bnx2fc_interface_release); } static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba) { /* Free the command manager */ if (hba->cmd_mgr) { bnx2fc_cmd_mgr_free(hba->cmd_mgr); hba->cmd_mgr = NULL; } kfree(hba->tgt_ofld_list); bnx2fc_unbind_pcidev(hba); kfree(hba); } /** * bnx2fc_hba_create - create a new bnx2fc hba * * @cnic: pointer to cnic device * * Creates a new FCoE hba on the given device. 
* */ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) { struct bnx2fc_hba *hba; struct fcoe_capabilities *fcoe_cap; int rc; hba = kzalloc(sizeof(*hba), GFP_KERNEL); if (!hba) { printk(KERN_ERR PFX "Unable to allocate hba structure\n"); return NULL; } spin_lock_init(&hba->hba_lock); mutex_init(&hba->hba_mutex); mutex_init(&hba->hba_stats_mutex); hba->cnic = cnic; hba->max_tasks = cnic->max_fcoe_exchanges; hba->elstm_xids = (hba->max_tasks / 2); hba->max_outstanding_cmds = hba->elstm_xids; hba->max_xid = (hba->max_tasks - 1); rc = bnx2fc_bind_pcidev(hba); if (rc) { printk(KERN_ERR PFX "create_adapter: bind error\n"); goto bind_err; } hba->phys_dev = cnic->netdev; hba->next_conn_id = 0; hba->tgt_ofld_list = kcalloc(BNX2FC_NUM_MAX_SESS, sizeof(struct bnx2fc_rport *), GFP_KERNEL); if (!hba->tgt_ofld_list) { printk(KERN_ERR PFX "Unable to allocate tgt offload list\n"); goto tgtofld_err; } hba->num_ofld_sess = 0; hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba); if (!hba->cmd_mgr) { printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); goto cmgr_err; } fcoe_cap = &hba->fcoe_cap; fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES << FCOE_IOS_PER_CONNECTION_SHIFT; fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << FCOE_LOGINS_PER_PORT_SHIFT; fcoe_cap->capability2 = hba->max_outstanding_cmds << FCOE_NUMBER_OF_EXCHANGES_SHIFT; fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << FCOE_NPIV_WWN_PER_PORT_SHIFT; fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << FCOE_TARGETS_SUPPORTED_SHIFT; fcoe_cap->capability3 |= hba->max_outstanding_cmds << FCOE_OUTSTANDING_COMMANDS_SHIFT; fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; init_waitqueue_head(&hba->shutdown_wait); init_waitqueue_head(&hba->destroy_wait); INIT_LIST_HEAD(&hba->vports); return hba; cmgr_err: kfree(hba->tgt_ofld_list); tgtofld_err: bnx2fc_unbind_pcidev(hba); bind_err: kfree(hba); return NULL; } static struct bnx2fc_interface * bnx2fc_interface_create(struct bnx2fc_hba *hba, struct net_device *netdev, enum fip_mode fip_mode) { struct fcoe_ctlr_device *ctlr_dev; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; int size; int rc = 0; size = (sizeof(*interface) + sizeof(struct fcoe_ctlr)); ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ, size); if (!ctlr_dev) { printk(KERN_ERR PFX "Unable to allocate interface structure\n"); return NULL; } ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr->cdev = ctlr_dev; interface = fcoe_ctlr_priv(ctlr); dev_hold(netdev); kref_init(&interface->kref); interface->hba = hba; interface->netdev = netdev; /* Initialize FIP */ fcoe_ctlr_init(ctlr, fip_mode); ctlr->send = bnx2fc_fip_send; ctlr->update_mac = bnx2fc_update_src_mac; ctlr->get_src_addr = bnx2fc_get_src_mac; set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); rc = bnx2fc_interface_setup(interface); if (!rc) return interface; fcoe_ctlr_destroy(ctlr); dev_put(netdev); fcoe_ctlr_device_delete(ctlr_dev); return NULL; } /** * bnx2fc_if_create - Create FCoE instance on a given interface * * @interface: FCoE interface to create a local port on * @parent: Device pointer to be the parent in sysfs for the SCSI host * @npiv: Indicates if the port is vport or not * * Creates a fc_lport instance and a Scsi_Host instance and configure them. 
* * Returns: Allocated fc_lport or an error pointer */ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport, *n_port; struct fcoe_port *port; struct Scsi_Host *shost; struct fc_vport *vport = dev_to_vport(parent); struct bnx2fc_lport *blport; struct bnx2fc_hba *hba = interface->hba; int rc = 0; blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); if (!blport) { BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n"); return NULL; } /* Allocate Scsi_Host structure */ bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds; if (!npiv) lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); else lport = libfc_vport_create(vport, sizeof(*port)); if (!lport) { printk(KERN_ERR PFX "could not allocate scsi host structure\n"); goto free_blport; } shost = lport->host; port = lport_priv(lport); port->lport = lport; port->priv = interface; port->get_netdev = bnx2fc_netdev; /* Configure fcoe_port */ rc = bnx2fc_lport_config(lport); if (rc) goto lp_config_err; if (npiv) { printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", vport->node_name, vport->port_name); fc_set_wwnn(lport, vport->node_name); fc_set_wwpn(lport, vport->port_name); } /* Configure netdev and networking properties of the lport */ rc = bnx2fc_net_config(lport, interface->netdev); if (rc) { printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); goto lp_config_err; } rc = bnx2fc_shost_config(lport, parent); if (rc) { printk(KERN_ERR PFX "Couldn't configure shost for %s\n", interface->netdev->name); goto lp_config_err; } /* Initialize the libfc library */ rc = bnx2fc_libfc_config(lport); if (rc) { printk(KERN_ERR PFX "Couldn't configure libfc\n"); goto shost_err; } fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; if (bnx2fc_devloss_tmo) fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo; /* Allocate exchange manager */ if (!npiv) rc = bnx2fc_em_config(lport, hba); else { shost = vport_to_shost(vport); n_port = shost_priv(shost); rc = fc_exch_mgr_list_clone(n_port, lport); } if (rc) { printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); goto shost_err; } bnx2fc_interface_get(interface); spin_lock_bh(&hba->hba_lock); blport->lport = lport; list_add_tail(&blport->list, &hba->vports); spin_unlock_bh(&hba->hba_lock); return lport; shost_err: scsi_remove_host(shost); lp_config_err: scsi_host_put(lport->host); free_blport: kfree(blport); return NULL; } static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface) { /* Dont listen for Ethernet packets anymore */ __dev_remove_pack(&interface->fcoe_packet_type); __dev_remove_pack(&interface->fip_packet_type); synchronize_net(); } static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport = ctlr->lp; struct fcoe_port *port = lport_priv(lport); struct bnx2fc_hba *hba = interface->hba; /* Stop the transmit retry timer */ del_timer_sync(&port->timer); /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); bnx2fc_net_cleanup(interface); bnx2fc_free_vport(hba, lport); } static void bnx2fc_if_destroy(struct fc_lport *lport) { /* Free queued packets for the receive thread */ bnx2fc_clean_rx_queue(lport); /* Detach from scsi-ml */ fc_remove_host(lport->host); scsi_remove_host(lport->host); /* * Note that only the physical lport will have the exchange manager. 
* for vports, this function is NOP */ fc_exch_mgr_free(lport); /* Free memory used by statistical counters */ fc_lport_free_stats(lport); /* Release Scsi_Host */ scsi_host_put(lport->host); } static void __bnx2fc_destroy(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport = ctlr->lp; struct fcoe_port *port = lport_priv(lport); bnx2fc_interface_cleanup(interface); bnx2fc_stop(interface); list_del(&interface->list); bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); } /** * bnx2fc_destroy - Destroy a bnx2fc FCoE interface * * @netdev: The net device that the FCoE interface is on * * Called from sysfs. * * Returns: 0 for success */ static int bnx2fc_destroy(struct net_device *netdev) { struct bnx2fc_interface *interface = NULL; struct workqueue_struct *timer_work_queue; struct fcoe_ctlr *ctlr; int rc = 0; rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); if (!interface || !ctlr->lp) { rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); goto netdev_err; } timer_work_queue = interface->timer_work_queue; __bnx2fc_destroy(interface); destroy_workqueue(timer_work_queue); netdev_err: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } static void bnx2fc_port_destroy(struct fcoe_port *port) { struct fc_lport *lport; lport = port->lport; BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); bnx2fc_if_destroy(lport); } static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) { bnx2fc_free_fw_resc(hba); bnx2fc_free_task_ctx(hba); } /** * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated * pci structure * * @hba: Adapter instance */ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) { if (bnx2fc_setup_task_ctx(hba)) goto mem_err; if (bnx2fc_setup_fw_resc(hba)) goto mem_err; return 0; mem_err: bnx2fc_unbind_adapter_devices(hba); return -ENOMEM; } static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; pdev = hba->pcidev = cnic->pcidev; if (!hba->pcidev) return -ENODEV; switch (pdev->device) { case PCI_DEVICE_ID_NX2_57710: strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57711: strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57712: case PCI_DEVICE_ID_NX2_57712_MF: case PCI_DEVICE_ID_NX2_57712_VF: strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57800: case PCI_DEVICE_ID_NX2_57800_MF: case PCI_DEVICE_ID_NX2_57800_VF: strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57810: case PCI_DEVICE_ID_NX2_57810_MF: case PCI_DEVICE_ID_NX2_57810_VF: strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57840: case PCI_DEVICE_ID_NX2_57840_MF: case PCI_DEVICE_ID_NX2_57840_VF: case PCI_DEVICE_ID_NX2_57840_2_20: case PCI_DEVICE_ID_NX2_57840_4_10: strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); break; default: pr_err(PFX "Unknown device id 0x%x\n", pdev->device); break; } pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { if (hba->pcidev) { hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); } hba->pcidev = NULL; } /** * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats * * @handle: transport handle pointing to adapter structure */ static 
int bnx2fc_ulp_get_stats(void *handle) { struct bnx2fc_hba *hba = handle; struct cnic_dev *cnic; struct fcoe_stats_info *stats_addr; if (!hba) return -EINVAL; cnic = hba->cnic; stats_addr = &cnic->stats_addr->fcoe_stat; if (!stats_addr) return -EINVAL; strncpy(stats_addr->version, BNX2FC_VERSION, sizeof(stats_addr->version)); stats_addr->txq_size = BNX2FC_SQ_WQES_MAX; stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX; return 0; } /** * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance * * @handle: transport handle pointing to adapter structure * * This function maps adapter structure to pcidev structure and initiates * firmware handshake to enable/initialize on-chip FCoE components. * This bnx2fc - cnic interface api callback is used after following * conditions are met - * a) underlying network interface is up (marked by event NETDEV_UP * from netdev * b) bnx2fc adatper structure is registered. */ static void bnx2fc_ulp_start(void *handle) { struct bnx2fc_hba *hba = handle; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct fc_lport *lport; mutex_lock(&bnx2fc_dev_lock); if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) bnx2fc_fw_init(hba); BNX2FC_MISC_DBG("bnx2fc started.\n"); list_for_each_entry(interface, &if_list, list) { if (interface->hba == hba) { ctlr = bnx2fc_to_ctlr(interface); lport = ctlr->lp; /* Kick off Fabric discovery*/ printk(KERN_ERR PFX "ulp_init: start discovery\n"); lport->tt.frame_send = bnx2fc_xmit; bnx2fc_start_disc(interface); } } mutex_unlock(&bnx2fc_dev_lock); } static void bnx2fc_port_shutdown(struct fc_lport *lport) { BNX2FC_MISC_DBG("Entered %s\n", __func__); fc_fabric_logoff(lport); fc_lport_destroy(lport); } static void bnx2fc_stop(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport; struct fc_lport *vport; if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) return; lport = ctlr->lp; bnx2fc_port_shutdown(lport); mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; mutex_unlock(&lport->lp_mutex); fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; fcoe_ctlr_link_down(ctlr); fcoe_clean_pending_queue(lport); } static int bnx2fc_fw_init(struct bnx2fc_hba *hba) { #define BNX2FC_INIT_POLL_TIME (1000 / HZ) int rc = -1; int i = HZ; rc = bnx2fc_bind_adapter_devices(hba); if (rc) { printk(KERN_ALERT PFX "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc); goto err_out; } rc = bnx2fc_send_fw_fcoe_init_msg(hba); if (rc) { printk(KERN_ALERT PFX "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc); goto err_unbind; } /* * Wait until the adapter init message is complete, and adapter * state is UP. */ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) msleep(BNX2FC_INIT_POLL_TIME); if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) { printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. 
" "Ignoring...\n", hba->cnic->netdev->name); rc = -1; goto err_unbind; } set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags); return 0; err_unbind: bnx2fc_unbind_adapter_devices(hba); err_out: return rc; } static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) { if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer, 0); hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + jiffies; add_timer(&hba->destroy_timer); wait_event_interruptible(hba->destroy_wait, test_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags)); clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); /* This should never happen */ if (signal_pending(current)) flush_signals(current); del_timer_sync(&hba->destroy_timer); } bnx2fc_unbind_adapter_devices(hba); } } /** * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance * * @handle: transport handle pointing to adapter structure * * Driver checks if adapter is already in shutdown mode, if not start * the shutdown process. */ static void bnx2fc_ulp_stop(void *handle) { struct bnx2fc_hba *hba = handle; struct bnx2fc_interface *interface; printk(KERN_ERR "ULP_STOP\n"); mutex_lock(&bnx2fc_dev_lock); if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) goto exit; list_for_each_entry(interface, &if_list, list) { if (interface->hba == hba) bnx2fc_stop(interface); } BUG_ON(hba->num_ofld_sess != 0); mutex_lock(&hba->hba_mutex); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); mutex_unlock(&hba->hba_mutex); bnx2fc_fw_destroy(hba); exit: mutex_unlock(&bnx2fc_dev_lock); } static void bnx2fc_start_disc(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport; int wait_cnt = 0; BNX2FC_MISC_DBG("Entered %s\n", __func__); /* Kick off FIP/FLOGI */ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "Init not done yet\n"); return; } lport = ctlr->lp; BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); if (!bnx2fc_link_ok(lport) && interface->enabled) { BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); fcoe_ctlr_link_up(ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); } /* wait for the FCF to be selected before issuing FLOGI */ while (!ctlr->sel_fcf) { msleep(250); /* give up after 3 secs */ if (++wait_cnt > 12) break; } /* Reset max receive frame size to default */ if (fc_set_mfs(lport, BNX2FC_MFS)) return; fc_lport_init(lport); fc_fabric_login(lport); } /** * bnx2fc_ulp_init - Initialize an adapter instance * * @dev : cnic device handle * Called from cnic_register_driver() context to initialize all * enumerated cnic devices. This routine allocates adapter structure * and other device specific resources. 
*/ static void bnx2fc_ulp_init(struct cnic_dev *dev) { struct bnx2fc_hba *hba; int rc = 0; BNX2FC_MISC_DBG("Entered %s\n", __func__); /* bnx2fc works only when bnx2x is loaded */ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) || (dev->max_fcoe_conn == 0)) { printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," " flags: %lx fcoe_conn: %d\n", dev->netdev->name, dev->flags, dev->max_fcoe_conn); return; } hba = bnx2fc_hba_create(dev); if (!hba) { printk(KERN_ERR PFX "hba initialization failed\n"); return; } pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name); /* Add HBA to the adapter list */ mutex_lock(&bnx2fc_dev_lock); list_add_tail(&hba->list, &adapter_list); adapter_count++; mutex_unlock(&bnx2fc_dev_lock); dev->fcoe_cap = &hba->fcoe_cap; clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); rc = dev->register_device(dev, CNIC_ULP_FCOE, (void *) hba); if (rc) printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc); else set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); } /* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) { struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); if (interface->enabled) { if (!ctlr->lp) { pr_err(PFX "__bnx2fc_disable: lport not found\n"); return -ENODEV; } else { interface->enabled = false; fcoe_ctlr_link_down(ctlr); fcoe_clean_pending_queue(ctlr->lp); } } return 0; } /* * Deperecated: Use bnx2fc_enabled() */ static int bnx2fc_disable(struct net_device *netdev) { struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; int rc = 0; rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); if (!interface) { rc = -ENODEV; pr_err(PFX "bnx2fc_disable: interface not found\n"); } else { rc = __bnx2fc_disable(ctlr); } mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } static uint bnx2fc_npiv_create_vports(struct fc_lport *lport, struct cnic_fc_npiv_tbl *npiv_tbl) { struct fc_vport_identifiers vpid; uint i, created = 0; u64 wwnn = 0; char wwpn_str[32]; char wwnn_str[32]; if (npiv_tbl->count > MAX_NPIV_ENTRIES) { BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n"); goto done; } /* Sanity check the first entry to make sure it's not 0 */ if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 && wwn_to_u64(npiv_tbl->wwpn[0]) == 0) { BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n"); goto done; } vpid.roles = FC_PORT_ROLE_FCP_INITIATOR; vpid.vport_type = FC_PORTTYPE_NPIV; vpid.disable = false; for (i = 0; i < npiv_tbl->count; i++) { wwnn = wwn_to_u64(npiv_tbl->wwnn[i]); if (wwnn == 0) { /* * If we get a 0 element from for the WWNN then assume * the WWNN should be the same as the physical port. 
*/ wwnn = lport->wwnn; } vpid.node_name = wwnn; vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]); scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name), "NPIV[%u]:%016llx-%016llx", created, vpid.port_name, vpid.node_name); fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str)); fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str)); BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str, wwpn_str); if (fc_vport_create(lport->host, 0, &vpid)) created++; else BNX2FC_HBA_DBG(lport, "Failed to create vport\n"); } done: return created; } static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) { struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); struct bnx2fc_hba *hba; struct cnic_fc_npiv_tbl *npiv_tbl; struct fc_lport *lport; if (!interface->enabled) { if (!ctlr->lp) { pr_err(PFX "__bnx2fc_enable: lport not found\n"); return -ENODEV; } else if (!bnx2fc_link_ok(ctlr->lp)) { fcoe_ctlr_link_up(ctlr); interface->enabled = true; } } /* Create static NPIV ports if any are contained in NVRAM */ hba = interface->hba; lport = ctlr->lp; if (!hba) goto done; if (!hba->cnic) goto done; if (!lport) goto done; if (!lport->host) goto done; if (!hba->cnic->get_fc_npiv_tbl) goto done; npiv_tbl = kzalloc(sizeof(struct cnic_fc_npiv_tbl), GFP_KERNEL); if (!npiv_tbl) goto done; if (hba->cnic->get_fc_npiv_tbl(hba->cnic, npiv_tbl)) goto done_free; bnx2fc_npiv_create_vports(lport, npiv_tbl); done_free: kfree(npiv_tbl); done: return 0; } /* * Deprecated: Use bnx2fc_enabled() */ static int bnx2fc_enable(struct net_device *netdev) { struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; int rc = 0; rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); if (!interface) { rc = -ENODEV; pr_err(PFX "bnx2fc_enable: interface not found\n"); } else { rc = __bnx2fc_enable(ctlr); } mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } /** * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller * @cdev: The FCoE Controller that is being enabled or disabled * * fcoe_sysfs will ensure that the state of 'enabled' has * changed, so no checking is necessary here. This routine simply * calls fcoe_enable or fcoe_disable, both of which are deprecated. * When those routines are removed the functionality can be merged * here. */ static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); switch (cdev->enabled) { case FCOE_CTLR_ENABLED: return __bnx2fc_enable(ctlr); case FCOE_CTLR_DISABLED: return __bnx2fc_disable(ctlr); case FCOE_CTLR_UNUSED: default: return -ENOTSUPP; } } enum bnx2fc_create_link_state { BNX2FC_CREATE_LINK_DOWN, BNX2FC_CREATE_LINK_UP, }; /** * _bnx2fc_create() - Create bnx2fc FCoE interface * @netdev : The net_device object the Ethernet interface to create on * @fip_mode: The FIP mode for this creation * @link_state: The ctlr link state on creation * * Called from either the libfcoe 'create' module parameter * via fcoe_create or from fcoe_syfs's ctlr_create file. * * libfcoe's 'create' module parameter is deprecated so some * consolidation of code can be done when that interface is * removed. 
* * Returns: 0 for success */ static int _bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode, enum bnx2fc_create_link_state link_state) { struct fcoe_ctlr_device *cdev; struct fcoe_ctlr *ctlr; struct bnx2fc_interface *interface; struct bnx2fc_hba *hba; struct net_device *phys_dev = netdev; struct fc_lport *lport; struct ethtool_drvinfo drvinfo; int rc = 0; int vlan_id = 0; BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); if (fip_mode != FIP_MODE_FABRIC) { printk(KERN_ERR "fip mode not FABRIC\n"); return -EIO; } rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); if (!try_module_get(THIS_MODULE)) { rc = -EINVAL; goto mod_err; } /* obtain physical netdev */ if (is_vlan_dev(netdev)) phys_dev = vlan_dev_real_dev(netdev); /* verify if the physical device is a netxtreme2 device */ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { memset(&drvinfo, 0, sizeof(drvinfo)); phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) { printk(KERN_ERR PFX "Not a netxtreme2 device\n"); rc = -EINVAL; goto netdev_err; } } else { printk(KERN_ERR PFX "unable to obtain drv_info\n"); rc = -EINVAL; goto netdev_err; } /* obtain interface and initialize rest of the structure */ hba = bnx2fc_hba_lookup(phys_dev); if (!hba) { rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_create: hba not found\n"); goto netdev_err; } if (bnx2fc_interface_lookup(netdev)) { rc = -EEXIST; goto netdev_err; } interface = bnx2fc_interface_create(hba, netdev, fip_mode); if (!interface) { printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); rc = -ENOMEM; goto netdev_err; } if (is_vlan_dev(netdev)) { vlan_id = vlan_dev_vlan_id(netdev); interface->vlan_enabled = 1; } ctlr = bnx2fc_to_ctlr(interface); cdev = fcoe_ctlr_to_ctlr_dev(ctlr); interface->vlan_id = vlan_id; interface->tm_timeout = BNX2FC_TM_TIMEOUT; interface->timer_work_queue = create_singlethread_workqueue("bnx2fc_timer_wq"); if (!interface->timer_work_queue) { printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); rc = -EINVAL; goto ifput_err; } lport = bnx2fc_if_create(interface, &cdev->dev, 0); if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); rc = -EINVAL; goto if_create_err; } /* Add interface to if_list */ list_add_tail(&interface->list, &if_list); lport->boot_time = jiffies; /* Make this master N_port */ ctlr->lp = lport; if (link_state == BNX2FC_CREATE_LINK_UP) cdev->enabled = FCOE_CTLR_ENABLED; else cdev->enabled = FCOE_CTLR_DISABLED; if (link_state == BNX2FC_CREATE_LINK_UP && !bnx2fc_link_ok(lport)) { fcoe_ctlr_link_up(ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); } BNX2FC_HBA_DBG(lport, "create: START DISC\n"); bnx2fc_start_disc(interface); if (link_state == BNX2FC_CREATE_LINK_UP) interface->enabled = true; /* * Release from kref_init in bnx2fc_interface_setup, on success * lport should be holding a reference taken in bnx2fc_if_create */ bnx2fc_interface_put(interface); /* put netdev that was held while calling dev_get_by_name */ mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return 0; if_create_err: destroy_workqueue(interface->timer_work_queue); ifput_err: bnx2fc_net_cleanup(interface); bnx2fc_interface_put(interface); goto mod_err; netdev_err: module_put(THIS_MODULE); mod_err: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } /** * bnx2fc_create() - Create a bnx2fc interface * @netdev : The net_device object the Ethernet interface to create on * @fip_mode: The FIP mode for this 
creation * * Called from fcoe transport * * Returns: 0 for success */ static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode) { return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); } /** * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs * @netdev: The net_device to be used by the allocated FCoE Controller * * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr * in a link_down state. The allows the user an opportunity to configure * the FCoE Controller from sysfs before enabling the FCoE Controller. * * Creating in with this routine starts the FCoE Controller in Fabric * mode. The user can change to VN2VN or another mode before enabling. */ static int bnx2fc_ctlr_alloc(struct net_device *netdev) { return _bnx2fc_create(netdev, FIP_MODE_FABRIC, BNX2FC_CREATE_LINK_DOWN); } /** * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance * * @cnic: Pointer to cnic device instance * **/ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) { struct bnx2fc_hba *hba; /* Called with bnx2fc_dev_lock held */ list_for_each_entry(hba, &adapter_list, list) { if (hba->cnic == cnic) return hba; } return NULL; } static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device *netdev) { struct bnx2fc_interface *interface; /* Called with bnx2fc_dev_lock held */ list_for_each_entry(interface, &if_list, list) { if (interface->netdev == netdev) return interface; } return NULL; } static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) { struct bnx2fc_hba *hba; /* Called with bnx2fc_dev_lock held */ list_for_each_entry(hba, &adapter_list, list) { if (hba->phys_dev == phys_dev) return hba; } printk(KERN_ERR PFX "adapter_lookup: hba NULL\n"); return NULL; } /** * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources * * @dev: cnic device handle */ static void bnx2fc_ulp_exit(struct cnic_dev *dev) { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface, *tmp; BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n", dev->netdev->name, dev->flags); return; } mutex_lock(&bnx2fc_dev_lock); hba = bnx2fc_find_hba_for_cnic(dev); if (!hba) { printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n", dev); mutex_unlock(&bnx2fc_dev_lock); return; } list_del_init(&hba->list); adapter_count--; list_for_each_entry_safe(interface, tmp, &if_list, list) /* destroy not called yet, move to quiesced list */ if (interface->hba == hba) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); bnx2fc_hba_destroy(hba); } static void bnx2fc_rport_terminate_io(struct fc_rport *rport) { /* This is a no-op */ } /** * bnx2fc_fcoe_reset - Resets the fcoe * * @shost: shost the reset is from * * Returns: always 0 */ static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); fc_lport_reset(lport); return 0; } static bool bnx2fc_match(struct net_device *netdev) { struct net_device *phys_dev = netdev; mutex_lock(&bnx2fc_dev_lock); if (is_vlan_dev(netdev)) phys_dev = vlan_dev_real_dev(netdev); if (bnx2fc_hba_lookup(phys_dev)) { mutex_unlock(&bnx2fc_dev_lock); return true; } mutex_unlock(&bnx2fc_dev_lock); return false; } static struct fcoe_transport bnx2fc_transport = { .name = {"bnx2fc"}, 
.attached = false, .list = LIST_HEAD_INIT(bnx2fc_transport.list), .alloc = bnx2fc_ctlr_alloc, .match = bnx2fc_match, .create = bnx2fc_create, .destroy = bnx2fc_destroy, .enable = bnx2fc_enable, .disable = bnx2fc_disable, }; /** * bnx2fc_cpu_online - Create a receive thread for an online CPU * * @cpu: cpu index for the online cpu */ static int bnx2fc_cpu_online(unsigned int cpu) { struct bnx2fc_percpu_s *p; struct task_struct *thread; p = &per_cpu(bnx2fc_percpu, cpu); thread = kthread_create_on_node(bnx2fc_percpu_io_thread, (void *)p, cpu_to_node(cpu), "bnx2fc_thread/%d", cpu); if (IS_ERR(thread)) return PTR_ERR(thread); /* bind thread to the cpu */ kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); return 0; } static int bnx2fc_cpu_offline(unsigned int cpu) { struct bnx2fc_percpu_s *p; struct task_struct *thread; struct bnx2fc_work *work, *tmp; BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); /* Prevent any new work from being queued for this CPU */ p = &per_cpu(bnx2fc_percpu, cpu); spin_lock_bh(&p->fp_work_lock); thread = p->iothread; p->iothread = NULL; /* Free all work in the list */ list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data, work->num_rq, work->task); kfree(work); } spin_unlock_bh(&p->fp_work_lock); if (thread) kthread_stop(thread); return 0; } static int bnx2fc_slave_configure(struct scsi_device *sdev) { if (!bnx2fc_queue_depth) return 0; scsi_change_queue_depth(sdev, bnx2fc_queue_depth); return 0; } static enum cpuhp_state bnx2fc_online_state; /** * bnx2fc_mod_init - module init entry point * * Initialize driver wide global data structures, and register * with cnic module **/ static int __init bnx2fc_mod_init(void) { struct fcoe_percpu_s *bg; struct task_struct *l2_thread; int rc = 0; unsigned int cpu = 0; struct bnx2fc_percpu_s *p; printk(KERN_INFO PFX "%s", version); /* register as a fcoe transport */ rc = fcoe_transport_attach(&bnx2fc_transport); if (rc) { printk(KERN_ERR "failed to register an fcoe transport, check " "if libfcoe is loaded\n"); goto out; } INIT_LIST_HEAD(&adapter_list); INIT_LIST_HEAD(&if_list); mutex_init(&bnx2fc_dev_lock); adapter_count = 0; /* Attach FC transport template */ rc = bnx2fc_attach_transport(); if (rc) goto detach_ft; bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0); if (!bnx2fc_wq) { rc = -ENOMEM; goto release_bt; } bg = &bnx2fc_global; skb_queue_head_init(&bg->fcoe_rx_list); l2_thread = kthread_run(bnx2fc_l2_rcv_thread, (void *)bg, "bnx2fc_l2_thread"); if (IS_ERR(l2_thread)) { rc = PTR_ERR(l2_thread); goto free_wq; } spin_lock_bh(&bg->fcoe_rx_list.lock); bg->kthread = l2_thread; spin_unlock_bh(&bg->fcoe_rx_list.lock); for_each_possible_cpu(cpu) { p = &per_cpu(bnx2fc_percpu, cpu); INIT_LIST_HEAD(&p->work_list); spin_lock_init(&p->fp_work_lock); } rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", bnx2fc_cpu_online, bnx2fc_cpu_offline); if (rc < 0) goto stop_thread; bnx2fc_online_state = rc; cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); return 0; stop_thread: kthread_stop(l2_thread); free_wq: destroy_workqueue(bnx2fc_wq); release_bt: bnx2fc_release_transport(); detach_ft: fcoe_transport_detach(&bnx2fc_transport); out: return rc; } static void __exit bnx2fc_mod_exit(void) { LIST_HEAD(to_be_deleted); struct bnx2fc_hba *hba, *next; struct fcoe_percpu_s *bg; struct task_struct *l2_thread; struct sk_buff *skb; /* * NOTE: Since cnic calls register_driver routine rtnl_lock, * it will have higher 
precedence than bnx2fc_dev_lock. * unregister_device() cannot be called with bnx2fc_dev_lock * held. */ mutex_lock(&bnx2fc_dev_lock); list_splice_init(&adapter_list, &to_be_deleted); adapter_count = 0; mutex_unlock(&bnx2fc_dev_lock); /* Unregister with cnic */ list_for_each_entry_safe(hba, next, &to_be_deleted, list) { list_del_init(&hba->list); printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n", hba); bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); bnx2fc_hba_destroy(hba); } cnic_unregister_driver(CNIC_ULP_FCOE); /* Destroy global thread */ bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); l2_thread = bg->kthread; bg->kthread = NULL; while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) kfree_skb(skb); spin_unlock_bh(&bg->fcoe_rx_list.lock); if (l2_thread) kthread_stop(l2_thread); cpuhp_remove_state(bnx2fc_online_state); destroy_workqueue(bnx2fc_wq); /* * detach from scsi transport * must happen after all destroys are done */ bnx2fc_release_transport(); /* detach from fcoe transport */ fcoe_transport_detach(&bnx2fc_transport); } module_init(bnx2fc_mod_init); module_exit(bnx2fc_mod_exit); static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = { .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled, .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb, .get_fcoe_fcf_selected = fcoe_fcf_get_selected, .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id, }; static struct fc_function_template bnx2fc_transport_function = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + sizeof(struct bnx2fc_rport)), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_fc_host_stats = bnx2fc_get_host_stats, .issue_fc_host_lip = bnx2fc_fcoe_reset, .terminate_rport_io = bnx2fc_rport_terminate_io, .vport_create = bnx2fc_vport_create, .vport_delete = bnx2fc_vport_destroy, .vport_disable = bnx2fc_vport_disable, .bsg_request = fc_lport_bsg_request, }; static struct fc_function_template bnx2fc_vport_xport_function = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + sizeof(struct bnx2fc_rport)), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name 
= 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_fc_host_stats = fc_get_host_stats, .issue_fc_host_lip = bnx2fc_fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, .bsg_request = fc_lport_bsg_request, }; /* * Additional scsi_host attributes. */ static ssize_t bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct fc_lport *lport = shost_priv(shost); struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; sprintf(buf, "%u\n", interface->tm_timeout); return strlen(buf); } static ssize_t bnx2fc_tm_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct fc_lport *lport = shost_priv(shost); struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; int rval, val; rval = kstrtouint(buf, 10, &val); if (rval) return rval; if (val > 255) return -ERANGE; interface->tm_timeout = (u8)val; return strlen(buf); } static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, bnx2fc_tm_timeout_store); static struct attribute *bnx2fc_host_attrs[] = { &dev_attr_tm_timeout.attr, NULL, }; ATTRIBUTE_GROUPS(bnx2fc_host); /* * scsi_host_template structure used while registering with SCSI-ml */ static struct scsi_host_template bnx2fc_shost_template = { .module = THIS_MODULE, .name = "QLogic Offload FCoE Initiator", .queuecommand = bnx2fc_queuecommand, .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bnx2fc_eh_abort, /* abts */ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ .eh_host_reset_handler = fc_eh_host_reset, .slave_alloc = fc_slave_alloc, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .dma_boundary = 0x7fff, .max_sectors = 0x3fbf, .track_queue_depth = 1, .slave_configure = bnx2fc_slave_configure, .shost_groups = bnx2fc_host_groups, .cmd_size = sizeof(struct bnx2fc_priv), }; static struct libfc_function_template bnx2fc_libfc_fcn_templ = { .frame_send = bnx2fc_xmit, .elsct_send = bnx2fc_elsct_send, .fcp_abort_io = bnx2fc_abort_io, .fcp_cleanup = bnx2fc_cleanup, .get_lesb = fcoe_get_lesb, .rport_event_callback = bnx2fc_rport_event_handler, }; /* * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface * structure carrying callback function pointers */ static struct cnic_ulp_ops bnx2fc_cnic_cb = { .owner = THIS_MODULE, .cnic_init = bnx2fc_ulp_init, .cnic_exit = bnx2fc_ulp_exit, .cnic_start = bnx2fc_ulp_start, .cnic_stop = bnx2fc_ulp_stop, .indicate_kcqes = bnx2fc_indicate_kcqe, .indicate_netevent = bnx2fc_indicate_netevent, .cnic_get_stats = bnx2fc_ulp_get_stats, };
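/*
 * bnx2fc_cnic_cb is the callback table handed to the cnic driver at module
 * load (the counterpart of the cnic_unregister_driver(CNIC_ULP_FCOE) call in
 * bnx2fc_mod_exit() above); cnic invokes these hooks for ULP lifecycle events
 * (init/exit/start/stop), KCQE completions, netdev events and statistics on
 * each FCoE-capable device.
 */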
linux-master
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
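The tm_timeout attribute above shows the usual pattern for exposing a per-host tunable through sysfs: a show/store pair, a DEVICE_ATTR wrapper, and an attribute array wired into the scsi_host_template via ATTRIBUTE_GROUPS() and shost_groups. Below is a minimal sketch of that pattern; it is illustrative only, the attribute name example_tunable is made up, and the snippet assumes it sits inside bnx2fc_fcoe.c so it can reuse the driver's own types.

static ssize_t example_tunable_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct fc_lport *lport = shost_priv(shost);
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;

	/* report the current value, one line, newline terminated */
	return sysfs_emit(buf, "%u\n", interface->tm_timeout);
}

static ssize_t example_tunable_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct fc_lport *lport = shost_priv(shost);
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	unsigned int val;
	int rval;

	rval = kstrtouint(buf, 10, &val);	/* reject non-numeric input */
	if (rval)
		return rval;
	if (val > 255)				/* must fit in the u8 field */
		return -ERANGE;

	interface->tm_timeout = (u8)val;
	return count;
}

/* DEVICE_ATTR_RW() picks up example_tunable_show/_store by name */
static DEVICE_ATTR_RW(example_tunable);

static struct attribute *example_host_attrs[] = {
	&dev_attr_example_tunable.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_host);	/* generates example_host_groups */

/* example_host_groups would then be assigned to scsi_host_template.shost_groups */

Returning count from the store callback reports the whole write as consumed; the driver's bnx2fc_tm_timeout_store() returns strlen(buf) instead, which amounts to the same thing for the short numeric strings sysfs hands it.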
// SPDX-License-Identifier: GPL-2.0-or-later /* ------------------------------------------------------------ * ibmvscsi.c * (C) Copyright IBM Corporation 1994, 2004 * Authors: Colin DeVilbiss ([email protected]) * Santiago Leon ([email protected]) * Dave Boutcher ([email protected]) * * ------------------------------------------------------------ * Emulation of a SCSI host adapter for Virtual I/O devices * * This driver supports the SCSI adapter implemented by the IBM * Power5 firmware. That SCSI adapter is not a physical adapter, * but allows Linux SCSI peripheral drivers to directly * access devices in another logical partition on the physical system. * * The virtual adapter(s) are present in the open firmware device * tree just like real adapters. * * One of the capabilities provided on these systems is the ability * to DMA between partitions. The architecture states that for VSCSI, * the server side is allowed to DMA to and from the client. The client * is never trusted to DMA to or from the server directly. * * Messages are sent between partitions on a "Command/Response Queue" * (CRQ), which is just a buffer of 16 byte entries in the receiver's * Senders cannot access the buffer directly, but send messages by * making a hypervisor call and passing in the 16 bytes. The hypervisor * puts the message in the next 16 byte space in round-robin fashion, * turns on the high order bit of the message (the valid bit), and * generates an interrupt to the receiver (if interrupts are turned on.) * The receiver just turns off the valid bit when they have copied out * the message. * * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit * (IU) (as defined in the T10 standard available at www.t10.org), gets * a DMA address for the message, and sends it to the server as the * payload of a CRQ message. The server DMAs the SRP IU and processes it, * including doing any additional data transfers. When it is done, it * DMAs the SRP response back to the same address as the request came from, * and sends a CRQ message back to inform the client that the request has * completed. * * TODO: This is currently pretty tied to the IBM pSeries hypervisor * interfaces. It would be really nice to abstract this above an RDMA * layer. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/kthread.h> #include <asm/firmware.h> #include <asm/vio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_srp.h> #include "ibmvscsi.h" /* The values below are somewhat arbitrary default values, but * OS/400 will use 3 busses (disks, CDs, tapes, I think.) * Note that there are 3 bits of channel value, 6 bits of id, and * 5 bits of LUN. 
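 * (lun_from_dev() later in this file packs these into the 16-bit SRP LUN as:
 * address method 0b10 in bits 15-14, the 6-bit id in bits 13-8, the 3-bit
 * channel in bits 7-5, and the 5-bit LUN in bits 4-0.)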
*/ static int max_id = 64; static int max_channel = 3; static int init_timeout = 300; static int login_timeout = 60; static int info_timeout = 30; static int abort_timeout = 60; static int reset_timeout = 60; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; static int fast_fail = 1; static int client_reserve = 1; static char partition_name[96] = "UNKNOWN"; static unsigned int partition_number = -1; static LIST_HEAD(ibmvscsi_head); static DEFINE_SPINLOCK(ibmvscsi_driver_lock); static struct scsi_transport_template *ibmvscsi_transport_template; #define IBMVSCSI_VERSION "1.5.9" MODULE_DESCRIPTION("IBM Virtual SCSI"); MODULE_AUTHOR("Dave Boutcher"); MODULE_LICENSE("GPL"); MODULE_VERSION(IBMVSCSI_VERSION); module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_id, "Largest ID value for each channel [Default=64]"); module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_channel, "Largest channel value [Default=3]"); module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); module_param_named(max_requests, max_requests, int, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); module_param_named(client_reserve, client_reserve, int, S_IRUGO ); MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); static void ibmvscsi_handle_crq(struct viosrp_crq *crq, struct ibmvscsi_host_data *hostdata); /* ------------------------------------------------------------ * Routines for managing the command/response queue */ /** * ibmvscsi_handle_event: - Interrupt handler for crq events * @irq: number of irq to handle, not used * @dev_instance: ibmvscsi_host_data of host that received interrupt * * Disables interrupts and schedules srp_task * Always returns IRQ_HANDLED */ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance) { struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)dev_instance; vio_disable_interrupts(to_vio_dev(hostdata->dev)); tasklet_schedule(&hostdata->srp_task); return IRQ_HANDLED; } /** * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ * @queue: crq_queue to initialize and register * @hostdata: ibmvscsi_host_data of host * @max_requests: maximum requests (unused) * * Frees irq, deallocates a page for messages, unmaps dma, and unregisters * the crq with the hypervisor. */ static void ibmvscsi_release_crq_queue(struct crq_queue *queue, struct ibmvscsi_host_data *hostdata, int max_requests) { long rc = 0; struct vio_dev *vdev = to_vio_dev(hostdata->dev); free_irq(vdev->irq, (void *)hostdata); tasklet_kill(&hostdata->srp_task); do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); dma_unmap_single(hostdata->dev, queue->msg_token, queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); free_page((unsigned long)queue->msgs); } /** * crq_queue_next_crq: - Returns the next entry in message queue * @queue: crq_queue to use * * Returns pointer to next entry in queue, or NULL if there are no new * entried in the CRQ. 
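 * Entries are returned in ring order; the caller (ibmvscsi_task()) marks each
 * one VIOSRP_CRQ_FREE again once it has been handled, which releases the slot
 * back to the hypervisor.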
*/ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) { struct viosrp_crq *crq; unsigned long flags; spin_lock_irqsave(&queue->lock, flags); crq = &queue->msgs[queue->cur]; if (crq->valid != VIOSRP_CRQ_FREE) { if (++queue->cur == queue->size) queue->cur = 0; /* Ensure the read of the valid bit occurs before reading any * other bits of the CRQ entry */ rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); return crq; } /** * ibmvscsi_send_crq: - Send a CRQ * @hostdata: the adapter * @word1: the first 64 bits of the data * @word2: the second 64 bits of the data */ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2) { struct vio_dev *vdev = to_vio_dev(hostdata->dev); /* * Ensure the command buffer is flushed to memory before handing it * over to the VIOS to prevent it from fetching any stale data. */ mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } /** * ibmvscsi_task: - Process srps asynchronously * @data: ibmvscsi_host_data of host */ static void ibmvscsi_task(void *data) { struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data; struct vio_dev *vdev = to_vio_dev(hostdata->dev); struct viosrp_crq *crq; int done = 0; while (!done) { /* Pull all the valid messages off the CRQ */ while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { ibmvscsi_handle_crq(crq, hostdata); crq->valid = VIOSRP_CRQ_FREE; wmb(); } vio_enable_interrupts(vdev); crq = crq_queue_next_crq(&hostdata->queue); if (crq != NULL) { vio_disable_interrupts(vdev); ibmvscsi_handle_crq(crq, hostdata); crq->valid = VIOSRP_CRQ_FREE; wmb(); } else { done = 1; } } } static void gather_partition_info(void) { const char *ppartition_name; const __be32 *p_number_ptr; /* Retrieve information about this partition */ if (!of_root) return; of_node_get(of_root); ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); if (ppartition_name) strscpy(partition_name, ppartition_name, sizeof(partition_name)); p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); if (p_number_ptr) partition_number = of_read_number(p_number_ptr, 1); of_node_put(of_root); } static void set_adapter_info(struct ibmvscsi_host_data *hostdata) { memset(&hostdata->madapter_info, 0x00, sizeof(hostdata->madapter_info)); dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION); strcpy(hostdata->madapter_info.srp_version, SRP_VERSION); strncpy(hostdata->madapter_info.partition_name, partition_name, sizeof(hostdata->madapter_info.partition_name)); hostdata->madapter_info.partition_number = cpu_to_be32(partition_number); hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1); hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX); } /** * ibmvscsi_reset_crq_queue() - resets a crq after a failure * @queue: crq_queue to initialize and register * @hostdata: ibmvscsi_host_data of host */ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue, struct ibmvscsi_host_data *hostdata) { int rc = 0; struct vio_dev *vdev = to_vio_dev(hostdata->dev); /* Close the CRQ */ do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); /* Clean out the queue */ memset(queue->msgs, 0x00, PAGE_SIZE); queue->cur = 0; set_adapter_info(hostdata); /* And re-open it again */ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, queue->msg_token, PAGE_SIZE); if (rc == H_CLOSED) { /* Adapter is good, but other end is not ready */ dev_warn(hostdata->dev, 
"Partner adapter not ready\n"); } else if (rc != 0) { dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc); } return rc; } /** * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor * @queue: crq_queue to initialize and register * @hostdata: ibmvscsi_host_data of host * @max_requests: maximum requests (unused) * * Allocates a page for messages, maps it for dma, and registers * the crq with the hypervisor. * Returns zero on success. */ static int ibmvscsi_init_crq_queue(struct crq_queue *queue, struct ibmvscsi_host_data *hostdata, int max_requests) { int rc; int retrc; struct vio_dev *vdev = to_vio_dev(hostdata->dev); queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); if (!queue->msgs) goto malloc_failed; queue->size = PAGE_SIZE / sizeof(*queue->msgs); queue->msg_token = dma_map_single(hostdata->dev, queue->msgs, queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); if (dma_mapping_error(hostdata->dev, queue->msg_token)) goto map_failed; gather_partition_info(); set_adapter_info(hostdata); retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, queue->msg_token, PAGE_SIZE); if (rc == H_RESOURCE) /* maybe kexecing and resource is busy. try a reset */ rc = ibmvscsi_reset_crq_queue(queue, hostdata); if (rc == H_CLOSED) { /* Adapter is good, but other end is not ready */ dev_warn(hostdata->dev, "Partner adapter not ready\n"); retrc = 0; } else if (rc != 0) { dev_warn(hostdata->dev, "Error %d opening adapter\n", rc); goto reg_crq_failed; } queue->cur = 0; spin_lock_init(&queue->lock); tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task, (unsigned long)hostdata); if (request_irq(vdev->irq, ibmvscsi_handle_event, 0, "ibmvscsi", (void *)hostdata) != 0) { dev_err(hostdata->dev, "couldn't register irq 0x%x\n", vdev->irq); goto req_irq_failed; } rc = vio_enable_interrupts(vdev); if (rc != 0) { dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc); goto req_irq_failed; } return retrc; req_irq_failed: tasklet_kill(&hostdata->srp_task); rc = 0; do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); reg_crq_failed: dma_unmap_single(hostdata->dev, queue->msg_token, queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); map_failed: free_page((unsigned long)queue->msgs); malloc_failed: return -1; } /** * ibmvscsi_reenable_crq_queue() - reenables a crq after * @queue: crq_queue to initialize and register * @hostdata: ibmvscsi_host_data of host */ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, struct ibmvscsi_host_data *hostdata) { int rc = 0; struct vio_dev *vdev = to_vio_dev(hostdata->dev); set_adapter_info(hostdata); /* Re-enable the CRQ */ do { if (rc) msleep(100); rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); if (rc) dev_err(hostdata->dev, "Error %d enabling adapter\n", rc); return rc; } /* ------------------------------------------------------------ * Routines for the event pool and event structs */ /** * initialize_event_pool: - Allocates and initializes the event pool for a host * @pool: event_pool to be initialized * @size: Number of events in pool * @hostdata: ibmvscsi_host_data who owns the event pool * * Returns zero on success. 
*/ static int initialize_event_pool(struct event_pool *pool, int size, struct ibmvscsi_host_data *hostdata) { int i; pool->size = size; pool->next = 0; pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); if (!pool->events) return -ENOMEM; pool->iu_storage = dma_alloc_coherent(hostdata->dev, pool->size * sizeof(*pool->iu_storage), &pool->iu_token, GFP_KERNEL); if (!pool->iu_storage) { kfree(pool->events); return -ENOMEM; } for (i = 0; i < pool->size; ++i) { struct srp_event_struct *evt = &pool->events[i]; memset(&evt->crq, 0x00, sizeof(evt->crq)); atomic_set(&evt->free, 1); evt->crq.valid = VIOSRP_CRQ_CMD_RSP; evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + sizeof(*evt->xfer_iu) * i); evt->xfer_iu = pool->iu_storage + i; evt->hostdata = hostdata; evt->ext_list = NULL; evt->ext_list_token = 0; } return 0; } /** * release_event_pool() - Frees memory of an event pool of a host * @pool: event_pool to be released * @hostdata: ibmvscsi_host_data who owns the even pool * * Returns zero on success. */ static void release_event_pool(struct event_pool *pool, struct ibmvscsi_host_data *hostdata) { int i, in_use = 0; for (i = 0; i < pool->size; ++i) { if (atomic_read(&pool->events[i].free) != 1) ++in_use; if (pool->events[i].ext_list) { dma_free_coherent(hostdata->dev, SG_ALL * sizeof(struct srp_direct_buf), pool->events[i].ext_list, pool->events[i].ext_list_token); } } if (in_use) dev_warn(hostdata->dev, "releasing event pool with %d " "events still in use?\n", in_use); kfree(pool->events); dma_free_coherent(hostdata->dev, pool->size * sizeof(*pool->iu_storage), pool->iu_storage, pool->iu_token); } /** * valid_event_struct: - Determines if event is valid. * @pool: event_pool that contains the event * @evt: srp_event_struct to be checked for validity * * Returns zero if event is invalid, one otherwise. */ static int valid_event_struct(struct event_pool *pool, struct srp_event_struct *evt) { int index = evt - pool->events; if (index < 0 || index >= pool->size) /* outside of bounds */ return 0; if (evt != pool->events + index) /* unaligned */ return 0; return 1; } /** * free_event_struct() - Changes status of event to "free" * @pool: event_pool that contains the event * @evt: srp_event_struct to be modified */ static void free_event_struct(struct event_pool *pool, struct srp_event_struct *evt) { if (!valid_event_struct(pool, evt)) { dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p " "(not in pool %p)\n", evt, pool->events); return; } if (atomic_inc_return(&evt->free) != 1) { dev_err(evt->hostdata->dev, "Freeing event_struct %p " "which is not in use!\n", evt); return; } } /** * get_event_struct() - Gets the next free event in pool * @pool: event_pool that contains the events to be searched * * Returns the next event in "free" state, and NULL if none are free. * Note that no synchronization is done here, we assume the host_lock * will syncrhonze things. */ static struct srp_event_struct *get_event_struct(struct event_pool *pool) { int i; int poolsize = pool->size; int offset = pool->next; for (i = 0; i < poolsize; i++) { offset = (offset + 1) % poolsize; if (!atomic_dec_if_positive(&pool->events[offset].free)) { pool->next = offset; return &pool->events[offset]; } } printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n"); return NULL; } /** * init_event_struct: Initialize fields in an event struct that are always * required. 
* @evt_struct: The event * @done: Routine to call when the event is responded to * @format: SRP or MAD format * @timeout: timeout value set in the CRQ */ static void init_event_struct(struct srp_event_struct *evt_struct, void (*done) (struct srp_event_struct *), u8 format, int timeout) { evt_struct->cmnd = NULL; evt_struct->cmnd_done = NULL; evt_struct->sync_srp = NULL; evt_struct->crq.format = format; evt_struct->crq.timeout = cpu_to_be16(timeout); evt_struct->done = done; } /* ------------------------------------------------------------ * Routines for receiving SCSI responses from the hosting partition */ /* * set_srp_direction: Set the fields in the srp related to data * direction and number of buffers based on the direction in * the scsi_cmnd and the number of buffers */ static void set_srp_direction(struct scsi_cmnd *cmd, struct srp_cmd *srp_cmd, int numbuf) { u8 fmt; if (numbuf == 0) return; if (numbuf == 1) fmt = SRP_DATA_DESC_DIRECT; else { fmt = SRP_DATA_DESC_INDIRECT; numbuf = min(numbuf, MAX_INDIRECT_BUFS); if (cmd->sc_data_direction == DMA_TO_DEVICE) srp_cmd->data_out_desc_cnt = numbuf; else srp_cmd->data_in_desc_cnt = numbuf; } if (cmd->sc_data_direction == DMA_TO_DEVICE) srp_cmd->buf_fmt = fmt << 4; else srp_cmd->buf_fmt = fmt; } /** * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format * @cmd: srp_cmd whose additional_data member will be unmapped * @evt_struct: the event * @dev: device for which the memory is mapped */ static void unmap_cmd_data(struct srp_cmd *cmd, struct srp_event_struct *evt_struct, struct device *dev) { u8 out_fmt, in_fmt; out_fmt = cmd->buf_fmt >> 4; in_fmt = cmd->buf_fmt & ((1U << 4) - 1); if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) return; if (evt_struct->cmnd) scsi_dma_unmap(evt_struct->cmnd); } static int map_sg_list(struct scsi_cmnd *cmd, int nseg, struct srp_direct_buf *md) { int i; struct scatterlist *sg; u64 total_length = 0; scsi_for_each_sg(cmd, sg, nseg, i) { struct srp_direct_buf *descr = md + i; descr->va = cpu_to_be64(sg_dma_address(sg)); descr->len = cpu_to_be32(sg_dma_len(sg)); descr->key = 0; total_length += sg_dma_len(sg); } return total_length; } /** * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields * @cmd: struct scsi_cmnd with the scatterlist * @evt_struct: struct srp_event_struct to map * @srp_cmd: srp_cmd that contains the memory descriptor * @dev: device for which to map dma memory * * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd. * Returns 1 on success. 
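 * A single mapped segment is described with one inline srp_direct_buf; up to
 * MAX_INDIRECT_BUFS segments fit in the indirect descriptor list embedded in
 * the srp_cmd, and larger scatterlists spill into the per-event ext_list
 * table that is allocated on demand below.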
*/ static int map_sg_data(struct scsi_cmnd *cmd, struct srp_event_struct *evt_struct, struct srp_cmd *srp_cmd, struct device *dev) { int sg_mapped; u64 total_length = 0; struct srp_direct_buf *data = (struct srp_direct_buf *) srp_cmd->add_data; struct srp_indirect_buf *indirect = (struct srp_indirect_buf *) data; sg_mapped = scsi_dma_map(cmd); if (!sg_mapped) return 1; else if (sg_mapped < 0) return 0; set_srp_direction(cmd, srp_cmd, sg_mapped); /* special case; we can use a single direct descriptor */ if (sg_mapped == 1) { map_sg_list(cmd, sg_mapped, data); return 1; } indirect->table_desc.va = 0; indirect->table_desc.len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf)); indirect->table_desc.key = 0; if (sg_mapped <= MAX_INDIRECT_BUFS) { total_length = map_sg_list(cmd, sg_mapped, &indirect->desc_list[0]); indirect->len = cpu_to_be32(total_length); return 1; } /* get indirect table */ if (!evt_struct->ext_list) { evt_struct->ext_list = dma_alloc_coherent(dev, SG_ALL * sizeof(struct srp_direct_buf), &evt_struct->ext_list_token, 0); if (!evt_struct->ext_list) { if (!firmware_has_feature(FW_FEATURE_CMO)) sdev_printk(KERN_ERR, cmd->device, "Can't allocate memory " "for indirect table\n"); scsi_dma_unmap(cmd); return 0; } } total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); indirect->len = cpu_to_be32(total_length); indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); indirect->table_desc.len = cpu_to_be32(sg_mapped * sizeof(indirect->desc_list[0])); memcpy(indirect->desc_list, evt_struct->ext_list, MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); return 1; } /** * map_data_for_srp_cmd: - Calls functions to map data for srp cmds * @cmd: struct scsi_cmnd with the memory to be mapped * @evt_struct: struct srp_event_struct to map * @srp_cmd: srp_cmd that contains the memory descriptor * @dev: dma device for which to map dma memory * * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds * Returns 1 on success. */ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd, struct srp_event_struct *evt_struct, struct srp_cmd *srp_cmd, struct device *dev) { switch (cmd->sc_data_direction) { case DMA_FROM_DEVICE: case DMA_TO_DEVICE: break; case DMA_NONE: return 1; case DMA_BIDIRECTIONAL: sdev_printk(KERN_ERR, cmd->device, "Can't map DMA_BIDIRECTIONAL to read/write\n"); return 0; default: sdev_printk(KERN_ERR, cmd->device, "Unknown data direction 0x%02x; can't map!\n", cmd->sc_data_direction); return 0; } return map_sg_data(cmd, evt_struct, srp_cmd, dev); } /** * purge_requests: Our virtual adapter just shut down. 
purge any sent requests * @hostdata: the adapter * @error_code: error code to return as the 'result' */ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) { struct srp_event_struct *evt; unsigned long flags; spin_lock_irqsave(hostdata->host->host_lock, flags); while (!list_empty(&hostdata->sent)) { evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list); list_del(&evt->list); del_timer(&evt->timer); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (evt->cmnd) { evt->cmnd->result = (error_code << 16); unmap_cmd_data(&evt->iu.srp.cmd, evt, evt->hostdata->dev); if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); } spin_unlock_irqrestore(hostdata->host->host_lock, flags); } /** * ibmvscsi_set_request_limit - Set the adapter request_limit in response to * an adapter failure, reset, or SRP Login. Done under host lock to prevent * race with SCSI command submission. * @hostdata: adapter to adjust * @limit: new request limit */ static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit) { unsigned long flags; spin_lock_irqsave(hostdata->host->host_lock, flags); atomic_set(&hostdata->request_limit, limit); spin_unlock_irqrestore(hostdata->host->host_lock, flags); } /** * ibmvscsi_reset_host - Reset the connection to the server * @hostdata: struct ibmvscsi_host_data to reset */ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata) { scsi_block_requests(hostdata->host); ibmvscsi_set_request_limit(hostdata, 0); purge_requests(hostdata, DID_ERROR); hostdata->action = IBMVSCSI_HOST_ACTION_RESET; wake_up(&hostdata->work_wait_q); } /** * ibmvscsi_timeout - Internal command timeout handler * @t: struct srp_event_struct that timed out * * Called when an internally generated command times out */ static void ibmvscsi_timeout(struct timer_list *t) { struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer); struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n", evt_struct->iu.srp.cmd.opcode); ibmvscsi_reset_host(hostdata); } /* ------------------------------------------------------------ * Routines for sending and receiving SRPs */ /** * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq() * @evt_struct: evt_struct to be sent * @hostdata: ibmvscsi_host_data of host * @timeout: timeout in seconds - 0 means do not time command * * Returns the value returned from ibmvscsi_send_crq(). (Zero for success) * Note that this routine assumes that host_lock is held for synchronization */ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, struct ibmvscsi_host_data *hostdata, unsigned long timeout) { __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq; int request_status = 0; int rc; int srp_req = 0; /* If we have exhausted our request limit, just fail this request, * unless it is for a reset or abort. 
* Note that there are rare cases involving driver generated requests * (such as task management requests) that the mid layer may think we * can handle more requests (can_queue) when we actually can't */ if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) { srp_req = 1; request_status = atomic_dec_if_positive(&hostdata->request_limit); /* If request limit was -1 when we started, it is now even * less than that */ if (request_status < -1) goto send_error; /* Otherwise, we may have run out of requests. */ /* If request limit was 0 when we started the adapter is in the * process of performing a login with the server adapter, or * we may have run out of requests. */ else if (request_status == -1 && evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ) goto send_busy; /* Abort and reset calls should make it through. * Nothing except abort and reset should use the last two * slots unless we had two or less to begin with. */ else if (request_status < 2 && evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) { /* In the case that we have less than two requests * available, check the server limit as a combination * of the request limit and the number of requests * in-flight (the size of the send list). If the * server limit is greater than 2, return busy so * that the last two are reserved for reset and abort. */ int server_limit = request_status; struct srp_event_struct *tmp_evt; list_for_each_entry(tmp_evt, &hostdata->sent, list) { server_limit++; } if (server_limit > 2) goto send_busy; } } /* Copy the IU into the transfer area */ *evt_struct->xfer_iu = evt_struct->iu; evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct; /* Add this to the sent list. We need to do this * before we actually send * in case it comes back REALLY fast */ list_add_tail(&evt_struct->list, &hostdata->sent); timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0); if (timeout) { evt_struct->timer.expires = jiffies + (timeout * HZ); add_timer(&evt_struct->timer); } rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1])); if (rc != 0) { list_del(&evt_struct->list); del_timer(&evt_struct->timer); /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. * Firmware will send a CRQ with a transport event (0xFF) to * tell this client what has happened to the transport. This * will be handled in ibmvscsi_handle_crq() */ if (rc == H_CLOSED) { dev_warn(hostdata->dev, "send warning. " "Receive queue closed, will retry.\n"); goto send_busy; } dev_err(hostdata->dev, "send error %d\n", rc); if (srp_req) atomic_inc(&hostdata->request_limit); goto send_error; } return 0; send_busy: unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); free_event_struct(&hostdata->pool, evt_struct); if (srp_req && request_status != -1) atomic_inc(&hostdata->request_limit); return SCSI_MLQUEUE_HOST_BUSY; send_error: unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); if (evt_struct->cmnd != NULL) { evt_struct->cmnd->result = DID_ERROR << 16; evt_struct->cmnd_done(evt_struct->cmnd); } else if (evt_struct->done) evt_struct->done(evt_struct); free_event_struct(&hostdata->pool, evt_struct); return 0; } /** * handle_cmd_rsp: - Handle responses from commands * @evt_struct: srp_event_struct to be handled * * Used as a callback by when sending scsi cmds. 
* Gets called by ibmvscsi_handle_crq() */ static void handle_cmd_rsp(struct srp_event_struct *evt_struct) { struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp; struct scsi_cmnd *cmnd = evt_struct->cmnd; if (unlikely(rsp->opcode != SRP_RSP)) { if (printk_ratelimit()) dev_warn(evt_struct->hostdata->dev, "bad SRP RSP type %#02x\n", rsp->opcode); } if (cmnd) { cmnd->result |= rsp->status; if (scsi_status_is_check_condition(cmnd->result)) memcpy(cmnd->sense_buffer, rsp->data, be32_to_cpu(rsp->sense_data_len)); unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, evt_struct->hostdata->dev); if (rsp->flags & SRP_RSP_FLAG_DOOVER) scsi_set_resid(cmnd, be32_to_cpu(rsp->data_out_res_cnt)); else if (rsp->flags & SRP_RSP_FLAG_DIOVER) scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt)); } if (evt_struct->cmnd_done) evt_struct->cmnd_done(cmnd); } /** * lun_from_dev: - Returns the lun of the scsi device * @dev: struct scsi_device * */ static inline u16 lun_from_dev(struct scsi_device *dev) { return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun; } /** * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template * @cmnd: struct scsi_cmnd to be executed * @done: Callback function to be called when cmd is completed */ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd) { void (*done)(struct scsi_cmnd *) = scsi_done; struct srp_cmd *srp_cmd; struct srp_event_struct *evt_struct; struct srp_indirect_buf *indirect; struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host); u16 lun = lun_from_dev(cmnd->device); u8 out_fmt, in_fmt; cmnd->result = (DID_OK << 16); evt_struct = get_event_struct(&hostdata->pool); if (!evt_struct) return SCSI_MLQUEUE_HOST_BUSY; /* Set up the actual SRP IU */ BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN); memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp)); srp_cmd = &evt_struct->iu.srp.cmd; srp_cmd->opcode = SRP_CMD; memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); int_to_scsilun(lun, &srp_cmd->lun); if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { if (!firmware_has_feature(FW_FEATURE_CMO)) sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n"); free_event_struct(&hostdata->pool, evt_struct); return SCSI_MLQUEUE_HOST_BUSY; } init_event_struct(evt_struct, handle_cmd_rsp, VIOSRP_SRP_FORMAT, scsi_cmd_to_rq(cmnd)->timeout / HZ); evt_struct->cmnd = cmnd; evt_struct->cmnd_done = done; /* Fix up dma address of the buffer itself */ indirect = (struct srp_indirect_buf *) srp_cmd->add_data; out_fmt = srp_cmd->buf_fmt >> 4; in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1); if ((in_fmt == SRP_DATA_DESC_INDIRECT || out_fmt == SRP_DATA_DESC_INDIRECT) && indirect->table_desc.va == 0) { indirect->table_desc.va = cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + offsetof(struct srp_cmd, add_data) + offsetof(struct srp_indirect_buf, desc_list)); } return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); } static DEF_SCSI_QCMD(ibmvscsi_queuecommand) /* ------------------------------------------------------------ * Routines for driver initialization */ /** * map_persist_bufs: - Pre-map persistent data for adapter logins * @hostdata: ibmvscsi_host_data of host * * Map the capabilities and adapter info DMA buffers to avoid runtime failures. * Return 1 on error, 0 on success. 
*/ static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) { hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, sizeof(hostdata->caps), DMA_BIDIRECTIONAL); if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); return 1; } hostdata->adapter_info_addr = dma_map_single(hostdata->dev, &hostdata->madapter_info, sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); dma_unmap_single(hostdata->dev, hostdata->caps_addr, sizeof(hostdata->caps), DMA_BIDIRECTIONAL); return 1; } return 0; } /** * unmap_persist_bufs: - Unmap persistent data needed for adapter logins * @hostdata: ibmvscsi_host_data of host * * Unmap the capabilities and adapter info DMA buffers */ static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) { dma_unmap_single(hostdata->dev, hostdata->caps_addr, sizeof(hostdata->caps), DMA_BIDIRECTIONAL); dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); } /** * login_rsp: - Handle response to SRP login request * @evt_struct: srp_event_struct with the response * * Used as a "done" callback by when sending srp_login. Gets called * by ibmvscsi_handle_crq() */ static void login_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; switch (evt_struct->xfer_iu->srp.login_rsp.opcode) { case SRP_LOGIN_RSP: /* it worked! */ break; case SRP_LOGIN_REJ: /* refused! */ dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n", evt_struct->xfer_iu->srp.login_rej.reason); /* Login failed. */ ibmvscsi_set_request_limit(hostdata, -1); return; default: dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n", evt_struct->xfer_iu->srp.login_rsp.opcode); /* Login failed. */ ibmvscsi_set_request_limit(hostdata, -1); return; } dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); hostdata->client_migrated = 0; /* Now we know what the real request-limit is. * This value is set rather than added to request_limit because * request_limit could have been set to -1 by this client. */ ibmvscsi_set_request_limit(hostdata, be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); /* If we had any pending I/Os, kick them */ hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK; wake_up(&hostdata->work_wait_q); } /** * send_srp_login: - Sends the srp login * @hostdata: ibmvscsi_host_data of host * * Returns zero if successful. */ static int send_srp_login(struct ibmvscsi_host_data *hostdata) { int rc; unsigned long flags; struct srp_login_req *login; struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); BUG_ON(!evt_struct); init_event_struct(evt_struct, login_rsp, VIOSRP_SRP_FORMAT, login_timeout); login = &evt_struct->iu.srp.login_req; memset(login, 0, sizeof(*login)); login->opcode = SRP_LOGIN_REQ; login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); /* Start out with a request limit of 0, since this is negotiated in * the login request we are just sending and login requests always * get sent by the driver regardless of request_limit. 
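	 * The negotiated limit arrives in the SRP_LOGIN_RSP req_lim_delta
	 * field and is installed by login_rsp() once the response comes back.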
*/ ibmvscsi_set_request_limit(hostdata, 0); spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); dev_info(hostdata->dev, "sent SRP login\n"); return rc; }; /** * capabilities_rsp: - Handle response to MAD adapter capabilities request * @evt_struct: srp_event_struct with the response * * Used as a "done" callback by when sending adapter_info. */ static void capabilities_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; if (evt_struct->xfer_iu->mad.capabilities.common.status) { dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", evt_struct->xfer_iu->mad.capabilities.common.status); } else { if (hostdata->caps.migration.common.server_support != cpu_to_be16(SERVER_SUPPORTS_CAP)) dev_info(hostdata->dev, "Partition migration not supported\n"); if (client_reserve) { if (hostdata->caps.reserve.common.server_support == cpu_to_be16(SERVER_SUPPORTS_CAP)) dev_info(hostdata->dev, "Client reserve enabled\n"); else dev_info(hostdata->dev, "Client reserve not supported\n"); } } send_srp_login(hostdata); } /** * send_mad_capabilities: - Sends the mad capabilities request * and stores the result so it can be retrieved with * @hostdata: ibmvscsi_host_data of host */ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) { struct viosrp_capabilities *req; struct srp_event_struct *evt_struct; unsigned long flags; struct device_node *of_node = hostdata->dev->of_node; const char *location; evt_struct = get_event_struct(&hostdata->pool); BUG_ON(!evt_struct); init_event_struct(evt_struct, capabilities_rsp, VIOSRP_MAD_FORMAT, info_timeout); req = &evt_struct->iu.mad.capabilities; memset(req, 0, sizeof(*req)); hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED); if (hostdata->client_migrated) hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); strscpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), sizeof(hostdata->caps.name)); location = of_get_property(of_node, "ibm,loc-code", NULL); location = location ? 
location : dev_name(hostdata->dev); strscpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); req->buffer = cpu_to_be64(hostdata->caps_addr); hostdata->caps.migration.common.cap_type = cpu_to_be32(MIGRATION_CAPABILITIES); hostdata->caps.migration.common.length = cpu_to_be16(sizeof(hostdata->caps.migration)); hostdata->caps.migration.common.server_support = cpu_to_be16(SERVER_SUPPORTS_CAP); hostdata->caps.migration.ecl = cpu_to_be32(1); if (client_reserve) { hostdata->caps.reserve.common.cap_type = cpu_to_be32(RESERVATION_CAPABILITIES); hostdata->caps.reserve.common.length = cpu_to_be16(sizeof(hostdata->caps.reserve)); hostdata->caps.reserve.common.server_support = cpu_to_be16(SERVER_SUPPORTS_CAP); hostdata->caps.reserve.type = cpu_to_be32(CLIENT_RESERVE_SCSI_2); req->common.length = cpu_to_be16(sizeof(hostdata->caps)); } else req->common.length = cpu_to_be16(sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve)); spin_lock_irqsave(hostdata->host->host_lock, flags); if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); spin_unlock_irqrestore(hostdata->host->host_lock, flags); }; /** * fast_fail_rsp: - Handle response to MAD enable fast fail * @evt_struct: srp_event_struct with the response * * Used as a "done" callback by when sending enable fast fail. Gets called * by ibmvscsi_handle_crq() */ static void fast_fail_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status); if (status == VIOSRP_MAD_NOT_SUPPORTED) dev_err(hostdata->dev, "fast_fail not supported in server\n"); else if (status == VIOSRP_MAD_FAILED) dev_err(hostdata->dev, "fast_fail request failed\n"); else if (status != VIOSRP_MAD_SUCCESS) dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); send_mad_capabilities(hostdata); } /** * enable_fast_fail() - Start host initialization * @hostdata: ibmvscsi_host_data of host * * Returns zero if successful. */ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) { int rc; unsigned long flags; struct viosrp_fast_fail *fast_fail_mad; struct srp_event_struct *evt_struct; if (!fast_fail) { send_mad_capabilities(hostdata); return 0; } evt_struct = get_event_struct(&hostdata->pool); BUG_ON(!evt_struct); init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); fast_fail_mad = &evt_struct->iu.mad.fast_fail; memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad)); spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); return rc; } /** * adapter_info_rsp: - Handle response to MAD adapter info request * @evt_struct: srp_event_struct with the response * * Used as a "done" callback by when sending adapter_info. 
Gets called * by ibmvscsi_handle_crq() */ static void adapter_info_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; if (evt_struct->xfer_iu->mad.adapter_info.common.status) { dev_err(hostdata->dev, "error %d getting adapter info\n", evt_struct->xfer_iu->mad.adapter_info.common.status); } else { dev_info(hostdata->dev, "host srp version: %s, " "host partition %s (%d), OS %d, max io %u\n", hostdata->madapter_info.srp_version, hostdata->madapter_info.partition_name, be32_to_cpu(hostdata->madapter_info.partition_number), be32_to_cpu(hostdata->madapter_info.os_type), be32_to_cpu(hostdata->madapter_info.port_max_txu[0])); if (hostdata->madapter_info.port_max_txu[0]) hostdata->host->max_sectors = be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX && strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", hostdata->madapter_info.srp_version); dev_err(hostdata->dev, "limiting scatterlists to %d\n", MAX_INDIRECT_BUFS); hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) { enable_fast_fail(hostdata); return; } } send_srp_login(hostdata); } /** * send_mad_adapter_info: - Sends the mad adapter info request * and stores the result so it can be retrieved with * sysfs. We COULD consider causing a failure if the * returned SRP version doesn't match ours. * @hostdata: ibmvscsi_host_data of host * * Returns zero if successful. */ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) { struct viosrp_adapter_info *req; struct srp_event_struct *evt_struct; unsigned long flags; evt_struct = get_event_struct(&hostdata->pool); BUG_ON(!evt_struct); init_event_struct(evt_struct, adapter_info_rsp, VIOSRP_MAD_FORMAT, info_timeout); req = &evt_struct->iu.mad.adapter_info; memset(req, 0x00, sizeof(*req)); req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); req->buffer = cpu_to_be64(hostdata->adapter_info_addr); spin_lock_irqsave(hostdata->host->host_lock, flags); if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); spin_unlock_irqrestore(hostdata->host->host_lock, flags); }; /* * init_adapter() - Start virtual adapter initialization sequence */ static void init_adapter(struct ibmvscsi_host_data *hostdata) { send_mad_adapter_info(hostdata); } /* * sync_completion: Signal that a synchronous command has completed * Note that after returning from this call, the evt_struct is freed. * the caller waiting on this completion shouldn't touch the evt_struct * again. 
*/ static void sync_completion(struct srp_event_struct *evt_struct) { /* copy the response back */ if (evt_struct->sync_srp) *evt_struct->sync_srp = *evt_struct->xfer_iu; complete(&evt_struct->comp); } /* * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template * send this over to the server and wait synchronously for the response */ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) { struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host); struct srp_tsk_mgmt *tsk_mgmt; struct srp_event_struct *evt; struct srp_event_struct *tmp_evt, *found_evt; union viosrp_iu srp_rsp; int rsp_rc; unsigned long flags; u16 lun = lun_from_dev(cmd->device); unsigned long wait_switch = 0; /* First, find this command in our sent list so we can figure * out the correct tag */ spin_lock_irqsave(hostdata->host->host_lock, flags); wait_switch = jiffies + (init_timeout * HZ); do { found_evt = NULL; list_for_each_entry(tmp_evt, &hostdata->sent, list) { if (tmp_evt->cmnd == cmd) { found_evt = tmp_evt; break; } } if (!found_evt) { spin_unlock_irqrestore(hostdata->host->host_lock, flags); return SUCCESS; } evt = get_event_struct(&hostdata->pool); if (evt == NULL) { spin_unlock_irqrestore(hostdata->host->host_lock, flags); sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n"); return FAILED; } init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, abort_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; /* Set up an abort SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; tsk_mgmt->task_tag = (u64) found_evt; evt->sync_srp = &srp_rsp; init_completion(&evt->comp); rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; spin_unlock_irqrestore(hostdata->host->host_lock, flags); msleep(10); spin_lock_irqsave(hostdata->host->host_lock, flags); } while (time_before(jiffies, wait_switch)); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rsp_rc != 0) { sdev_printk(KERN_ERR, cmd->device, "failed to send abort() event. rc=%d\n", rsp_rc); return FAILED; } sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%llx, tag 0x%llx\n", (((u64) lun) << 48), (u64) found_evt); wait_for_completion(&evt->comp); /* make sure we got a good response */ if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { if (printk_ratelimit()) sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n", srp_rsp.srp.rsp.opcode); return FAILED; } if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID) rsp_rc = *((int *)srp_rsp.srp.rsp.data); else rsp_rc = srp_rsp.srp.rsp.status; if (rsp_rc) { if (printk_ratelimit()) sdev_printk(KERN_WARNING, cmd->device, "abort code %d for task tag 0x%llx\n", rsp_rc, tsk_mgmt->task_tag); return FAILED; } /* Because we dropped the spinlock above, it's possible * The event is no longer in our list. 
Make sure it didn't * complete while we were aborting */ spin_lock_irqsave(hostdata->host->host_lock, flags); found_evt = NULL; list_for_each_entry(tmp_evt, &hostdata->sent, list) { if (tmp_evt->cmnd == cmd) { found_evt = tmp_evt; break; } } if (found_evt == NULL) { spin_unlock_irqrestore(hostdata->host->host_lock, flags); sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n", tsk_mgmt->task_tag); return SUCCESS; } sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n", tsk_mgmt->task_tag); cmd->result = (DID_ABORT << 16); list_del(&found_evt->list); unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt, found_evt->hostdata->dev); free_event_struct(&found_evt->hostdata->pool, found_evt); spin_unlock_irqrestore(hostdata->host->host_lock, flags); atomic_inc(&hostdata->request_limit); return SUCCESS; } /* * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host * template send this over to the server and wait synchronously for the * response */ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host); struct srp_tsk_mgmt *tsk_mgmt; struct srp_event_struct *evt; struct srp_event_struct *tmp_evt, *pos; union viosrp_iu srp_rsp; int rsp_rc; unsigned long flags; u16 lun = lun_from_dev(cmd->device); unsigned long wait_switch = 0; spin_lock_irqsave(hostdata->host->host_lock, flags); wait_switch = jiffies + (init_timeout * HZ); do { evt = get_event_struct(&hostdata->pool); if (evt == NULL) { spin_unlock_irqrestore(hostdata->host->host_lock, flags); sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n"); return FAILED; } init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, reset_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; /* Set up a lun reset SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; evt->sync_srp = &srp_rsp; init_completion(&evt->comp); rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; spin_unlock_irqrestore(hostdata->host->host_lock, flags); msleep(10); spin_lock_irqsave(hostdata->host->host_lock, flags); } while (time_before(jiffies, wait_switch)); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rsp_rc != 0) { sdev_printk(KERN_ERR, cmd->device, "failed to send reset event. rc=%d\n", rsp_rc); return FAILED; } sdev_printk(KERN_INFO, cmd->device, "resetting device. 
lun 0x%llx\n", (((u64) lun) << 48)); wait_for_completion(&evt->comp); /* make sure we got a good response */ if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { if (printk_ratelimit()) sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n", srp_rsp.srp.rsp.opcode); return FAILED; } if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID) rsp_rc = *((int *)srp_rsp.srp.rsp.data); else rsp_rc = srp_rsp.srp.rsp.status; if (rsp_rc) { if (printk_ratelimit()) sdev_printk(KERN_WARNING, cmd->device, "reset code %d for task tag 0x%llx\n", rsp_rc, tsk_mgmt->task_tag); return FAILED; } /* We need to find all commands for this LUN that have not yet been * responded to, and fail them with DID_RESET */ spin_lock_irqsave(hostdata->host->host_lock, flags); list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) { if (tmp_evt->cmnd) tmp_evt->cmnd->result = (DID_RESET << 16); list_del(&tmp_evt->list); unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt, tmp_evt->hostdata->dev); free_event_struct(&tmp_evt->hostdata->pool, tmp_evt); atomic_inc(&hostdata->request_limit); if (tmp_evt->cmnd_done) tmp_evt->cmnd_done(tmp_evt->cmnd); else if (tmp_evt->done) tmp_evt->done(tmp_evt); } } spin_unlock_irqrestore(hostdata->host->host_lock, flags); return SUCCESS; } /** * ibmvscsi_eh_host_reset_handler - Reset the connection to the server * @cmd: struct scsi_cmnd having problems */ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd) { unsigned long wait_switch = 0; struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host); dev_err(hostdata->dev, "Resetting connection due to error recovery\n"); ibmvscsi_reset_host(hostdata); for (wait_switch = jiffies + (init_timeout * HZ); time_before(jiffies, wait_switch) && atomic_read(&hostdata->request_limit) < 2;) { msleep(10); } if (atomic_read(&hostdata->request_limit) <= 0) return FAILED; return SUCCESS; } /** * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ * @crq: Command/Response queue * @hostdata: ibmvscsi_host_data of host * */ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, struct ibmvscsi_host_data *hostdata) { long rc; unsigned long flags; /* The hypervisor copies our tag value here so no byteswapping */ struct srp_event_struct *evt_struct = (__force struct srp_event_struct *)crq->IU_data_ptr; switch (crq->valid) { case VIOSRP_CRQ_INIT_RSP: /* initialization */ switch (crq->format) { case VIOSRP_CRQ_INIT: /* Initialization message */ dev_info(hostdata->dev, "partner initialized\n"); /* Send back a response */ rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0); if (rc == 0) { /* Now login */ init_adapter(hostdata); } else { dev_err(hostdata->dev, "Unable to send init rsp. 
rc=%ld\n", rc); } break; case VIOSRP_CRQ_INIT_COMPLETE: /* Initialization response */ dev_info(hostdata->dev, "partner initialization complete\n"); /* Now login */ init_adapter(hostdata); break; default: dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); } return; case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */ scsi_block_requests(hostdata->host); ibmvscsi_set_request_limit(hostdata, 0); if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ dev_info(hostdata->dev, "Re-enabling adapter!\n"); hostdata->client_migrated = 1; hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE; purge_requests(hostdata, DID_REQUEUE); wake_up(&hostdata->work_wait_q); } else { dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n", crq->format); ibmvscsi_reset_host(hostdata); } return; case VIOSRP_CRQ_CMD_RSP: /* real payload */ break; default: dev_err(hostdata->dev, "got an invalid message type 0x%02x\n", crq->valid); return; } /* The only kind of payload CRQs we should get are responses to * things we send. Make sure this response is to something we * actually sent */ if (!valid_event_struct(&hostdata->pool, evt_struct)) { dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", evt_struct); return; } if (atomic_read(&evt_struct->free)) { dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", evt_struct); return; } if (crq->format == VIOSRP_SRP_FORMAT) atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), &hostdata->request_limit); del_timer(&evt_struct->timer); if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd) evt_struct->cmnd->result = DID_ERROR << 16; if (evt_struct->done) evt_struct->done(evt_struct); else dev_err(hostdata->dev, "returned done() is NULL; not running it!\n"); /* * Lock the host_lock before messing with these structures, since we * are running in a task context */ spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags); list_del(&evt_struct->list); free_event_struct(&evt_struct->hostdata->pool, evt_struct); spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags); } /** * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk. * @sdev: struct scsi_device device to configure * * Enable allow_restart for a device if it is a disk. Adjust the * queue_depth here also as is required by the documentation for * struct scsi_host_template. 
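 * Disk devices also get their block-layer request timeout raised to 120
 * seconds here.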
*/ static int ibmvscsi_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; unsigned long lock_flags = 0; spin_lock_irqsave(shost->host_lock, lock_flags); if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } spin_unlock_irqrestore(shost->host_lock, lock_flags); return 0; } /** * ibmvscsi_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set * * Return value: * actual depth set **/ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; return scsi_change_queue_depth(sdev, qdepth); } /* ------------------------------------------------------------ * sysfs attributes */ static ssize_t show_host_vhost_loc(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n", hostdata->caps.loc); return len; } static struct device_attribute ibmvscsi_host_vhost_loc = { .attr = { .name = "vhost_loc", .mode = S_IRUGO, }, .show = show_host_vhost_loc, }; static ssize_t show_host_vhost_name(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", hostdata->caps.name); return len; } static struct device_attribute ibmvscsi_host_vhost_name = { .attr = { .name = "vhost_name", .mode = S_IRUGO, }, .show = show_host_vhost_name, }; static ssize_t show_host_srp_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, PAGE_SIZE, "%s\n", hostdata->madapter_info.srp_version); return len; } static struct device_attribute ibmvscsi_host_srp_version = { .attr = { .name = "srp_version", .mode = S_IRUGO, }, .show = show_host_srp_version, }; static ssize_t show_host_partition_name(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, PAGE_SIZE, "%s\n", hostdata->madapter_info.partition_name); return len; } static struct device_attribute ibmvscsi_host_partition_name = { .attr = { .name = "partition_name", .mode = S_IRUGO, }, .show = show_host_partition_name, }; static ssize_t show_host_partition_number(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(hostdata->madapter_info.partition_number)); return len; } static struct device_attribute ibmvscsi_host_partition_number = { .attr = { .name = "partition_number", .mode = S_IRUGO, }, .show = show_host_partition_number, }; static ssize_t show_host_mad_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(hostdata->madapter_info.mad_version)); return len; } static struct device_attribute ibmvscsi_host_mad_version = { .attr = { .name = "mad_version", .mode = S_IRUGO, }, 
.show = show_host_mad_version, }; static ssize_t show_host_os_type(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; len = snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(hostdata->madapter_info.os_type)); return len; } static struct device_attribute ibmvscsi_host_os_type = { .attr = { .name = "os_type", .mode = S_IRUGO, }, .show = show_host_os_type, }; static ssize_t show_host_config(struct device *dev, struct device_attribute *attr, char *buf) { return 0; } static struct device_attribute ibmvscsi_host_config = { .attr = { .name = "config", .mode = S_IRUGO, }, .show = show_host_config, }; static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type) { struct ibmvscsi_host_data *hostdata = shost_priv(shost); dev_info(hostdata->dev, "Initiating adapter reset!\n"); ibmvscsi_reset_host(hostdata); return 0; } static struct attribute *ibmvscsi_host_attrs[] = { &ibmvscsi_host_vhost_loc.attr, &ibmvscsi_host_vhost_name.attr, &ibmvscsi_host_srp_version.attr, &ibmvscsi_host_partition_name.attr, &ibmvscsi_host_partition_number.attr, &ibmvscsi_host_mad_version.attr, &ibmvscsi_host_os_type.attr, &ibmvscsi_host_config.attr, NULL }; ATTRIBUTE_GROUPS(ibmvscsi_host); /* ------------------------------------------------------------ * SCSI driver registration */ static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION, .proc_name = "ibmvscsi", .queuecommand = ibmvscsi_queuecommand, .eh_timed_out = srp_timed_out, .eh_abort_handler = ibmvscsi_eh_abort_handler, .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler, .slave_configure = ibmvscsi_slave_configure, .change_queue_depth = ibmvscsi_change_queue_depth, .host_reset = ibmvscsi_host_reset, .cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT, .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, .this_id = -1, .sg_tablesize = SG_ALL, .shost_groups = ibmvscsi_host_groups, }; /** * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver * * @vdev: struct vio_dev for the device whose desired IO mem is to be returned * * Return value: * Number of bytes of IO data the driver will need to perform well. 
*/ static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev) { /* iu_storage data allocated in initialize_event_pool */ unsigned long desired_io = max_events * sizeof(union viosrp_iu); /* add io space for sg data */ desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 * IBMVSCSI_CMDS_PER_LUN_DEFAULT); return desired_io; } static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata) { unsigned long flags; int rc; char *action = "reset"; spin_lock_irqsave(hostdata->host->host_lock, flags); switch (hostdata->action) { case IBMVSCSI_HOST_ACTION_UNBLOCK: rc = 0; break; case IBMVSCSI_HOST_ACTION_RESET: spin_unlock_irqrestore(hostdata->host->host_lock, flags); rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata); spin_lock_irqsave(hostdata->host->host_lock, flags); if (!rc) rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0); vio_enable_interrupts(to_vio_dev(hostdata->dev)); break; case IBMVSCSI_HOST_ACTION_REENABLE: action = "enable"; spin_unlock_irqrestore(hostdata->host->host_lock, flags); rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata); spin_lock_irqsave(hostdata->host->host_lock, flags); if (!rc) rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0); break; case IBMVSCSI_HOST_ACTION_NONE: default: spin_unlock_irqrestore(hostdata->host->host_lock, flags); return; } hostdata->action = IBMVSCSI_HOST_ACTION_NONE; spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rc) { ibmvscsi_set_request_limit(hostdata, -1); dev_err(hostdata->dev, "error after %s\n", action); } scsi_unblock_requests(hostdata->host); } static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata) { if (kthread_should_stop()) return 1; switch (hostdata->action) { case IBMVSCSI_HOST_ACTION_NONE: return 0; case IBMVSCSI_HOST_ACTION_RESET: case IBMVSCSI_HOST_ACTION_REENABLE: case IBMVSCSI_HOST_ACTION_UNBLOCK: default: break; } return 1; } static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata) { unsigned long flags; int rc; spin_lock_irqsave(hostdata->host->host_lock, flags); rc = __ibmvscsi_work_to_do(hostdata); spin_unlock_irqrestore(hostdata->host->host_lock, flags); return rc; } static int ibmvscsi_work(void *data) { struct ibmvscsi_host_data *hostdata = data; int rc; set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(hostdata->work_wait_q, ibmvscsi_work_to_do(hostdata)); BUG_ON(rc); if (kthread_should_stop()) break; ibmvscsi_do_work(hostdata); } return 0; } /* * Called by bus code for each adapter */ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct ibmvscsi_host_data *hostdata; struct Scsi_Host *host; struct device *dev = &vdev->dev; struct srp_rport_identifiers ids; struct srp_rport *rport; unsigned long wait_switch = 0; int rc; dev_set_drvdata(&vdev->dev, NULL); host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); if (!host) { dev_err(&vdev->dev, "couldn't allocate host data\n"); goto scsi_host_alloc_failed; } host->transportt = ibmvscsi_transport_template; hostdata = shost_priv(host); memset(hostdata, 0x00, sizeof(*hostdata)); INIT_LIST_HEAD(&hostdata->sent); init_waitqueue_head(&hostdata->work_wait_q); hostdata->host = host; hostdata->dev = dev; ibmvscsi_set_request_limit(hostdata, -1); hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; if (map_persist_bufs(hostdata)) { dev_err(&vdev->dev, "couldn't map persistent buffers\n"); goto persist_bufs_failed; } hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d", "ibmvscsi", host->host_no); if 
(IS_ERR(hostdata->work_thread)) { dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n", PTR_ERR(hostdata->work_thread)); goto init_crq_failed; } rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events); if (rc != 0 && rc != H_RESOURCE) { dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); goto kill_kthread; } if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) { dev_err(&vdev->dev, "couldn't initialize event pool\n"); goto init_pool_failed; } host->max_lun = IBMVSCSI_MAX_LUN; host->max_id = max_id; host->max_channel = max_channel; host->max_cmd_len = 16; dev_info(dev, "Maximum ID: %d Maximum LUN: %llu Maximum Channel: %d\n", host->max_id, host->max_lun, host->max_channel); if (scsi_add_host(hostdata->host, hostdata->dev)) goto add_host_failed; /* we don't have a proper target_port_id so let's use the fake one */ memcpy(ids.port_id, hostdata->madapter_info.partition_name, sizeof(ids.port_id)); ids.roles = SRP_RPORT_ROLE_TARGET; rport = srp_rport_add(host, &ids); if (IS_ERR(rport)) goto add_srp_port_failed; /* Try to send an initialization message. Note that this is allowed * to fail if the other end is not acive. In that case we don't * want to scan */ if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0 || rc == H_RESOURCE) { /* * Wait around max init_timeout secs for the adapter to finish * initializing. When we are done initializing, we will have a * valid request_limit. We don't want Linux scanning before * we are ready. */ for (wait_switch = jiffies + (init_timeout * HZ); time_before(jiffies, wait_switch) && atomic_read(&hostdata->request_limit) < 2;) { msleep(10); } /* if we now have a valid request_limit, initiate a scan */ if (atomic_read(&hostdata->request_limit) > 0) scsi_scan_host(host); } dev_set_drvdata(&vdev->dev, hostdata); spin_lock(&ibmvscsi_driver_lock); list_add_tail(&hostdata->host_list, &ibmvscsi_head); spin_unlock(&ibmvscsi_driver_lock); return 0; add_srp_port_failed: scsi_remove_host(hostdata->host); add_host_failed: release_event_pool(&hostdata->pool, hostdata); init_pool_failed: ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); kill_kthread: kthread_stop(hostdata->work_thread); init_crq_failed: unmap_persist_bufs(hostdata); persist_bufs_failed: scsi_host_put(host); scsi_host_alloc_failed: return -1; } static void ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); srp_remove_host(hostdata->host); scsi_remove_host(hostdata->host); purge_requests(hostdata, DID_ERROR); release_event_pool(&hostdata->pool, hostdata); ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); kthread_stop(hostdata->work_thread); unmap_persist_bufs(hostdata); spin_lock(&ibmvscsi_driver_lock); list_del(&hostdata->host_list); spin_unlock(&ibmvscsi_driver_lock); scsi_host_put(hostdata->host); } /** * ibmvscsi_resume: Resume from suspend * @dev: device struct * * We may have lost an interrupt across suspend/resume, so kick the * interrupt handler */ static int ibmvscsi_resume(struct device *dev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev); vio_disable_interrupts(to_vio_dev(hostdata->dev)); tasklet_schedule(&hostdata->srp_task); return 0; } /* * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we * support. 
*/ static const struct vio_device_id ibmvscsi_device_table[] = { {"vscsi", "IBM,v-scsi"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); static const struct dev_pm_ops ibmvscsi_pm_ops = { .resume = ibmvscsi_resume }; static struct vio_driver ibmvscsi_driver = { .id_table = ibmvscsi_device_table, .probe = ibmvscsi_probe, .remove = ibmvscsi_remove, .get_desired_dma = ibmvscsi_get_desired_dma, .name = "ibmvscsi", .pm = &ibmvscsi_pm_ops, }; static struct srp_function_template ibmvscsi_transport_functions = { }; static int __init ibmvscsi_module_init(void) { int ret; /* Ensure we have two requests to do error recovery */ driver_template.can_queue = max_requests; max_events = max_requests + 2; if (!firmware_has_feature(FW_FEATURE_VIO)) return -ENODEV; ibmvscsi_transport_template = srp_attach_transport(&ibmvscsi_transport_functions); if (!ibmvscsi_transport_template) return -ENOMEM; ret = vio_register_driver(&ibmvscsi_driver); if (ret) srp_release_transport(ibmvscsi_transport_template); return ret; } static void __exit ibmvscsi_module_exit(void) { vio_unregister_driver(&ibmvscsi_driver); srp_release_transport(ibmvscsi_transport_template); } module_init(ibmvscsi_module_init); module_exit(ibmvscsi_module_exit);
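/*
 * A minimal, self-contained sketch of the SRP request-limit credit scheme the
 * driver above relies on: every SRP-format response carries a signed
 * req_lim_delta that is added to the host's request_limit, and new commands
 * are issued only while credits remain above the two requests reserved for
 * error recovery (mirroring "max_events = max_requests + 2" in module init).
 * This is an illustration under those assumptions, not driver code; the names
 * credit_pool, credit_apply_delta and credit_try_take are invented here.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct credit_pool {
	atomic_int request_limit;	/* credits currently granted by the target */
};

/* Apply the (signed) req_lim_delta carried in an SRP response. */
static void credit_apply_delta(struct credit_pool *p, int32_t req_lim_delta)
{
	atomic_fetch_add(&p->request_limit, req_lim_delta);
}

/* Take one credit for a new command, keeping two in reserve for EH requests. */
static bool credit_try_take(struct credit_pool *p)
{
	int cur = atomic_load(&p->request_limit);

	while (cur > 2) {
		if (atomic_compare_exchange_weak(&p->request_limit, &cur, cur - 1))
			return true;
	}
	return false;	/* caller would requeue or fail the command */
}

int main(void)
{
	struct credit_pool pool;

	atomic_init(&pool.request_limit, 0);
	credit_apply_delta(&pool, 16);		/* e.g. a login response grants credits */
	printf("issue allowed: %d\n", credit_try_take(&pool));
	credit_apply_delta(&pool, -14);		/* target throttles the initiator */
	printf("issue allowed: %d\n", credit_try_take(&pool));
	return 0;
}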
linux-master
drivers/scsi/ibmvscsi/ibmvscsi.c
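/*
 * A small, self-contained sketch of the correlation-token sanity checks that
 * both the driver above (valid_event_struct() plus the duplicate "free" check)
 * and the driver below (ibmvfc_valid_event()) apply before completing an
 * event: the pointer returned in a CRQ must lie inside the event pool, land
 * exactly on a pool slot, and refer to an event that is still outstanding.
 * The names evt_pool, demo_event, pool_owns_event and pool_accept_completion
 * are invented for this illustration and are not symbols from either driver.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_event {
	atomic_int free;		/* 1 = free, 0 = in flight */
};

struct evt_pool {
	size_t size;
	struct demo_event *events;
};

/* Bounds and alignment check in the pointer-arithmetic style used above. */
static bool pool_owns_event(const struct evt_pool *pool,
			    const struct demo_event *evt)
{
	ptrdiff_t index = evt - pool->events;

	if (index < 0 || (size_t)index >= pool->size)
		return false;			/* outside of bounds */
	return evt == pool->events + index;	/* reject unaligned pointers */
}

/* Accept a completion only for a pool member that is still in flight. */
static bool pool_accept_completion(const struct evt_pool *pool,
				   struct demo_event *evt)
{
	if (!pool_owns_event(pool, evt)) {
		fprintf(stderr, "invalid correlation token %p\n", (void *)evt);
		return false;
	}
	if (atomic_load(&evt->free)) {
		fprintf(stderr, "duplicate correlation token %p\n", (void *)evt);
		return false;
	}
	atomic_store(&evt->free, 1);	/* completed: hand the slot back */
	return true;
}

int main(void)
{
	struct demo_event events[4];
	struct evt_pool pool = { .size = 4, .events = events };
	struct demo_event *unaligned =
		(struct demo_event *)((char *)&events[1] + 1);
	size_t i;

	for (i = 0; i < pool.size; i++)
		atomic_init(&events[i].free, 1);
	atomic_store(&events[2].free, 0);	/* pretend slot 2 was sent */

	printf("completion accepted:  %d\n", pool_accept_completion(&pool, &events[2]));
	printf("duplicate rejected:   %d\n", !pool_accept_completion(&pool, &events[2]));
	printf("bad pointer rejected: %d\n", !pool_accept_completion(&pool, unaligned));
	return 0;
}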
// SPDX-License-Identifier: GPL-2.0-or-later /* * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter * * Written By: Brian King <[email protected]>, IBM Corporation * * Copyright (C) IBM Corporation, 2008 */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/stringify.h> #include <linux/bsg-lib.h> #include <asm/firmware.h> #include <asm/irq.h> #include <asm/rtas.h> #include <asm/vio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_bsg_fc.h> #include "ibmvfc.h" static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; static u64 max_lun = IBMVFC_MAX_LUN; static unsigned int max_targets = IBMVFC_MAX_TARGETS; static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; static unsigned int ibmvfc_debug = IBMVFC_DEBUG; static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; static unsigned int cls3_error = IBMVFC_CLS3_ERROR; static unsigned int mq_enabled = IBMVFC_MQ; static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES; static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS; static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ; static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M; static LIST_HEAD(ibmvfc_head); static DEFINE_SPINLOCK(ibmvfc_driver_lock); static struct scsi_transport_template *ibmvfc_transport_template; MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver"); MODULE_AUTHOR("Brian King <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(IBMVFC_DRIVER_VERSION); module_param_named(mq, mq_enabled, uint, S_IRUGO); MODULE_PARM_DESC(mq, "Enable multiqueue support. " "[Default=" __stringify(IBMVFC_MQ) "]"); module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO); MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. " "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]"); module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO); MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. " "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]"); module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO); MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. " "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]"); module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO); MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. " "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]"); module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. " "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]"); module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(default_timeout, "Default timeout in seconds for initialization and EH commands. " "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]"); module_param_named(max_requests, max_requests, uint, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. 
" "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); module_param_named(max_lun, max_lun, ullong, S_IRUGO); MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. " "[Default=" __stringify(IBMVFC_MAX_LUN) "]"); module_param_named(max_targets, max_targets, uint, S_IRUGO); MODULE_PARM_DESC(max_targets, "Maximum allowed targets. " "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]"); module_param_named(disc_threads, disc_threads, uint, S_IRUGO); MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. " "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]"); module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable driver debug information. " "[Default=" __stringify(IBMVFC_DEBUG) "]"); module_param_named(log_level, log_level, uint, 0); MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); module_param_named(cls3_error, cls3_error, uint, 0); MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. " "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]"); static const struct { u16 status; u16 error; u8 result; u8 retry; int log; char *name; } cmd_status [] = { { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" }, { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" }, { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" }, { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" }, { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" }, { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" }, { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" }, { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" }, { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" }, { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" }, { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" }, { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical 
error" }, { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" }, { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" }, { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" }, { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" }, { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" }, { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" }, { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" }, { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." }, }; static void ibmvfc_npiv_login(struct ibmvfc_host *); static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); static void ibmvfc_tgt_query_target(struct ibmvfc_target *); static void ibmvfc_npiv_logout(struct ibmvfc_host *); static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); static void ibmvfc_tgt_move_login(struct ibmvfc_target *); static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *); static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba, unsigned long length, unsigned long *cookie, unsigned long *irq) { unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length); *cookie = retbuf[0]; *irq = retbuf[1]; return rc; } static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags) { u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities); return (host_caps & cap_flags) ? 
1 : 0; } static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd) { if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) return &vfc_cmd->v2.iu; else return &vfc_cmd->v1.iu; } static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd) { if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) return &vfc_cmd->v2.rsp; else return &vfc_cmd->v1.rsp; } #ifdef CONFIG_SCSI_IBMVFC_TRACE /** * ibmvfc_trc_start - Log a start trace entry * @evt: ibmvfc event struct * **/ static void ibmvfc_trc_start(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; struct ibmvfc_mad_common *mad = &evt->iu.mad_common; struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); struct ibmvfc_trace_entry *entry; int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK; entry = &vhost->trace[index]; entry->evt = evt; entry->time = jiffies; entry->fmt = evt->crq.format; entry->type = IBMVFC_TRC_START; switch (entry->fmt) { case IBMVFC_CMD_FORMAT: entry->op_code = iu->cdb[0]; entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); entry->lun = scsilun_to_int(&iu->lun); entry->tmf_flags = iu->tmf_flags; entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len); break; case IBMVFC_MAD_FORMAT: entry->op_code = be32_to_cpu(mad->opcode); break; default: break; } } /** * ibmvfc_trc_end - Log an end trace entry * @evt: ibmvfc event struct * **/ static void ibmvfc_trc_end(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common; struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); struct ibmvfc_trace_entry *entry; int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK; entry = &vhost->trace[index]; entry->evt = evt; entry->time = jiffies; entry->fmt = evt->crq.format; entry->type = IBMVFC_TRC_END; switch (entry->fmt) { case IBMVFC_CMD_FORMAT: entry->op_code = iu->cdb[0]; entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); entry->lun = scsilun_to_int(&iu->lun); entry->tmf_flags = iu->tmf_flags; entry->u.end.status = be16_to_cpu(vfc_cmd->status); entry->u.end.error = be16_to_cpu(vfc_cmd->error); entry->u.end.fcp_rsp_flags = rsp->flags; entry->u.end.rsp_code = rsp->data.info.rsp_code; entry->u.end.scsi_status = rsp->scsi_status; break; case IBMVFC_MAD_FORMAT: entry->op_code = be32_to_cpu(mad->opcode); entry->u.end.status = be16_to_cpu(mad->status); break; default: break; } } #else #define ibmvfc_trc_start(evt) do { } while (0) #define ibmvfc_trc_end(evt) do { } while (0) #endif /** * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response * @status: status / error class * @error: error * * Return value: * index into cmd_status / -EINVAL on failure **/ static int ibmvfc_get_err_index(u16 status, u16 error) { int i; for (i = 0; i < ARRAY_SIZE(cmd_status); i++) if ((cmd_status[i].status & status) == cmd_status[i].status && cmd_status[i].error == error) return i; return -EINVAL; } /** * ibmvfc_get_cmd_error - Find the error description for the fcp response * @status: status / error class * @error: error * * Return value: * error description string **/ static const char *ibmvfc_get_cmd_error(u16 status, u16 error) { int rc = ibmvfc_get_err_index(status, error); if (rc >= 0) return cmd_status[rc].name; return unknown_error; } /** * 
ibmvfc_get_err_result - Find the scsi status to return for the fcp response * @vhost: ibmvfc host struct * @vfc_cmd: ibmvfc command struct * * Return value: * SCSI result value to return for completed command **/ static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd) { int err; struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len); if ((rsp->flags & FCP_RSP_LEN_VALID) && ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || rsp->data.info.rsp_code)) return DID_ERROR << 16; err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); if (err >= 0) return rsp->scsi_status | (cmd_status[err].result << 16); return rsp->scsi_status | (DID_ERROR << 16); } /** * ibmvfc_retry_cmd - Determine if error status is retryable * @status: status / error class * @error: error * * Return value: * 1 if error should be retried / 0 if it should not **/ static int ibmvfc_retry_cmd(u16 status, u16 error) { int rc = ibmvfc_get_err_index(status, error); if (rc >= 0) return cmd_status[rc].retry; return 1; } static const char *unknown_fc_explain = "unknown fc explain"; static const struct { u16 fc_explain; char *name; } ls_explain [] = { { 0x00, "no additional explanation" }, { 0x01, "service parameter error - options" }, { 0x03, "service parameter error - initiator control" }, { 0x05, "service parameter error - recipient control" }, { 0x07, "service parameter error - received data field size" }, { 0x09, "service parameter error - concurrent seq" }, { 0x0B, "service parameter error - credit" }, { 0x0D, "invalid N_Port/F_Port_Name" }, { 0x0E, "invalid node/Fabric Name" }, { 0x0F, "invalid common service parameters" }, { 0x11, "invalid association header" }, { 0x13, "association header required" }, { 0x15, "invalid originator S_ID" }, { 0x17, "invalid OX_ID-RX-ID combination" }, { 0x19, "command (request) already in progress" }, { 0x1E, "N_Port Login requested" }, { 0x1F, "Invalid N_Port_ID" }, }; static const struct { u16 fc_explain; char *name; } gs_explain [] = { { 0x00, "no additional explanation" }, { 0x01, "port identifier not registered" }, { 0x02, "port name not registered" }, { 0x03, "node name not registered" }, { 0x04, "class of service not registered" }, { 0x06, "initial process associator not registered" }, { 0x07, "FC-4 TYPEs not registered" }, { 0x08, "symbolic port name not registered" }, { 0x09, "symbolic node name not registered" }, { 0x0A, "port type not registered" }, { 0xF0, "authorization exception" }, { 0xF1, "authentication exception" }, { 0xF2, "data base full" }, { 0xF3, "data base empty" }, { 0xF4, "processing request" }, { 0xF5, "unable to verify connection" }, { 0xF6, "devices not in a common zone" }, }; /** * ibmvfc_get_ls_explain - Return the FC Explain description text * @status: FC Explain status * * Returns: * error string **/ static const char *ibmvfc_get_ls_explain(u16 status) { int i; for (i = 0; i < ARRAY_SIZE(ls_explain); i++) if (ls_explain[i].fc_explain == status) return ls_explain[i].name; return unknown_fc_explain; } /** * ibmvfc_get_gs_explain - Return the FC Explain description text * @status: FC Explain status * * Returns: * error string **/ static const char *ibmvfc_get_gs_explain(u16 status) { int i; for (i = 0; i < ARRAY_SIZE(gs_explain); i++) if (gs_explain[i].fc_explain == status) return gs_explain[i].name; return unknown_fc_explain; } static const struct { enum ibmvfc_fc_type fc_type; char *name; } fc_type [] = { { IBMVFC_FABRIC_REJECT, "fabric 
reject" }, { IBMVFC_PORT_REJECT, "port reject" }, { IBMVFC_LS_REJECT, "ELS reject" }, { IBMVFC_FABRIC_BUSY, "fabric busy" }, { IBMVFC_PORT_BUSY, "port busy" }, { IBMVFC_BASIC_REJECT, "basic reject" }, }; static const char *unknown_fc_type = "unknown fc type"; /** * ibmvfc_get_fc_type - Return the FC Type description text * @status: FC Type error status * * Returns: * error string **/ static const char *ibmvfc_get_fc_type(u16 status) { int i; for (i = 0; i < ARRAY_SIZE(fc_type); i++) if (fc_type[i].fc_type == status) return fc_type[i].name; return unknown_fc_type; } /** * ibmvfc_set_tgt_action - Set the next init action for the target * @tgt: ibmvfc target struct * @action: action to perform * * Returns: * 0 if action changed / non-zero if not changed **/ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, enum ibmvfc_target_action action) { int rc = -EINVAL; switch (tgt->action) { case IBMVFC_TGT_ACTION_LOGOUT_RPORT: if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT || action == IBMVFC_TGT_ACTION_DEL_RPORT) { tgt->action = action; rc = 0; } break; case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT: if (action == IBMVFC_TGT_ACTION_DEL_RPORT || action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { tgt->action = action; rc = 0; } break; case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT: if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) { tgt->action = action; rc = 0; } break; case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT: if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { tgt->action = action; rc = 0; } break; case IBMVFC_TGT_ACTION_DEL_RPORT: if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) { tgt->action = action; rc = 0; } break; case IBMVFC_TGT_ACTION_DELETED_RPORT: break; default: tgt->action = action; rc = 0; break; } if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT) tgt->add_rport = 0; return rc; } /** * ibmvfc_set_host_state - Set the state for the host * @vhost: ibmvfc host struct * @state: state to set host to * * Returns: * 0 if state changed / non-zero if not changed **/ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost, enum ibmvfc_host_state state) { int rc = 0; switch (vhost->state) { case IBMVFC_HOST_OFFLINE: rc = -EINVAL; break; default: vhost->state = state; break; } return rc; } /** * ibmvfc_set_host_action - Set the next init action for the host * @vhost: ibmvfc host struct * @action: action to perform * **/ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, enum ibmvfc_host_action action) { switch (action) { case IBMVFC_HOST_ACTION_ALLOC_TGTS: if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) vhost->action = action; break; case IBMVFC_HOST_ACTION_LOGO_WAIT: if (vhost->action == IBMVFC_HOST_ACTION_LOGO) vhost->action = action; break; case IBMVFC_HOST_ACTION_INIT_WAIT: if (vhost->action == IBMVFC_HOST_ACTION_INIT) vhost->action = action; break; case IBMVFC_HOST_ACTION_QUERY: switch (vhost->action) { case IBMVFC_HOST_ACTION_INIT_WAIT: case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: vhost->action = action; break; default: break; } break; case IBMVFC_HOST_ACTION_TGT_INIT: if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; case IBMVFC_HOST_ACTION_REENABLE: case IBMVFC_HOST_ACTION_RESET: vhost->action = action; break; case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_QUERY_TGTS: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: case IBMVFC_HOST_ACTION_NONE: default: switch (vhost->action) { case IBMVFC_HOST_ACTION_RESET: case IBMVFC_HOST_ACTION_REENABLE: break; 
default: vhost->action = action; break; } break; } } /** * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login) * @vhost: ibmvfc host struct * * Return value: * nothing **/ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) { if (vhost->action == IBMVFC_HOST_ACTION_NONE && vhost->state == IBMVFC_ACTIVE) { if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); } } else vhost->reinit = 1; wake_up(&vhost->work_wait_q); } /** * ibmvfc_del_tgt - Schedule cleanup and removal of the target * @tgt: ibmvfc target struct **/ static void ibmvfc_del_tgt(struct ibmvfc_target *tgt) { if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) { tgt->job_step = ibmvfc_tgt_implicit_logout_and_del; tgt->init_retries = 0; } wake_up(&tgt->vhost->work_wait_q); } /** * ibmvfc_link_down - Handle a link down event from the adapter * @vhost: ibmvfc host struct * @state: ibmvfc host state to enter * **/ static void ibmvfc_link_down(struct ibmvfc_host *vhost, enum ibmvfc_host_state state) { struct ibmvfc_target *tgt; ENTER; scsi_block_requests(vhost->host); list_for_each_entry(tgt, &vhost->targets, queue) ibmvfc_del_tgt(tgt); ibmvfc_set_host_state(vhost, state); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); vhost->events_to_log |= IBMVFC_AE_LINKDOWN; wake_up(&vhost->work_wait_q); LEAVE; } /** * ibmvfc_init_host - Start host initialization * @vhost: ibmvfc host struct * * Return value: * nothing **/ static void ibmvfc_init_host(struct ibmvfc_host *vhost) { struct ibmvfc_target *tgt; if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { dev_err(vhost->dev, "Host initialization retries exceeded. 
Taking adapter offline\n"); ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); return; } } if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE); vhost->async_crq.cur = 0; list_for_each_entry(tgt, &vhost->targets, queue) { if (vhost->client_migrated) tgt->need_login = 1; else ibmvfc_del_tgt(tgt); } scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; wake_up(&vhost->work_wait_q); } } /** * ibmvfc_send_crq - Send a CRQ * @vhost: ibmvfc host struct * @word1: the first 64 bits of the data * @word2: the second 64 bits of the data * * Return value: * 0 on success / other on failure **/ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2) { struct vio_dev *vdev = to_vio_dev(vhost->dev); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1, u64 word2, u64 word3, u64 word4) { struct vio_dev *vdev = to_vio_dev(vhost->dev); return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie, word1, word2, word3, word4); } /** * ibmvfc_send_crq_init - Send a CRQ init message * @vhost: ibmvfc host struct * * Return value: * 0 on success / other on failure **/ static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost) { ibmvfc_dbg(vhost, "Sending CRQ init\n"); return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0); } /** * ibmvfc_send_crq_init_complete - Send a CRQ init complete message * @vhost: ibmvfc host struct * * Return value: * 0 on success / other on failure **/ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost) { ibmvfc_dbg(vhost, "Sending CRQ init complete\n"); return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0); } /** * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host * @vhost: ibmvfc host who owns the event pool * @queue: ibmvfc queue struct * @size: pool size * * Returns zero on success. 
**/ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost, struct ibmvfc_queue *queue, unsigned int size) { int i; struct ibmvfc_event_pool *pool = &queue->evt_pool; ENTER; if (!size) return 0; pool->size = size; pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL); if (!pool->events) return -ENOMEM; pool->iu_storage = dma_alloc_coherent(vhost->dev, size * sizeof(*pool->iu_storage), &pool->iu_token, 0); if (!pool->iu_storage) { kfree(pool->events); return -ENOMEM; } INIT_LIST_HEAD(&queue->sent); INIT_LIST_HEAD(&queue->free); spin_lock_init(&queue->l_lock); for (i = 0; i < size; ++i) { struct ibmvfc_event *evt = &pool->events[i]; /* * evt->active states * 1 = in flight * 0 = being completed * -1 = free/freed */ atomic_set(&evt->active, -1); atomic_set(&evt->free, 1); evt->crq.valid = 0x80; evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); evt->xfer_iu = pool->iu_storage + i; evt->vhost = vhost; evt->queue = queue; evt->ext_list = NULL; list_add_tail(&evt->queue_list, &queue->free); } LEAVE; return 0; } /** * ibmvfc_free_event_pool - Frees memory of the event pool of a host * @vhost: ibmvfc host who owns the event pool * @queue: ibmvfc queue struct * **/ static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost, struct ibmvfc_queue *queue) { int i; struct ibmvfc_event_pool *pool = &queue->evt_pool; ENTER; for (i = 0; i < pool->size; ++i) { list_del(&pool->events[i].queue_list); BUG_ON(atomic_read(&pool->events[i].free) != 1); if (pool->events[i].ext_list) dma_pool_free(vhost->sg_pool, pool->events[i].ext_list, pool->events[i].ext_list_token); } kfree(pool->events); dma_free_coherent(vhost->dev, pool->size * sizeof(*pool->iu_storage), pool->iu_storage, pool->iu_token); LEAVE; } /** * ibmvfc_free_queue - Deallocate queue * @vhost: ibmvfc host struct * @queue: ibmvfc queue struct * * Unmaps dma and deallocates page for messages **/ static void ibmvfc_free_queue(struct ibmvfc_host *vhost, struct ibmvfc_queue *queue) { struct device *dev = vhost->dev; dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)queue->msgs.handle); queue->msgs.handle = NULL; ibmvfc_free_event_pool(vhost, queue); } /** * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ * @vhost: ibmvfc host struct * * Frees irq, deallocates a page for messages, unmaps dma, and unregisters * the crq with the hypervisor. 
**/ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) { long rc = 0; struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; ibmvfc_dbg(vhost, "Releasing CRQ\n"); free_irq(vdev->irq, vhost); tasklet_kill(&vhost->tasklet); do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; vhost->logged_in = 0; ibmvfc_free_queue(vhost, crq); } /** * ibmvfc_reenable_crq_queue - reenables the CRQ * @vhost: ibmvfc host struct * * Return value: * 0 on success / other on failure **/ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) { int rc = 0; struct vio_dev *vdev = to_vio_dev(vhost->dev); unsigned long flags; ibmvfc_dereg_sub_crqs(vhost); /* Re-enable the CRQ */ do { if (rc) msleep(100); rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); if (rc) dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc); spin_lock_irqsave(vhost->host->host_lock, flags); spin_lock(vhost->crq.q_lock); vhost->do_enquiry = 1; vhost->using_channels = 0; spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_reg_sub_crqs(vhost); return rc; } /** * ibmvfc_reset_crq - resets a crq after a failure * @vhost: ibmvfc host struct * * Return value: * 0 on success / other on failure **/ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) { int rc = 0; unsigned long flags; struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; ibmvfc_dereg_sub_crqs(vhost); /* Close the CRQ */ do { if (rc) msleep(100); rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); spin_lock_irqsave(vhost->host->host_lock, flags); spin_lock(vhost->crq.q_lock); vhost->state = IBMVFC_NO_CRQ; vhost->logged_in = 0; vhost->do_enquiry = 1; vhost->using_channels = 0; /* Clean out the queue */ memset(crq->msgs.crq, 0, PAGE_SIZE); crq->cur = 0; /* And re-open it again */ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, crq->msg_token, PAGE_SIZE); if (rc == H_CLOSED) /* Adapter is good, but other end is not ready */ dev_warn(vhost->dev, "Partner adapter not ready\n"); else if (rc != 0) dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_reg_sub_crqs(vhost); return rc; } /** * ibmvfc_valid_event - Determines if event is valid. 
* @pool: event_pool that contains the event * @evt: ibmvfc event to be checked for validity * * Return value: * 1 if event is valid / 0 if event is not valid **/ static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool, struct ibmvfc_event *evt) { int index = evt - pool->events; if (index < 0 || index >= pool->size) /* outside of bounds */ return 0; if (evt != pool->events + index) /* unaligned */ return 0; return 1; } /** * ibmvfc_free_event - Free the specified event * @evt: ibmvfc_event to be freed * **/ static void ibmvfc_free_event(struct ibmvfc_event *evt) { struct ibmvfc_event_pool *pool = &evt->queue->evt_pool; unsigned long flags; BUG_ON(!ibmvfc_valid_event(pool, evt)); BUG_ON(atomic_inc_return(&evt->free) != 1); BUG_ON(atomic_dec_and_test(&evt->active)); spin_lock_irqsave(&evt->queue->l_lock, flags); list_add_tail(&evt->queue_list, &evt->queue->free); if (evt->eh_comp) complete(evt->eh_comp); spin_unlock_irqrestore(&evt->queue->l_lock, flags); } /** * ibmvfc_scsi_eh_done - EH done function for queuecommand commands * @evt: ibmvfc event struct * * This function does not setup any error status, that must be done * before this function gets called. **/ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt) { struct scsi_cmnd *cmnd = evt->cmnd; if (cmnd) { scsi_dma_unmap(cmnd); scsi_done(cmnd); } ibmvfc_free_event(evt); } /** * ibmvfc_complete_purge - Complete failed command list * @purge_list: list head of failed commands * * This function runs completions on commands to fail as a result of a * host reset or platform migration. **/ static void ibmvfc_complete_purge(struct list_head *purge_list) { struct ibmvfc_event *evt, *pos; list_for_each_entry_safe(evt, pos, purge_list, queue_list) { list_del(&evt->queue_list); ibmvfc_trc_end(evt); evt->done(evt); } } /** * ibmvfc_fail_request - Fail request with specified error code * @evt: ibmvfc event struct * @error_code: error code to fail request with * * Return value: * none **/ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) { /* * Anything we are failing should still be active. Otherwise, it * implies we already got a response for the command and are doing * something bad like double completing it. */ BUG_ON(!atomic_dec_and_test(&evt->active)); if (evt->cmnd) { evt->cmnd->result = (error_code << 16); evt->done = ibmvfc_scsi_eh_done; } else evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); del_timer(&evt->timer); } /** * ibmvfc_purge_requests - Our virtual adapter just shut down. 
Purge any sent requests * @vhost: ibmvfc host struct * @error_code: error code to fail requests with * * Return value: * none **/ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) { struct ibmvfc_event *evt, *pos; struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs; unsigned long flags; int hwqs = 0; int i; if (vhost->using_channels) hwqs = vhost->scsi_scrqs.active_queues; ibmvfc_dbg(vhost, "Purging all requests\n"); spin_lock_irqsave(&vhost->crq.l_lock, flags); list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list) ibmvfc_fail_request(evt, error_code); list_splice_init(&vhost->crq.sent, &vhost->purge); spin_unlock_irqrestore(&vhost->crq.l_lock, flags); for (i = 0; i < hwqs; i++) { spin_lock_irqsave(queues[i].q_lock, flags); spin_lock(&queues[i].l_lock); list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list) ibmvfc_fail_request(evt, error_code); list_splice_init(&queues[i].sent, &vhost->purge); spin_unlock(&queues[i].l_lock); spin_unlock_irqrestore(queues[i].q_lock, flags); } } /** * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ * @vhost: struct ibmvfc host to reset **/ static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) { ibmvfc_purge_requests(vhost, DID_ERROR); ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); } /** * __ibmvfc_reset_host - Reset the connection to the server (no locking) * @vhost: struct ibmvfc host to reset **/ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) { if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); vhost->job_step = ibmvfc_npiv_logout; wake_up(&vhost->work_wait_q); } else ibmvfc_hard_reset_host(vhost); } /** * ibmvfc_reset_host - Reset the connection to the server * @vhost: ibmvfc host struct **/ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) { unsigned long flags; spin_lock_irqsave(vhost->host->host_lock, flags); __ibmvfc_reset_host(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); } /** * ibmvfc_retry_host_init - Retry host initialization if allowed * @vhost: ibmvfc host struct * * Returns: 1 if init will be retried / 0 if not * **/ static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) { int retry = 0; if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { vhost->delay_init = 1; if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { dev_err(vhost->dev, "Host initialization retries exceeded. 
Taking adapter offline\n"); ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) __ibmvfc_reset_host(vhost); else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); retry = 1; } } wake_up(&vhost->work_wait_q); return retry; } /** * __ibmvfc_get_target - Find the specified scsi_target (no locking) * @starget: scsi target struct * * Return value: * ibmvfc_target struct / NULL if not found **/ static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ibmvfc_host *vhost = shost_priv(shost); struct ibmvfc_target *tgt; list_for_each_entry(tgt, &vhost->targets, queue) if (tgt->target_id == starget->id) { kref_get(&tgt->kref); return tgt; } return NULL; } /** * ibmvfc_get_target - Find the specified scsi_target * @starget: scsi target struct * * Return value: * ibmvfc_target struct / NULL if not found **/ static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ibmvfc_target *tgt; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); tgt = __ibmvfc_get_target(starget); spin_unlock_irqrestore(shost->host_lock, flags); return tgt; } /** * ibmvfc_get_host_speed - Get host port speed * @shost: scsi host struct * * Return value: * none **/ static void ibmvfc_get_host_speed(struct Scsi_Host *shost) { struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); if (vhost->state == IBMVFC_ACTIVE) { switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) { case 1: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; case 2: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case 4: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case 8: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case 10: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case 16: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; default: ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n", be64_to_cpu(vhost->login_buf->resp.link_speed) / 100); fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } else fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; spin_unlock_irqrestore(shost->host_lock, flags); } /** * ibmvfc_get_host_port_state - Get host port state * @shost: scsi host struct * * Return value: * none **/ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost) { struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); switch (vhost->state) { case IBMVFC_INITIALIZING: case IBMVFC_ACTIVE: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case IBMVFC_LINK_DOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case IBMVFC_LINK_DEAD: case IBMVFC_HOST_OFFLINE: fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; break; case IBMVFC_HALTED: fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; break; case IBMVFC_NO_CRQ: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; default: ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state); fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } spin_unlock_irqrestore(shost->host_lock, flags); } /** * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout * @rport: rport struct * @timeout: timeout value * * Return value: * none **/ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) { if (timeout) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = 1; } /** * 
ibmvfc_release_tgt - Free memory allocated for a target * @kref: kref struct * **/ static void ibmvfc_release_tgt(struct kref *kref) { struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref); kfree(tgt); } /** * ibmvfc_get_starget_node_name - Get SCSI target's node name * @starget: scsi target struct * * Return value: * none **/ static void ibmvfc_get_starget_node_name(struct scsi_target *starget) { struct ibmvfc_target *tgt = ibmvfc_get_target(starget); fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0; if (tgt) kref_put(&tgt->kref, ibmvfc_release_tgt); } /** * ibmvfc_get_starget_port_name - Get SCSI target's port name * @starget: scsi target struct * * Return value: * none **/ static void ibmvfc_get_starget_port_name(struct scsi_target *starget) { struct ibmvfc_target *tgt = ibmvfc_get_target(starget); fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0; if (tgt) kref_put(&tgt->kref, ibmvfc_release_tgt); } /** * ibmvfc_get_starget_port_id - Get SCSI target's port ID * @starget: scsi target struct * * Return value: * none **/ static void ibmvfc_get_starget_port_id(struct scsi_target *starget) { struct ibmvfc_target *tgt = ibmvfc_get_target(starget); fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1; if (tgt) kref_put(&tgt->kref, ibmvfc_release_tgt); } /** * ibmvfc_wait_while_resetting - Wait while the host resets * @vhost: ibmvfc host struct * * Return value: * 0 on success / other on failure **/ static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost) { long timeout = wait_event_timeout(vhost->init_wait_q, ((vhost->state == IBMVFC_ACTIVE || vhost->state == IBMVFC_HOST_OFFLINE || vhost->state == IBMVFC_LINK_DEAD) && vhost->action == IBMVFC_HOST_ACTION_NONE), (init_timeout * HZ)); return timeout ? 0 : -EIO; } /** * ibmvfc_issue_fc_host_lip - Re-initiate link initialization * @shost: scsi host struct * * Return value: * 0 on success / other on failure **/ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost) { struct ibmvfc_host *vhost = shost_priv(shost); dev_err(vhost->dev, "Initiating host LIP. 
Resetting connection\n"); ibmvfc_reset_host(vhost); return ibmvfc_wait_while_resetting(vhost); } /** * ibmvfc_gather_partition_info - Gather info about the LPAR * @vhost: ibmvfc host struct * * Return value: * none **/ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) { struct device_node *rootdn; const char *name; const unsigned int *num; rootdn = of_find_node_by_path("/"); if (!rootdn) return; name = of_get_property(rootdn, "ibm,partition-name", NULL); if (name) strncpy(vhost->partition_name, name, sizeof(vhost->partition_name)); num = of_get_property(rootdn, "ibm,partition-no", NULL); if (num) vhost->partition_number = *num; of_node_put(rootdn); } /** * ibmvfc_set_login_info - Setup info for NPIV login * @vhost: ibmvfc host struct * * Return value: * none **/ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) { struct ibmvfc_npiv_login *login_info = &vhost->login_info; struct ibmvfc_queue *async_crq = &vhost->async_crq; struct device_node *of_node = vhost->dev->of_node; const char *location; memset(login_info, 0, sizeof(*login_info)); login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9); login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); login_info->partition_num = cpu_to_be32(vhost->partition_number); login_info->vfc_frame_version = cpu_to_be32(1); login_info->fcp_version = cpu_to_be16(3); login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT); if (vhost->client_migrated) login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED); login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ); login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN); if (vhost->mq_enabled || vhost->using_channels) login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS); login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token); login_info->async.len = cpu_to_be32(async_crq->size * sizeof(*async_crq->msgs.async)); strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); strncpy(login_info->device_name, dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME); location = of_get_property(of_node, "ibm,loc-code", NULL); location = location ? location : dev_name(vhost->dev); strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME); } /** * ibmvfc_get_event - Gets the next free event in pool * @queue: ibmvfc queue struct * * Returns a free event from the pool. **/ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue) { struct ibmvfc_event *evt; unsigned long flags; spin_lock_irqsave(&queue->l_lock, flags); BUG_ON(list_empty(&queue->free)); evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list); atomic_set(&evt->free, 0); list_del(&evt->queue_list); spin_unlock_irqrestore(&queue->l_lock, flags); return evt; } /** * ibmvfc_locked_done - Calls evt completion with host_lock held * @evt: ibmvfc evt to complete * * All non-scsi command completion callbacks have the expectation that the * host_lock is held. This callback is used by ibmvfc_init_event to wrap a * MAD evt with the host_lock. **/ static void ibmvfc_locked_done(struct ibmvfc_event *evt) { unsigned long flags; spin_lock_irqsave(evt->vhost->host->host_lock, flags); evt->_done(evt); spin_unlock_irqrestore(evt->vhost->host->host_lock, flags); } /** * ibmvfc_init_event - Initialize fields in an event struct that are always * required. 
* @evt: The event * @done: Routine to call when the event is responded to * @format: SRP or MAD format **/ static void ibmvfc_init_event(struct ibmvfc_event *evt, void (*done) (struct ibmvfc_event *), u8 format) { evt->cmnd = NULL; evt->sync_iu = NULL; evt->eh_comp = NULL; evt->crq.format = format; if (format == IBMVFC_CMD_FORMAT) evt->done = done; else { evt->_done = done; evt->done = ibmvfc_locked_done; } evt->hwq = 0; } /** * ibmvfc_map_sg_list - Initialize scatterlist * @scmd: scsi command struct * @nseg: number of scatterlist segments * @md: memory descriptor list to initialize **/ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, struct srp_direct_buf *md) { int i; struct scatterlist *sg; scsi_for_each_sg(scmd, sg, nseg, i) { md[i].va = cpu_to_be64(sg_dma_address(sg)); md[i].len = cpu_to_be32(sg_dma_len(sg)); md[i].key = 0; } } /** * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields * @scmd: struct scsi_cmnd with the scatterlist * @evt: ibmvfc event struct * @vfc_cmd: vfc_cmd that contains the memory descriptor * @dev: device for which to map dma memory * * Returns: * 0 on success / non-zero on failure **/ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, struct ibmvfc_event *evt, struct ibmvfc_cmd *vfc_cmd, struct device *dev) { int sg_mapped; struct srp_direct_buf *data = &vfc_cmd->ioba; struct ibmvfc_host *vhost = dev_get_drvdata(dev); struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd); if (cls3_error) vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR); sg_mapped = scsi_dma_map(scmd); if (!sg_mapped) { vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC); return 0; } else if (unlikely(sg_mapped < 0)) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n"); return sg_mapped; } if (scmd->sc_data_direction == DMA_TO_DEVICE) { vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE); iu->add_cdb_len |= IBMVFC_WRDATA; } else { vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ); iu->add_cdb_len |= IBMVFC_RDDATA; } if (sg_mapped == 1) { ibmvfc_map_sg_list(scmd, sg_mapped, data); return 0; } vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST); if (!evt->ext_list) { evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC, &evt->ext_list_token); if (!evt->ext_list) { scsi_dma_unmap(scmd); if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n"); return -ENOMEM; } } ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list); data->va = cpu_to_be64(evt->ext_list_token); data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf)); data->key = 0; return 0; } /** * ibmvfc_timeout - Internal command timeout handler * @t: struct ibmvfc_event that timed out * * Called when an internally generated command times out **/ static void ibmvfc_timeout(struct timer_list *t) { struct ibmvfc_event *evt = from_timer(evt, t, timer); struct ibmvfc_host *vhost = evt->vhost; dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt); ibmvfc_reset_host(vhost); } /** * ibmvfc_send_event - Transforms event to u64 array and calls send_crq() * @evt: event to be sent * @vhost: ibmvfc host struct * @timeout: timeout in seconds - 0 means do not time command * * Returns the value returned from ibmvfc_send_crq(). 
(Zero for success) **/ static int ibmvfc_send_event(struct ibmvfc_event *evt, struct ibmvfc_host *vhost, unsigned long timeout) { __be64 *crq_as_u64 = (__be64 *) &evt->crq; unsigned long flags; int rc; /* Copy the IU into the transfer area */ *evt->xfer_iu = evt->iu; if (evt->crq.format == IBMVFC_CMD_FORMAT) evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt); else if (evt->crq.format == IBMVFC_MAD_FORMAT) evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt); else BUG(); timer_setup(&evt->timer, ibmvfc_timeout, 0); if (timeout) { evt->timer.expires = jiffies + (timeout * HZ); add_timer(&evt->timer); } spin_lock_irqsave(&evt->queue->l_lock, flags); list_add_tail(&evt->queue_list, &evt->queue->sent); atomic_set(&evt->active, 1); mb(); if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT) rc = ibmvfc_send_sub_crq(vhost, evt->queue->vios_cookie, be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]), 0, 0); else rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1])); if (rc) { atomic_set(&evt->active, 0); list_del(&evt->queue_list); spin_unlock_irqrestore(&evt->queue->l_lock, flags); del_timer(&evt->timer); /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. * Firmware will send a CRQ with a transport event (0xFF) to * tell this client what has happened to the transport. This * will be handled in ibmvfc_handle_crq() */ if (rc == H_CLOSED) { if (printk_ratelimit()) dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n"); if (evt->cmnd) scsi_dma_unmap(evt->cmnd); ibmvfc_free_event(evt); return SCSI_MLQUEUE_HOST_BUSY; } dev_err(vhost->dev, "Send error (rc=%d)\n", rc); if (evt->cmnd) { evt->cmnd->result = DID_ERROR << 16; evt->done = ibmvfc_scsi_eh_done; } else evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR); evt->done(evt); } else { spin_unlock_irqrestore(&evt->queue->l_lock, flags); ibmvfc_trc_start(evt); } return 0; } /** * ibmvfc_log_error - Log an error for the failed command if appropriate * @evt: ibmvfc event to log * **/ static void ibmvfc_log_error(struct ibmvfc_event *evt) { struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); struct scsi_cmnd *cmnd = evt->cmnd; const char *err = unknown_error; int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); int logerr = 0; int rsp_code = 0; if (index >= 0) { logerr = cmd_status[index].log; err = cmd_status[index].name; } if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1))) return; if (rsp->flags & FCP_RSP_LEN_VALID) rsp_code = rsp->data.info.rsp_code; scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) " "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error), rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); } /** * ibmvfc_relogin - Log back into the specified device * @sdev: scsi device struct * **/ static void ibmvfc_relogin(struct scsi_device *sdev) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct ibmvfc_target *tgt; unsigned long flags; spin_lock_irqsave(vhost->host->host_lock, flags); list_for_each_entry(tgt, &vhost->targets, queue) { if (rport == tgt->rport) { ibmvfc_del_tgt(tgt); break; } } ibmvfc_reinit_host(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); } /** * ibmvfc_scsi_done - Handle responses from commands * @evt: ibmvfc event to be 
handled * * Used as a callback when sending scsi cmds. **/ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) { struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd); struct scsi_cmnd *cmnd = evt->cmnd; u32 rsp_len = 0; u32 sense_len = be32_to_cpu(rsp->fcp_sense_len); if (cmnd) { if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID) scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid)); else if (rsp->flags & FCP_RESID_UNDER) scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid)); else scsi_set_resid(cmnd, 0); if (vfc_cmd->status) { cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd); if (rsp->flags & FCP_RSP_LEN_VALID) rsp_len = be32_to_cpu(rsp->fcp_rsp_len); if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) && (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED)) ibmvfc_relogin(cmnd->device); if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) cmnd->result = (DID_ERROR << 16); ibmvfc_log_error(evt); } if (!cmnd->result && (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow)) cmnd->result = (DID_ERROR << 16); scsi_dma_unmap(cmnd); scsi_done(cmnd); } ibmvfc_free_event(evt); } /** * ibmvfc_host_chkready - Check if the host can accept commands * @vhost: struct ibmvfc host * * Returns: * 1 if host can accept command / 0 if not **/ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) { int result = 0; switch (vhost->state) { case IBMVFC_LINK_DEAD: case IBMVFC_HOST_OFFLINE: result = DID_NO_CONNECT << 16; break; case IBMVFC_NO_CRQ: case IBMVFC_INITIALIZING: case IBMVFC_HALTED: case IBMVFC_LINK_DOWN: result = DID_REQUEUE << 16; break; case IBMVFC_ACTIVE: result = 0; break; } return result; } static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); size_t offset; memset(vfc_cmd, 0, sizeof(*vfc_cmd)); if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { offset = offsetof(struct ibmvfc_cmd, v2.rsp); vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name); } else offset = offsetof(struct ibmvfc_cmd, v1.rsp); vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset); vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp)); vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE); vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu)); vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp)); vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id); int_to_scsilun(sdev->lun, &iu->lun); return vfc_cmd; } /** * ibmvfc_queuecommand - The queuecommand function of the scsi template * @shost: scsi host struct * @cmnd: struct scsi_cmnd to be executed * * Returns: * 0 on success / other on failure **/ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) { struct ibmvfc_host *vhost = shost_priv(shost); struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct ibmvfc_cmd *vfc_cmd; struct ibmvfc_fcp_cmd_iu *iu; struct ibmvfc_event *evt; 
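/* blk_mq_unique_tag() packs the hardware queue index with the request tag; the hwq index extracted below selects which sub-CRQ channel the command is issued on when channelized I/O is in use. */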
u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq); u16 scsi_channel; int rc; if (unlikely((rc = fc_remote_port_chkready(rport))) || unlikely((rc = ibmvfc_host_chkready(vhost)))) { cmnd->result = rc; scsi_done(cmnd); return 0; } cmnd->result = (DID_OK << 16); if (vhost->using_channels) { scsi_channel = hwq % vhost->scsi_scrqs.active_queues; evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]); evt->hwq = hwq % vhost->scsi_scrqs.active_queues; } else evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT); evt->cmnd = cmnd; vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device); iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd)); memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); if (cmnd->flags & SCMD_TAGGED) { vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag); iu->pri_task_attr = IBMVFC_SIMPLE_TASK; } vfc_cmd->correlation = cpu_to_be64((u64)evt); if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) return ibmvfc_send_event(evt, vhost, 0); ibmvfc_free_event(evt); if (rc == -ENOMEM) return SCSI_MLQUEUE_HOST_BUSY; if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) scmd_printk(KERN_ERR, cmnd, "Failed to map DMA buffer for command. rc=%d\n", rc); cmnd->result = DID_ERROR << 16; scsi_done(cmnd); return 0; } /** * ibmvfc_sync_completion - Signal that a synchronous command has completed * @evt: ibmvfc event struct * **/ static void ibmvfc_sync_completion(struct ibmvfc_event *evt) { /* copy the response back */ if (evt->sync_iu) *evt->sync_iu = *evt->xfer_iu; complete(&evt->comp); } /** * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands * @evt: struct ibmvfc_event * **/ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; ibmvfc_free_event(evt); vhost->aborting_passthru = 0; dev_info(vhost->dev, "Passthru command cancelled\n"); } /** * ibmvfc_bsg_timeout - Handle a BSG timeout * @job: struct bsg_job that timed out * * Returns: * 0 on success / other on failure **/ static int ibmvfc_bsg_timeout(struct bsg_job *job) { struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); unsigned long port_id = (unsigned long)job->dd_data; struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; unsigned long flags; int rc; ENTER; spin_lock_irqsave(vhost->host->host_lock, flags); if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) { __ibmvfc_reset_host(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); return 0; } vhost->aborting_passthru = 1; evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT); tmf = &evt->iu.tmf; memset(tmf, 0, sizeof(*tmf)); tmf->common.version = cpu_to_be32(1); tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); tmf->common.length = cpu_to_be16(sizeof(*tmf)); tmf->scsi_id = cpu_to_be64(port_id); tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY); rc = ibmvfc_send_event(evt, vhost, default_timeout); if (rc != 0) { vhost->aborting_passthru = 0; dev_err(vhost->dev, "Failed to send cancel event. 
rc=%d\n", rc); rc = -EIO; } else dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n", port_id); spin_unlock_irqrestore(vhost->host->host_lock, flags); LEAVE; return rc; } /** * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command * @vhost: struct ibmvfc_host to send command * @port_id: port ID to send command * * Returns: * 0 on success / other on failure **/ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) { struct ibmvfc_port_login *plogi; struct ibmvfc_target *tgt; struct ibmvfc_event *evt; union ibmvfc_iu rsp_iu; unsigned long flags; int rc = 0, issue_login = 1; ENTER; spin_lock_irqsave(vhost->host->host_lock, flags); list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->scsi_id == port_id) { issue_login = 0; break; } } if (!issue_login) goto unlock_out; if (unlikely((rc = ibmvfc_host_chkready(vhost)))) goto unlock_out; evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); plogi = &evt->iu.plogi; memset(plogi, 0, sizeof(*plogi)); plogi->common.version = cpu_to_be32(1); plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); plogi->common.length = cpu_to_be16(sizeof(*plogi)); plogi->scsi_id = cpu_to_be64(port_id); evt->sync_iu = &rsp_iu; init_completion(&evt->comp); rc = ibmvfc_send_event(evt, vhost, default_timeout); spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rc) return -EIO; wait_for_completion(&evt->comp); if (rsp_iu.plogi.common.status) rc = -EIO; spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); unlock_out: spin_unlock_irqrestore(vhost->host->host_lock, flags); LEAVE; return rc; } /** * ibmvfc_bsg_request - Handle a BSG request * @job: struct bsg_job to be executed * * Returns: * 0 on success / other on failure **/ static int ibmvfc_bsg_request(struct bsg_job *job) { struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); struct fc_rport *rport = fc_bsg_to_rport(job); struct ibmvfc_passthru_mad *mad; struct ibmvfc_event *evt; union ibmvfc_iu rsp_iu; unsigned long flags, port_id = -1; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; unsigned int code = bsg_request->msgcode; int rc = 0, req_seg, rsp_seg, issue_login = 0; u32 fc_flags, rsp_len; ENTER; bsg_reply->reply_payload_rcv_len = 0; if (rport) port_id = rport->port_id; switch (code) { case FC_BSG_HST_ELS_NOLOGIN: port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | (bsg_request->rqst_data.h_els.port_id[1] << 8) | bsg_request->rqst_data.h_els.port_id[2]; fallthrough; case FC_BSG_RPT_ELS: fc_flags = IBMVFC_FC_ELS; break; case FC_BSG_HST_CT: issue_login = 1; port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | (bsg_request->rqst_data.h_ct.port_id[1] << 8) | bsg_request->rqst_data.h_ct.port_id[2]; fallthrough; case FC_BSG_RPT_CT: fc_flags = IBMVFC_FC_CT_IU; break; default: return -ENOTSUPP; } if (port_id == -1) return -EINVAL; if (!mutex_trylock(&vhost->passthru_mutex)) return -EBUSY; job->dd_data = (void *)port_id; req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list, job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_seg) { mutex_unlock(&vhost->passthru_mutex); return -ENOMEM; } rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list, job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_seg) { dma_unmap_sg(vhost->dev, job->request_payload.sg_list, job->request_payload.sg_cnt, DMA_TO_DEVICE); mutex_unlock(&vhost->passthru_mutex); return -ENOMEM; } if (req_seg > 1 || rsp_seg > 1) { rc = -EINVAL; goto 
out; } if (issue_login) rc = ibmvfc_bsg_plogi(vhost, port_id); spin_lock_irqsave(vhost->host->host_lock, flags); if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) || unlikely((rc = ibmvfc_host_chkready(vhost)))) { spin_unlock_irqrestore(vhost->host->host_lock, flags); goto out; } evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); mad = &evt->iu.passthru; memset(mad, 0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_passthru_mad, iu)); mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len); mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len); mad->iu.flags = cpu_to_be32(fc_flags); mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list)); mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list)); mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list)); mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list)); mad->iu.scsi_id = cpu_to_be64(port_id); mad->iu.tag = cpu_to_be64((u64)evt); rsp_len = be32_to_cpu(mad->iu.rsp.len); evt->sync_iu = &rsp_iu; init_completion(&evt->comp); rc = ibmvfc_send_event(evt, vhost, 0); spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rc) { rc = -EIO; goto out; } wait_for_completion(&evt->comp); if (rsp_iu.passthru.common.status) rc = -EIO; else bsg_reply->reply_payload_rcv_len = rsp_len; spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); bsg_reply->result = rc; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); rc = 0; out: dma_unmap_sg(vhost->dev, job->request_payload.sg_list, job->request_payload.sg_cnt, DMA_TO_DEVICE); dma_unmap_sg(vhost->dev, job->reply_payload.sg_list, job->reply_payload.sg_cnt, DMA_FROM_DEVICE); mutex_unlock(&vhost->passthru_mutex); LEAVE; return rc; } /** * ibmvfc_reset_device - Reset the device with the specified reset type * @sdev: scsi device to reset * @type: reset type * @desc: reset type description for log messages * * Returns: * 0 on success / other on failure **/ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct ibmvfc_cmd *tmf; struct ibmvfc_event *evt = NULL; union ibmvfc_iu rsp_iu; struct ibmvfc_fcp_cmd_iu *iu; struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd); int rsp_rc = -EBUSY; unsigned long flags; int rsp_code = 0; spin_lock_irqsave(vhost->host->host_lock, flags); if (vhost->state == IBMVFC_ACTIVE) { if (vhost->using_channels) evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]); else evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); tmf = ibmvfc_init_vfc_cmd(evt, sdev); iu = ibmvfc_get_fcp_iu(vhost, tmf); tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) tmf->target_wwpn = cpu_to_be64(rport->port_name); iu->tmf_flags = type; evt->sync_iu = &rsp_iu; init_completion(&evt->comp); rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); } 
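/* Drop the host lock before sleeping on the completion below; rsp_rc stays -EBUSY when the host is not active and no reset TMF was sent. */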
spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rsp_rc != 0) { sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n", desc, rsp_rc); return -EIO; } sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc); wait_for_completion(&evt->comp); if (rsp_iu.cmd.status) rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd); if (rsp_code) { if (fc_rsp->flags & FCP_RSP_LEN_VALID) rsp_code = fc_rsp->data.info.rsp_code; sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, fc_rsp->scsi_status); rsp_rc = -EIO; } else sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc); spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); return rsp_rc; } /** * ibmvfc_match_rport - Match function for specified remote port * @evt: ibmvfc event struct * @rport: device to match * * Returns: * 1 if event matches rport / 0 if event does not match rport **/ static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport) { struct fc_rport *cmd_rport; if (evt->cmnd) { cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device)); if (cmd_rport == rport) return 1; } return 0; } /** * ibmvfc_match_target - Match function for specified target * @evt: ibmvfc event struct * @device: device to match (starget) * * Returns: * 1 if event matches starget / 0 if event does not match starget **/ static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device) { if (evt->cmnd && scsi_target(evt->cmnd->device) == device) return 1; return 0; } /** * ibmvfc_match_lun - Match function for specified LUN * @evt: ibmvfc event struct * @device: device to match (sdev) * * Returns: * 1 if event matches sdev / 0 if event does not match sdev **/ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) { if (evt->cmnd && evt->cmnd->device == device) return 1; return 0; } /** * ibmvfc_event_is_free - Check if event is free or not * @evt: ibmvfc event struct * * Returns: * true / false **/ static bool ibmvfc_event_is_free(struct ibmvfc_event *evt) { struct ibmvfc_event *loop_evt; list_for_each_entry(loop_evt, &evt->queue->free, queue_list) if (loop_evt == evt) return true; return false; } /** * ibmvfc_wait_for_ops - Wait for ops to complete * @vhost: ibmvfc host struct * @device: device to match (starget or sdev) * @match: match function * * Returns: * SUCCESS / FAILED **/ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, int (*match) (struct ibmvfc_event *, void *)) { struct ibmvfc_event *evt; DECLARE_COMPLETION_ONSTACK(comp); int wait, i, q_index, q_size; unsigned long flags; signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; struct ibmvfc_queue *queues; ENTER; if (vhost->mq_enabled && vhost->using_channels) { queues = vhost->scsi_scrqs.scrqs; q_size = vhost->scsi_scrqs.active_queues; } else { queues = &vhost->crq; q_size = 1; } do { wait = 0; spin_lock_irqsave(vhost->host->host_lock, flags); for (q_index = 0; q_index < q_size; q_index++) { spin_lock(&queues[q_index].l_lock); for (i = 0; i < queues[q_index].evt_pool.size; i++) { evt = &queues[q_index].evt_pool.events[i]; if (!ibmvfc_event_is_free(evt)) { if (match(evt, device)) { evt->eh_comp = &comp; wait++; } } } spin_unlock(&queues[q_index].l_lock); } spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) { timeout = 
wait_for_completion_timeout(&comp, timeout); if (!timeout) { wait = 0; spin_lock_irqsave(vhost->host->host_lock, flags); for (q_index = 0; q_index < q_size; q_index++) { spin_lock(&queues[q_index].l_lock); for (i = 0; i < queues[q_index].evt_pool.size; i++) { evt = &queues[q_index].evt_pool.events[i]; if (!ibmvfc_event_is_free(evt)) { if (match(evt, device)) { evt->eh_comp = NULL; wait++; } } } spin_unlock(&queues[q_index].l_lock); } spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); LEAVE; return wait ? FAILED : SUCCESS; } } } while (wait); LEAVE; return SUCCESS; } static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue, struct scsi_device *sdev, int type) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); struct fc_rport *rport = starget_to_rport(starget); struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; evt = ibmvfc_get_event(queue); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); tmf = &evt->iu.tmf; memset(tmf, 0, sizeof(*tmf)); if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { tmf->common.version = cpu_to_be32(2); tmf->target_wwpn = cpu_to_be64(rport->port_name); } else { tmf->common.version = cpu_to_be32(1); } tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); tmf->common.length = cpu_to_be16(sizeof(*tmf)); tmf->scsi_id = cpu_to_be64(rport->port_id); int_to_scsilun(sdev->lun, &tmf->lun); if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS)) type &= ~IBMVFC_TMF_SUPPRESS_ABTS; if (vhost->state == IBMVFC_ACTIVE) tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID)); else tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID)); tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata); init_completion(&evt->comp); return evt; } static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct ibmvfc_event *evt, *found_evt, *temp; struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs; unsigned long flags; int num_hwq, i; int fail = 0; LIST_HEAD(cancelq); u16 status; ENTER; spin_lock_irqsave(vhost->host->host_lock, flags); num_hwq = vhost->scsi_scrqs.active_queues; for (i = 0; i < num_hwq; i++) { spin_lock(queues[i].q_lock); spin_lock(&queues[i].l_lock); found_evt = NULL; list_for_each_entry(evt, &queues[i].sent, queue_list) { if (evt->cmnd && evt->cmnd->device == sdev) { found_evt = evt; break; } } spin_unlock(&queues[i].l_lock); if (found_evt && vhost->logged_in) { evt = ibmvfc_init_tmf(&queues[i], sdev, type); evt->sync_iu = &queues[i].cancel_rsp; ibmvfc_send_event(evt, vhost, default_timeout); list_add_tail(&evt->cancel, &cancelq); } spin_unlock(queues[i].q_lock); } spin_unlock_irqrestore(vhost->host->host_lock, flags); if (list_empty(&cancelq)) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) sdev_printk(KERN_INFO, sdev, "No events found to cancel\n"); return 0; } sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); list_for_each_entry_safe(evt, temp, &cancelq, cancel) { wait_for_completion(&evt->comp); status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status); list_del(&evt->cancel); ibmvfc_free_event(evt); if (status != IBMVFC_MAD_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); switch (status) { case IBMVFC_MAD_DRIVER_FAILED: case IBMVFC_MAD_CRQ_ERROR: /* Host adapter most likely going through 
reset, return success so * the caller will wait for the command being cancelled to get returned */ break; default: fail = 1; break; } } } if (fail) return -EIO; sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); LEAVE; return 0; } static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct ibmvfc_event *evt, *found_evt; union ibmvfc_iu rsp; int rsp_rc = -EBUSY; unsigned long flags; u16 status; ENTER; found_evt = NULL; spin_lock_irqsave(vhost->host->host_lock, flags); spin_lock(&vhost->crq.l_lock); list_for_each_entry(evt, &vhost->crq.sent, queue_list) { if (evt->cmnd && evt->cmnd->device == sdev) { found_evt = evt; break; } } spin_unlock(&vhost->crq.l_lock); if (!found_evt) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) sdev_printk(KERN_INFO, sdev, "No events found to cancel\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); return 0; } if (vhost->logged_in) { evt = ibmvfc_init_tmf(&vhost->crq, sdev, type); evt->sync_iu = &rsp; rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); } spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rsp_rc != 0) { sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); /* If failure is received, the host adapter is most likely going through reset, return success so the caller will wait for the command being cancelled to get returned */ return 0; } sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); wait_for_completion(&evt->comp); status = be16_to_cpu(rsp.mad_common.status); spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); if (status != IBMVFC_MAD_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); switch (status) { case IBMVFC_MAD_DRIVER_FAILED: case IBMVFC_MAD_CRQ_ERROR: /* Host adapter most likely going through reset, return success so the caller will wait for the command being cancelled to get returned */ return 0; default: return -EIO; }; } sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); return 0; } /** * ibmvfc_cancel_all - Cancel all outstanding commands to the device * @sdev: scsi device to cancel commands * @type: type of error recovery being performed * * This sends a cancel to the VIOS for the specified device. This does * NOT send any abort to the actual device. That must be done separately. 
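* Callers typically pair this with ibmvfc_wait_for_ops() so the cancelled commands are actually returned before error recovery continues.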
* * Returns: * 0 on success / other on failure **/ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) { struct ibmvfc_host *vhost = shost_priv(sdev->host); if (vhost->mq_enabled && vhost->using_channels) return ibmvfc_cancel_all_mq(sdev, type); else return ibmvfc_cancel_all_sq(sdev, type); } /** * ibmvfc_match_key - Match function for specified cancel key * @evt: ibmvfc event struct * @key: cancel key to match * * Returns: * 1 if event matches key / 0 if event does not match key **/ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key) { unsigned long cancel_key = (unsigned long)key; if (evt->crq.format == IBMVFC_CMD_FORMAT && be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key) return 1; return 0; } /** * ibmvfc_match_evt - Match function for specified event * @evt: ibmvfc event struct * @match: event to match * * Returns: * 1 if event matches key / 0 if event does not match key **/ static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match) { if (evt == match) return 1; return 0; } /** * ibmvfc_abort_task_set - Abort outstanding commands to the device * @sdev: scsi device to abort commands * * This sends an Abort Task Set to the VIOS for the specified device. This does * NOT send any cancel to the VIOS. That must be done separately. * * Returns: * 0 on success / other on failure **/ static int ibmvfc_abort_task_set(struct scsi_device *sdev) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct ibmvfc_cmd *tmf; struct ibmvfc_event *evt, *found_evt; union ibmvfc_iu rsp_iu; struct ibmvfc_fcp_cmd_iu *iu; struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd); int rc, rsp_rc = -EBUSY; unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT; int rsp_code = 0; found_evt = NULL; spin_lock_irqsave(vhost->host->host_lock, flags); spin_lock(&vhost->crq.l_lock); list_for_each_entry(evt, &vhost->crq.sent, queue_list) { if (evt->cmnd && evt->cmnd->device == sdev) { found_evt = evt; break; } } spin_unlock(&vhost->crq.l_lock); if (!found_evt) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) sdev_printk(KERN_INFO, sdev, "No events found to abort\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); return 0; } if (vhost->state == IBMVFC_ACTIVE) { evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); tmf = ibmvfc_init_vfc_cmd(evt, sdev); iu = ibmvfc_get_fcp_iu(vhost, tmf); if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) tmf->target_wwpn = cpu_to_be64(rport->port_name); iu->tmf_flags = IBMVFC_ABORT_TASK_SET; tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); evt->sync_iu = &rsp_iu; tmf->correlation = cpu_to_be64((u64)evt); init_completion(&evt->comp); rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); } spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rsp_rc != 0) { sdev_printk(KERN_ERR, sdev, "Failed to send abort. 
rc=%d\n", rsp_rc); return -EIO; } sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); timeout = wait_for_completion_timeout(&evt->comp, timeout); if (!timeout) { rc = ibmvfc_cancel_all(sdev, 0); if (!rc) { rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) rc = 0; } if (rc) { sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n"); ibmvfc_reset_host(vhost); rsp_rc = -EIO; rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) rsp_rc = 0; rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt); if (rc != SUCCESS) { spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_hard_reset_host(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); rsp_rc = 0; } goto out; } } if (rsp_iu.cmd.status) rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd); if (rsp_code) { if (fc_rsp->flags & FCP_RSP_LEN_VALID) rsp_code = fc_rsp->data.info.rsp_code; sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " "flags: %x fcp_rsp: %x, scsi_status: %x\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, fc_rsp->scsi_status); rsp_rc = -EIO; } else sdev_printk(KERN_INFO, sdev, "Abort successful\n"); out: spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); return rsp_rc; } /** * ibmvfc_eh_abort_handler - Abort a command * @cmd: scsi command to abort * * Returns: * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); int cancel_rc, block_rc; int rc = FAILED; ENTER; block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); if (block_rc != FAST_IO_FAIL) { cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); ibmvfc_abort_task_set(sdev); } else cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); if (block_rc == FAST_IO_FAIL && rc != FAILED) rc = FAST_IO_FAIL; LEAVE; return rc; } /** * ibmvfc_eh_device_reset_handler - Reset a single LUN * @cmd: scsi command struct * * Returns: * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); int cancel_rc, block_rc, reset_rc = 0; int rc = FAILED; ENTER; block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); if (block_rc != FAST_IO_FAIL) { cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); } else cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); if (block_rc == FAST_IO_FAIL && rc != FAILED) rc = FAST_IO_FAIL; LEAVE; return rc; } /** * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code * **/ static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) { unsigned long *rc = data; *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } /** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code * **/ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) { unsigned 
long *rc = data; *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); } /** * ibmvfc_eh_target_reset_handler - Reset the target * @cmd: scsi command struct * * Returns: * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); int block_rc; int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; ENTER; block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); if (block_rc != FAST_IO_FAIL) { starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); } else starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); if (block_rc == FAST_IO_FAIL && rc != FAILED) rc = FAST_IO_FAIL; LEAVE; return rc; } /** * ibmvfc_eh_host_reset_handler - Reset the connection to the server * @cmd: struct scsi_cmnd having problems * **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { int rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); return rc ? FAILED : SUCCESS; } /** * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport. * @rport: rport struct * * Return value: * none **/ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); struct ibmvfc_host *vhost = shost_priv(shost); struct fc_rport *dev_rport; struct scsi_device *sdev; struct ibmvfc_target *tgt; unsigned long rc, flags; unsigned int found; ENTER; shost_for_each_device(sdev, shost) { dev_rport = starget_to_rport(scsi_target(sdev)); if (dev_rport != rport) continue; ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); if (rc == FAILED) ibmvfc_issue_fc_host_lip(shost); spin_lock_irqsave(shost->host_lock, flags); found = 0; list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->scsi_id == rport->port_id) { found++; break; } } if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { /* * If we get here, that means we previously attempted to send * an implicit logout to the target but it failed, most likely * due to I/O being pending, so we need to send it again */ ibmvfc_del_tgt(tgt); ibmvfc_reinit_host(vhost); } spin_unlock_irqrestore(shost->host_lock, flags); LEAVE; } static const struct ibmvfc_async_desc ae_desc [] = { { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL }, { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL }, { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL }, { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL }, { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL }, { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL }, { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL }, { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL }, }; static const struct 
ibmvfc_async_desc unknown_ae = { "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL }; /** * ibmvfc_get_ae_desc - Get text description for async event * @ae: async event * **/ static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae) { int i; for (i = 0; i < ARRAY_SIZE(ae_desc); i++) if (ae_desc[i].ae == ae) return &ae_desc[i]; return &unknown_ae; } static const struct { enum ibmvfc_ae_link_state state; const char *desc; } link_desc [] = { { IBMVFC_AE_LS_LINK_UP, " link up" }, { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" }, { IBMVFC_AE_LS_LINK_DOWN, " link down" }, { IBMVFC_AE_LS_LINK_DEAD, " link dead" }, }; /** * ibmvfc_get_link_state - Get text description for link state * @state: link state * **/ static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state) { int i; for (i = 0; i < ARRAY_SIZE(link_desc); i++) if (link_desc[i].state == state) return link_desc[i].desc; return ""; } /** * ibmvfc_handle_async - Handle an async event from the adapter * @crq: crq to process * @vhost: ibmvfc host struct * **/ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); struct ibmvfc_target *tgt; ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx," " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), ibmvfc_get_link_state(crq->link_state)); switch (be64_to_cpu(crq->event)) { case IBMVFC_AE_RESUME: switch (crq->link_state) { case IBMVFC_AE_LS_LINK_DOWN: ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); break; case IBMVFC_AE_LS_LINK_DEAD: ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); break; case IBMVFC_AE_LS_LINK_UP: case IBMVFC_AE_LS_LINK_BOUNCED: default: vhost->events_to_log |= IBMVFC_AE_LINKUP; vhost->delay_init = 1; __ibmvfc_reset_host(vhost); break; } break; case IBMVFC_AE_LINK_UP: vhost->events_to_log |= IBMVFC_AE_LINKUP; vhost->delay_init = 1; __ibmvfc_reset_host(vhost); break; case IBMVFC_AE_SCN_FABRIC: case IBMVFC_AE_SCN_DOMAIN: vhost->events_to_log |= IBMVFC_AE_RSCN; if (vhost->state < IBMVFC_HALTED) { vhost->delay_init = 1; __ibmvfc_reset_host(vhost); } break; case IBMVFC_AE_SCN_NPORT: case IBMVFC_AE_SCN_GROUP: vhost->events_to_log |= IBMVFC_AE_RSCN; ibmvfc_reinit_host(vhost); break; case IBMVFC_AE_ELS_LOGO: case IBMVFC_AE_ELS_PRLO: case IBMVFC_AE_ELS_PLOGI: list_for_each_entry(tgt, &vhost->targets, queue) { if (!crq->scsi_id && !crq->wwpn && !crq->node_name) break; if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) continue; if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) continue; if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) continue; if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) tgt->logo_rcvd = 1; if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { ibmvfc_del_tgt(tgt); ibmvfc_reinit_host(vhost); } } break; case IBMVFC_AE_LINK_DOWN: case IBMVFC_AE_ADAPTER_FAILED: ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); break; case IBMVFC_AE_LINK_DEAD: ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); break; case IBMVFC_AE_HALT: ibmvfc_link_down(vhost, IBMVFC_HALTED); break; default: dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); break; } } /** * ibmvfc_handle_crq - Handles and frees received events in the CRQ * @crq: Command/Response queue * @vhost: ibmvfc host struct * @evt_doneq: Event done queue * **/ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct 
ibmvfc_host *vhost, struct list_head *evt_doneq) { long rc; struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); switch (crq->valid) { case IBMVFC_CRQ_INIT_RSP: switch (crq->format) { case IBMVFC_CRQ_INIT: dev_info(vhost->dev, "Partner initialized\n"); /* Send back a response */ rc = ibmvfc_send_crq_init_complete(vhost); if (rc == 0) ibmvfc_init_host(vhost); else dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); break; case IBMVFC_CRQ_INIT_COMPLETE: dev_info(vhost->dev, "Partner initialization complete\n"); ibmvfc_init_host(vhost); break; default: dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); } return; case IBMVFC_CRQ_XPORT_EVENT: vhost->state = IBMVFC_NO_CRQ; vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; scsi_block_requests(vhost->host); ibmvfc_purge_requests(vhost, DID_REQUEUE); ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); wake_up(&vhost->work_wait_q); } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); } else { dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); } return; case IBMVFC_CRQ_CMD_RSP: break; default: dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); return; } if (crq->format == IBMVFC_ASYNC_EVENT) return; /* The only kind of payload CRQs we should get are responses to * things we send. Make sure this response is to something we * actually sent */ if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) { dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", crq->ioba); return; } if (unlikely(atomic_dec_if_positive(&evt->active))) { dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", crq->ioba); return; } spin_lock(&evt->queue->l_lock); list_move_tail(&evt->queue_list, evt_doneq); spin_unlock(&evt->queue->l_lock); } /** * ibmvfc_scan_finished - Check if the device scan is done. * @shost: scsi host struct * @time: current elapsed time * * Returns: * 0 if scan is not done / 1 if scan is done **/ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) { unsigned long flags; struct ibmvfc_host *vhost = shost_priv(shost); int done = 0; spin_lock_irqsave(shost->host_lock, flags); if (!vhost->scan_timeout) done = 1; else if (time >= (vhost->scan_timeout * HZ)) { dev_info(vhost->dev, "Scan taking longer than %d seconds, " "continuing initialization\n", vhost->scan_timeout); done = 1; } if (vhost->scan_complete) { vhost->scan_timeout = init_timeout; done = 1; } spin_unlock_irqrestore(shost->host_lock, flags); return done; } /** * ibmvfc_slave_alloc - Setup the device's task set value * @sdev: struct scsi_device device to configure * * Set the device's task set value so that error handling works as * expected. 
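* The task set value stored in sdev->hostdata is later used as the cancel key in ibmvfc_init_vfc_cmd() and ibmvfc_init_tmf().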
* * Returns: * 0 on success / -ENXIO if device does not exist **/ static int ibmvfc_slave_alloc(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; spin_lock_irqsave(shost->host_lock, flags); sdev->hostdata = (void *)(unsigned long)vhost->task_set++; spin_unlock_irqrestore(shost->host_lock, flags); return 0; } /** * ibmvfc_target_alloc - Setup the target's task set value * @starget: struct scsi_target * * Set the target's task set value so that error handling works as * expected. * * Returns: * 0 on success / -ENXIO if device does not exist **/ static int ibmvfc_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); starget->hostdata = (void *)(unsigned long)vhost->task_set++; spin_unlock_irqrestore(shost->host_lock, flags); return 0; } /** * ibmvfc_slave_configure - Configure the device * @sdev: struct scsi_device device to configure * * Enable allow_restart for a device if it is a disk. Adjust the * queue_depth here also. * * Returns: * 0 **/ static int ibmvfc_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } spin_unlock_irqrestore(shost->host_lock, flags); return 0; } /** * ibmvfc_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set * * Return value: * actual depth set **/ static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) { if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) qdepth = IBMVFC_MAX_CMDS_PER_LUN; return scsi_change_queue_depth(sdev, qdepth); } static ssize_t ibmvfc_show_host_partition_name(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%s\n", vhost->login_buf->resp.partition_name); } static ssize_t ibmvfc_show_host_device_name(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%s\n", vhost->login_buf->resp.device_name); } static ssize_t ibmvfc_show_host_loc_code(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%s\n", vhost->login_buf->resp.port_loc_code); } static ssize_t ibmvfc_show_host_drc_name(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%s\n", vhost->login_buf->resp.drc_name); } static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version)); } static ssize_t ibmvfc_show_host_capabilities(struct device *dev, struct device_attribute *attr, char *buf) { 
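/* Report the 64-bit capability mask returned by the VIOS in the login response buffer. */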
struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities)); } /** * ibmvfc_show_log_level - Show the adapter's error logging level * @dev: class device struct * @attr: unused * @buf: buffer * * Return value: * number of bytes printed to buffer **/ static ssize_t ibmvfc_show_log_level(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; int len; spin_lock_irqsave(shost->host_lock, flags); len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level); spin_unlock_irqrestore(shost->host_lock, flags); return len; } /** * ibmvfc_store_log_level - Change the adapter's error logging level * @dev: class device struct * @attr: unused * @buf: buffer * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ibmvfc_store_log_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); vhost->log_level = simple_strtoul(buf, NULL, 10); spin_unlock_irqrestore(shost->host_lock, flags); return strlen(buf); } static ssize_t ibmvfc_show_scsi_channels(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; int len; spin_lock_irqsave(shost->host_lock, flags); len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels); spin_unlock_irqrestore(shost->host_lock, flags); return len; } static ssize_t ibmvfc_store_scsi_channels(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; unsigned int channels; spin_lock_irqsave(shost->host_lock, flags); channels = simple_strtoul(buf, NULL, 10); vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues); ibmvfc_hard_reset_host(vhost); spin_unlock_irqrestore(shost->host_lock, flags); return strlen(buf); } static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, ibmvfc_show_log_level, ibmvfc_store_log_level); static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR, ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels); #ifdef CONFIG_SCSI_IBMVFC_TRACE /** * ibmvfc_read_trace - Dump the adapter trace * @filp: open sysfs file * @kobj: kobject struct * @bin_attr: bin_attribute struct * @buf: buffer * @off: offset * @count: buffer size * * Return value: * number of bytes printed to buffer **/ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); 
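/* Copy the requested slice of the driver trace buffer while holding the host lock so the snapshot is consistent. */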
unsigned long flags = 0; int size = IBMVFC_TRACE_SIZE; char *src = (char *)vhost->trace; if (off > size) return 0; if (off + count > size) { size -= off; count = size; } spin_lock_irqsave(shost->host_lock, flags); memcpy(buf, &src[off], count); spin_unlock_irqrestore(shost->host_lock, flags); return count; } static struct bin_attribute ibmvfc_trace_attr = { .attr = { .name = "trace", .mode = S_IRUGO, }, .size = 0, .read = ibmvfc_read_trace, }; #endif static struct attribute *ibmvfc_host_attrs[] = { &dev_attr_partition_name.attr, &dev_attr_device_name.attr, &dev_attr_port_loc_code.attr, &dev_attr_drc_name.attr, &dev_attr_npiv_version.attr, &dev_attr_capabilities.attr, &dev_attr_log_level.attr, &dev_attr_nr_scsi_channels.attr, NULL }; ATTRIBUTE_GROUPS(ibmvfc_host); static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "IBM POWER Virtual FC Adapter", .proc_name = IBMVFC_NAME, .queuecommand = ibmvfc_queuecommand, .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = ibmvfc_eh_abort_handler, .eh_device_reset_handler = ibmvfc_eh_device_reset_handler, .eh_target_reset_handler = ibmvfc_eh_target_reset_handler, .eh_host_reset_handler = ibmvfc_eh_host_reset_handler, .slave_alloc = ibmvfc_slave_alloc, .slave_configure = ibmvfc_slave_configure, .target_alloc = ibmvfc_target_alloc, .scan_finished = ibmvfc_scan_finished, .change_queue_depth = ibmvfc_change_queue_depth, .cmd_per_lun = 16, .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = IBMVFC_MAX_SECTORS, .shost_groups = ibmvfc_host_groups, .track_queue_depth = 1, .host_tagset = 1, }; /** * ibmvfc_next_async_crq - Returns the next entry in async queue * @vhost: ibmvfc host struct * * Returns: * Pointer to next entry in queue / NULL if empty **/ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost) { struct ibmvfc_queue *async_crq = &vhost->async_crq; struct ibmvfc_async_crq *crq; crq = &async_crq->msgs.async[async_crq->cur]; if (crq->valid & 0x80) { if (++async_crq->cur == async_crq->size) async_crq->cur = 0; rmb(); } else crq = NULL; return crq; } /** * ibmvfc_next_crq - Returns the next entry in message queue * @vhost: ibmvfc host struct * * Returns: * Pointer to next entry in queue / NULL if empty **/ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost) { struct ibmvfc_queue *queue = &vhost->crq; struct ibmvfc_crq *crq; crq = &queue->msgs.crq[queue->cur]; if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; rmb(); } else crq = NULL; return crq; } /** * ibmvfc_interrupt - Interrupt handler * @irq: number of irq to handle, not used * @dev_instance: ibmvfc_host that received interrupt * * Returns: * IRQ_HANDLED **/ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance) { struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance; unsigned long flags; spin_lock_irqsave(vhost->host->host_lock, flags); vio_disable_interrupts(to_vio_dev(vhost->dev)); tasklet_schedule(&vhost->tasklet); spin_unlock_irqrestore(vhost->host->host_lock, flags); return IRQ_HANDLED; } /** * ibmvfc_tasklet - Interrupt handler tasklet * @data: ibmvfc host struct * * Returns: * Nothing **/ static void ibmvfc_tasklet(void *data) { struct ibmvfc_host *vhost = data; struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_crq *crq; struct ibmvfc_async_crq *async; struct ibmvfc_event *evt, *temp; unsigned long flags; int done = 0; LIST_HEAD(evt_doneq); spin_lock_irqsave(vhost->host->host_lock, flags); 
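/* Drain both the async and command CRQs; after re-enabling interrupts, poll each queue once more to close the race with a message that arrived in the meantime. */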
spin_lock(vhost->crq.q_lock); while (!done) { /* Pull all the valid messages off the async CRQ */ while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { ibmvfc_handle_async(async, vhost); async->valid = 0; wmb(); } /* Pull all the valid messages off the CRQ */ while ((crq = ibmvfc_next_crq(vhost)) != NULL) { ibmvfc_handle_crq(crq, vhost, &evt_doneq); crq->valid = 0; wmb(); } vio_enable_interrupts(vdev); if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { vio_disable_interrupts(vdev); ibmvfc_handle_async(async, vhost); async->valid = 0; wmb(); } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { vio_disable_interrupts(vdev); ibmvfc_handle_crq(crq, vhost, &evt_doneq); crq->valid = 0; wmb(); } else done = 1; } spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { del_timer(&evt->timer); list_del(&evt->queue_list); ibmvfc_trc_end(evt); evt->done(evt); } } static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable) { struct device *dev = scrq->vhost->dev; struct vio_dev *vdev = to_vio_dev(dev); unsigned long rc; int irq_action = H_ENABLE_VIO_INTERRUPT; if (!enable) irq_action = H_DISABLE_VIO_INTERRUPT; rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action, scrq->hw_irq, 0, 0); if (rc) dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n", enable ? "enable" : "disable", scrq->hwq_id, rc); return rc; } static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, struct list_head *evt_doneq) { struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); switch (crq->valid) { case IBMVFC_CRQ_CMD_RSP: break; case IBMVFC_CRQ_XPORT_EVENT: return; default: dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); return; } /* The only kind of payload CRQs we should get are responses to * things we send. 
Make sure this response is to something we * actually sent */ if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) { dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", crq->ioba); return; } if (unlikely(atomic_dec_if_positive(&evt->active))) { dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", crq->ioba); return; } spin_lock(&evt->queue->l_lock); list_move_tail(&evt->queue_list, evt_doneq); spin_unlock(&evt->queue->l_lock); } static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq) { struct ibmvfc_crq *crq; crq = &scrq->msgs.scrq[scrq->cur].crq; if (crq->valid & 0x80) { if (++scrq->cur == scrq->size) scrq->cur = 0; rmb(); } else crq = NULL; return crq; } static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq) { struct ibmvfc_crq *crq; struct ibmvfc_event *evt, *temp; unsigned long flags; int done = 0; LIST_HEAD(evt_doneq); spin_lock_irqsave(scrq->q_lock, flags); while (!done) { while ((crq = ibmvfc_next_scrq(scrq)) != NULL) { ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); crq->valid = 0; wmb(); } ibmvfc_toggle_scrq_irq(scrq, 1); if ((crq = ibmvfc_next_scrq(scrq)) != NULL) { ibmvfc_toggle_scrq_irq(scrq, 0); ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); crq->valid = 0; wmb(); } else done = 1; } spin_unlock_irqrestore(scrq->q_lock, flags); list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { del_timer(&evt->timer); list_del(&evt->queue_list); ibmvfc_trc_end(evt); evt->done(evt); } } static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance) { struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance; ibmvfc_toggle_scrq_irq(scrq, 0); ibmvfc_drain_sub_crq(scrq); return IRQ_HANDLED; } /** * ibmvfc_init_tgt - Set the next init job step for the target * @tgt: ibmvfc target struct * @job_step: job step to perform * **/ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT)) tgt->job_step = job_step; wake_up(&tgt->vhost->work_wait_q); } /** * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization * @tgt: ibmvfc target struct * @job_step: initialization job step * * Returns: 1 if step will be retried / 0 if not * **/ static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { ibmvfc_del_tgt(tgt); wake_up(&tgt->vhost->work_wait_q); return 0; } else ibmvfc_init_tgt(tgt, job_step); return 1; } /* Defined in FC-LS */ static const struct { int code; int retry; int logged_in; } prli_rsp [] = { { 0, 1, 0 }, { 1, 0, 1 }, { 2, 1, 0 }, { 3, 1, 0 }, { 4, 0, 0 }, { 5, 0, 0 }, { 6, 0, 1 }, { 7, 0, 0 }, { 8, 1, 0 }, }; /** * ibmvfc_get_prli_rsp - Find PRLI response index * @flags: PRLI response flags * **/ static int ibmvfc_get_prli_rsp(u16 flags) { int i; int code = (flags & 0x0f00) >> 8; for (i = 0; i < ARRAY_SIZE(prli_rsp); i++) if (prli_rsp[i].code == code) return i; return 0; } /** * ibmvfc_tgt_prli_done - Completion handler for Process Login * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; struct ibmvfc_prli_svc_parms *parms = &rsp->parms; u32 status = be16_to_cpu(rsp->common.status); int index, level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 
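/* On MAD success, the FC-LS response code carried in the PRLI service parameter flags decides whether the target is usable as-is, should be retried, or should be deleted. */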
switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n", parms->type, parms->flags, parms->service_parms); if (parms->type == IBMVFC_SCSI_FCP_TYPE) { index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags)); if (prli_rsp[index].logged_in) { if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) { tgt->need_login = 0; tgt->ids.roles = 0; if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; tgt->add_rport = 1; } else ibmvfc_del_tgt(tgt); } else if (prli_rsp[index].retry) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else ibmvfc_del_tgt(tgt); } else ibmvfc_del_tgt(tgt); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); break; case IBMVFC_MAD_FAILED: default: if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) && be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else if (tgt->logo_rcvd) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else ibmvfc_del_tgt(tgt); tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status); break; } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_tgt_send_prli - Send a process login * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt) { struct ibmvfc_process_login *prli; struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; kref_get(&tgt->kref); evt = ibmvfc_get_event(&vhost->crq); vhost->discovery_threads++; ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; prli = &evt->iu.prli; memset(prli, 0, sizeof(*prli)); if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { prli->common.version = cpu_to_be32(2); prli->target_wwpn = cpu_to_be64(tgt->wwpn); } else { prli->common.version = cpu_to_be32(1); } prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN); prli->common.length = cpu_to_be16(sizeof(*prli)); prli->scsi_id = cpu_to_be64(tgt->scsi_id); prli->parms.type = IBMVFC_SCSI_FCP_TYPE; prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR); prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC); prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED); if (cls3_error) prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, default_timeout)) { vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); kref_put(&tgt->kref, ibmvfc_release_tgt); } else tgt_dbg(tgt, "Sent process login\n"); } /** * ibmvfc_tgt_plogi_done - Completion handler for Port Login * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; u32 status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; 
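/* A successful PLOGI that reports a different port name than previously recorded sets vhost->reinit below so the target list is revalidated. */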
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "Port Login succeeded\n"); if (tgt->ids.port_name && tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) { vhost->reinit = 1; tgt_dbg(tgt, "Port re-init required\n"); break; } tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); tgt->ids.port_id = tgt->scsi_id; memcpy(&tgt->service_parms, &rsp->service_parms, sizeof(tgt->service_parms)); memcpy(&tgt->service_parms_change, &rsp->service_parms_change, sizeof(tgt->service_parms_change)); ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); break; case IBMVFC_MAD_FAILED: default: if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else ibmvfc_del_tgt(tgt); tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status); break; } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) { struct ibmvfc_port_login *plogi; struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; kref_get(&tgt->kref); tgt->logo_rcvd = 0; evt = ibmvfc_get_event(&vhost->crq); vhost->discovery_threads++; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; plogi = &evt->iu.plogi; memset(plogi, 0, sizeof(*plogi)); if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { plogi->common.version = cpu_to_be32(2); plogi->target_wwpn = cpu_to_be64(tgt->wwpn); } else { plogi->common.version = cpu_to_be32(1); } plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); plogi->common.length = cpu_to_be16(sizeof(*plogi)); plogi->scsi_id = cpu_to_be64(tgt->scsi_id); if (ibmvfc_send_event(evt, vhost, default_timeout)) { vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); kref_put(&tgt->kref, ibmvfc_release_tgt); } else tgt_dbg(tgt, "Sent port login\n"); } /** * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout; u32 status = be16_to_cpu(rsp->common.status); vhost->discovery_threads--; ibmvfc_free_event(evt); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "Implicit Logout succeeded\n"); break; case IBMVFC_MAD_DRIVER_FAILED: kref_put(&tgt->kref, ibmvfc_release_tgt); wake_up(&vhost->work_wait_q); return; case IBMVFC_MAD_FAILED: default: tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status); break; } ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi); kref_put(&tgt->kref, ibmvfc_release_tgt); 
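	/*
	 * The PLOGI job step queued by ibmvfc_init_tgt() above is picked up
	 * by the host work thread once it is woken below.
	 */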
wake_up(&vhost->work_wait_q); } /** * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout * @tgt: ibmvfc target struct * @done: Routine to call when the event is responded to * * Returns: * Allocated and initialized ibmvfc_event struct **/ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt, void (*done) (struct ibmvfc_event *)) { struct ibmvfc_implicit_logout *mad; struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; kref_get(&tgt->kref); evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; mad = &evt->iu.implicit_logout; memset(mad, 0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT); mad->common.length = cpu_to_be16(sizeof(*mad)); mad->old_scsi_id = cpu_to_be64(tgt->scsi_id); return evt; } /** * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; vhost->discovery_threads++; evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt, ibmvfc_tgt_implicit_logout_done); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, default_timeout)) { vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); kref_put(&tgt->kref, ibmvfc_release_tgt); } else tgt_dbg(tgt, "Sent Implicit Logout\n"); } /** * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; u32 status = be16_to_cpu(mad->common.status); vhost->discovery_threads--; ibmvfc_free_event(evt); /* * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the * driver in which case we need to free up all the targets. If we are * not unloading, we will still go through a hard reset to get out of * offline state, so there is no need to track the old targets in that * case. */ if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT); tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? 
"succeeded" : "failed"); kref_put(&tgt->kref, ibmvfc_release_tgt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (!vhost->logged_in) { ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); return; } if (vhost->discovery_threads >= disc_threads) return; vhost->discovery_threads++; evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt, ibmvfc_tgt_implicit_logout_and_del_done); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT); if (ibmvfc_send_event(evt, vhost, default_timeout)) { vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); kref_put(&tgt->kref, ibmvfc_release_tgt); } else tgt_dbg(tgt, "Sent Implicit Logout\n"); } /** * ibmvfc_tgt_move_login_done - Completion handler for Move Login * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login; u32 status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id); tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); tgt->scsi_id = tgt->new_scsi_id; tgt->ids.port_id = tgt->scsi_id; memcpy(&tgt->service_parms, &rsp->service_parms, sizeof(tgt->service_parms)); memcpy(&tgt->service_parms_change, &rsp->service_parms_change, sizeof(tgt->service_parms_change)); ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login); break; case IBMVFC_MAD_FAILED: default: level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login); tgt_log(tgt, level, "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n", tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags), status); break; } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_tgt_move_login - Initiate a move login for specified target * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_move_login *move; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; kref_get(&tgt->kref); evt = ibmvfc_get_event(&vhost->crq); vhost->discovery_threads++; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; move = &evt->iu.move_login; memset(move, 0, sizeof(*move)); move->common.version = cpu_to_be32(1); move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN); move->common.length = cpu_to_be16(sizeof(*move)); move->old_scsi_id = cpu_to_be64(tgt->scsi_id); move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id); move->wwpn = cpu_to_be64(tgt->wwpn); move->node_name = cpu_to_be64(tgt->ids.node_name); if (ibmvfc_send_event(evt, vhost, default_timeout)) { vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); kref_put(&tgt->kref, 
ibmvfc_release_tgt); } else tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id); } /** * ibmvfc_adisc_needs_plogi - Does device need PLOGI? * @mad: ibmvfc passthru mad struct * @tgt: ibmvfc target struct * * Returns: * 1 if PLOGI needed / 0 if PLOGI not needed **/ static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad, struct ibmvfc_target *tgt) { if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name) return 1; if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name) return 1; if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id) return 1; return 0; } /** * ibmvfc_tgt_adisc_done - Completion handler for ADISC * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; u32 status = be16_to_cpu(mad->common.status); u8 fc_reason, fc_explain; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); del_timer(&tgt->timer); switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "ADISC succeeded\n"); if (ibmvfc_adisc_needs_plogi(mad, tgt)) ibmvfc_del_tgt(tgt); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_FAILED: default: ibmvfc_del_tgt(tgt); fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16; fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error), ibmvfc_get_fc_type(fc_reason), fc_reason, ibmvfc_get_ls_explain(fc_explain), fc_explain, status); break; } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_init_passthru - Initialize an event struct for FC passthru * @evt: ibmvfc event struct * **/ static void ibmvfc_init_passthru(struct ibmvfc_event *evt) { struct ibmvfc_passthru_mad *mad = &evt->iu.passthru; memset(mad, 0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_passthru_mad, iu)); mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload)); mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response)); mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_passthru_mad, fc_iu) + offsetof(struct ibmvfc_passthru_fc_iu, payload)); mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload)); mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_passthru_mad, fc_iu) + offsetof(struct ibmvfc_passthru_fc_iu, response)); mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response)); } /** * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC * @evt: ibmvfc event struct * * Just cleanup this event struct. Everything else is handled by * the ADISC completion handler. If the ADISC never actually comes * back, we still have the timer running on the ADISC event struct * which will fire and cause the CRQ to get reset. 
 *
 **/
static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_target *tgt = evt->tgt;

	tgt_dbg(tgt, "ADISC cancel complete\n");
	vhost->abort_threads--;
	ibmvfc_free_event(evt);
	kref_put(&tgt->kref, ibmvfc_release_tgt);

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_adisc_timeout - Handle an ADISC timeout
 * @t: timer list struct of the ibmvfc target
 *
 * If an ADISC times out, send a cancel. If the cancel times
 * out, reset the CRQ. When the ADISC comes back as cancelled,
 * log back into the target.
 **/
static void ibmvfc_adisc_timeout(struct timer_list *t)
{
	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	tgt_dbg(tgt, "ADISC timeout\n");
	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->abort_threads >= disc_threads ||
	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
	    vhost->state != IBMVFC_INITIALIZING ||
	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	vhost->abort_threads++;
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

	evt->tgt = tgt;
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc) {
		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
	} else
		tgt_dbg(tgt, "Attempting to cancel ADISC\n");

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
 * @tgt: ibmvfc target struct
 *
 * When sending an ADISC we end up with two timers running. The
 * first timer is the timer in the ibmvfc target struct. If this
 * fires, we send a cancel to the target. The second timer is the
 * timer on the ibmvfc event for the ADISC, which is longer. If that
 * fires, it means the ADISC timed out and our attempt to cancel it
 * also failed, so we need to reset the CRQ.
**/ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) { struct ibmvfc_passthru_mad *mad; struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; kref_get(&tgt->kref); evt = ibmvfc_get_event(&vhost->crq); vhost->discovery_threads++; ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; ibmvfc_init_passthru(evt); mad = &evt->iu.passthru; mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS); mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id); mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key); mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC); memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name, sizeof(vhost->login_buf->resp.port_name)); memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name, sizeof(vhost->login_buf->resp.node_name)); mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff); if (timer_pending(&tgt->timer)) mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); else { tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ); add_timer(&tgt->timer); } ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) { vhost->discovery_threads--; del_timer(&tgt->timer); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); kref_put(&tgt->kref, ibmvfc_release_tgt); } else tgt_dbg(tgt, "Sent ADISC\n"); } /** * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD * @evt: ibmvfc event struct * **/ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) { struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; u32 status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "Query Target succeeded\n"); if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id) ibmvfc_del_tgt(tgt); else ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); break; case IBMVFC_MAD_FAILED: default: if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ && be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG) ibmvfc_del_tgt(tgt); else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); else ibmvfc_del_tgt(tgt); tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status); break; } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_tgt_query_target - Initiate a Query Target for specified target * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt) { struct ibmvfc_query_tgt *query_tgt; struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; kref_get(&tgt->kref); evt = ibmvfc_get_event(&vhost->crq); 
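	/*
	 * Query Target resolves the port WWPN back to a SCSI ID; the
	 * completion handler deletes the target if the ID no longer matches
	 * and otherwise follows up with an ADISC.
	 */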
	vhost->discovery_threads++;
	evt->tgt = tgt;
	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
	query_tgt = &evt->iu.query_tgt;
	memset(query_tgt, 0, sizeof(*query_tgt));
	query_tgt->common.version = cpu_to_be32(1);
	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Query Target\n");
}

/**
 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
 * @vhost: ibmvfc host struct
 * @target: Holds SCSI ID to allocate target for and the WWPN
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
			       struct ibmvfc_discover_targets_entry *target)
{
	struct ibmvfc_target *stgt = NULL;
	struct ibmvfc_target *wtgt = NULL;
	struct ibmvfc_target *tgt;
	unsigned long flags;
	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
	u64 wwpn = be64_to_cpu(target->wwpn);

	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->wwpn == wwpn) {
			wtgt = tgt;
			break;
		}
	}

	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == scsi_id) {
			stgt = tgt;
			break;
		}
	}

	if (wtgt && !stgt) {
		/*
		 * A WWPN target has moved and we still are tracking the old
		 * SCSI ID. The only way we should be able to get here is if
		 * we attempted to send an implicit logout for the old SCSI ID
		 * and it failed for some reason, such as there being I/O
		 * pending to the target. In this case, we will have already
		 * deleted the rport from the FC transport so we do a move
		 * login, which works even with I/O pending, however, if
		 * there is still I/O pending, it will stay outstanding, so
		 * we only do this if fast fail is disabled for the rport,
		 * otherwise we let terminate_rport_io clean up the port
		 * before we login at the new location.
		 */
		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			if (wtgt->move_login) {
				/*
				 * Do a move login here. The old target is no longer
				 * known to the transport layer. We don't use the
				 * normal ibmvfc_set_tgt_action to set this, as we
				 * don't normally want to allow this state change.
*/ wtgt->new_scsi_id = scsi_id; wtgt->action = IBMVFC_TGT_ACTION_INIT; wtgt->init_retries = 0; ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login); } goto unlock_out; } else { tgt_err(wtgt, "Unexpected target state: %d, %p\n", wtgt->action, wtgt->rport); } } else if (stgt) { if (tgt->need_login) ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); goto unlock_out; } spin_unlock_irqrestore(vhost->host->host_lock, flags); tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); memset(tgt, 0, sizeof(*tgt)); tgt->scsi_id = scsi_id; tgt->wwpn = wwpn; tgt->vhost = vhost; tgt->need_login = 1; timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0); kref_init(&tgt->kref); ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); spin_lock_irqsave(vhost->host->host_lock, flags); tgt->cancel_key = vhost->task_set++; list_add_tail(&tgt->queue, &vhost->targets); unlock_out: spin_unlock_irqrestore(vhost->host->host_lock, flags); return 0; } /** * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets * @vhost: ibmvfc host struct * * Returns: * 0 on success / other on failure **/ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost) { int i, rc; for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++) rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]); return rc; } /** * ibmvfc_discover_targets_done - Completion handler for discover targets MAD * @evt: ibmvfc event struct * **/ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; u32 mad_status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_dbg(vhost, "Discover Targets succeeded\n"); vhost->num_targets = be32_to_cpu(rsp->num_written); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); break; case IBMVFC_MAD_FAILED: level += ibmvfc_retry_host_init(vhost); ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); break; case IBMVFC_MAD_DRIVER_FAILED: break; default: dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); break; } ibmvfc_free_event(evt); wake_up(&vhost->work_wait_q); } /** * ibmvfc_discover_targets - Send Discover Targets MAD * @vhost: ibmvfc host struct * **/ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) { struct ibmvfc_discover_targets *mad; struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT); mad = &evt->iu.discover_targets; memset(mad, 0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS); mad->common.length = cpu_to_be16(sizeof(*mad)); mad->bufflen = cpu_to_be32(vhost->disc_buf_sz); mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma); mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz); mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); if (!ibmvfc_send_event(evt, vhost, default_timeout)) ibmvfc_dbg(vhost, "Sent discover targets\n"); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); } static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf; struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs; u32 mad_status = 
be16_to_cpu(evt->xfer_iu->channel_setup.common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; int flags, active_queues, i; ibmvfc_free_event(evt); switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_dbg(vhost, "Channel Setup succeeded\n"); flags = be32_to_cpu(setup->flags); vhost->do_enquiry = 0; active_queues = be32_to_cpu(setup->num_scsi_subq_channels); scrqs->active_queues = active_queues; if (flags & IBMVFC_CHANNELS_CANCELED) { ibmvfc_dbg(vhost, "Channels Canceled\n"); vhost->using_channels = 0; } else { if (active_queues) vhost->using_channels = 1; for (i = 0; i < active_queues; i++) scrqs->scrqs[i].vios_cookie = be64_to_cpu(setup->channel_handles[i]); ibmvfc_dbg(vhost, "Using %u channels\n", vhost->scsi_scrqs.active_queues); } break; case IBMVFC_MAD_FAILED: level += ibmvfc_retry_host_init(vhost); ibmvfc_log(vhost, level, "Channel Setup failed\n"); fallthrough; case IBMVFC_MAD_DRIVER_FAILED: return; default: dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n", mad_status); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); return; } ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); wake_up(&vhost->work_wait_q); } static void ibmvfc_channel_setup(struct ibmvfc_host *vhost) { struct ibmvfc_channel_setup_mad *mad; struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf; struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs; unsigned int num_channels = min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels); int i; memset(setup_buf, 0, sizeof(*setup_buf)); if (num_channels == 0) setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS); else { setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels); for (i = 0; i < num_channels; i++) setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie); } ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT); mad = &evt->iu.channel_setup; memset(mad, 0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP); mad->common.length = cpu_to_be16(sizeof(*mad)); mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma); mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf)); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); if (!ibmvfc_send_event(evt, vhost, default_timeout)) ibmvfc_dbg(vhost, "Sent channel setup\n"); else ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); } static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry; u32 mad_status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n"); vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels); ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: level += ibmvfc_retry_host_init(vhost); ibmvfc_log(vhost, level, "Channel Enquiry failed\n"); fallthrough; case IBMVFC_MAD_DRIVER_FAILED: ibmvfc_free_event(evt); return; default: dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n", mad_status); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); ibmvfc_free_event(evt); return; } ibmvfc_channel_setup(vhost); } static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost) { struct ibmvfc_channel_enquiry *mad; struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT); mad = &evt->iu.channel_enquiry; memset(mad, 
0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY); mad->common.length = cpu_to_be16(sizeof(*mad)); if (mig_channels_only) mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT); if (mig_no_less_channels) mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); if (!ibmvfc_send_event(evt, vhost, default_timeout)) ibmvfc_dbg(vhost, "Send channel enquiry\n"); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); } /** * ibmvfc_npiv_login_done - Completion handler for NPIV Login * @evt: ibmvfc event struct * **/ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status); struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; unsigned int npiv_max_sectors; int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_host_init(vhost); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_host_init(vhost); fallthrough; case IBMVFC_MAD_DRIVER_FAILED: ibmvfc_free_event(evt); return; default: dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); ibmvfc_free_event(evt); return; } vhost->client_migrated = 0; if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) { dev_err(vhost->dev, "Virtual adapter does not support FC. 
%x\n", rsp->flags); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); wake_up(&vhost->work_wait_q); return; } if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) { dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n", rsp->max_cmds); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); wake_up(&vhost->work_wait_q); return; } vhost->logged_in = 1; npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, rsp->drc_name, npiv_max_sectors); fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name); fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name); fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name); fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id); fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV; fc_host_supported_classes(vhost->host) = 0; if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1; if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2; if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3; fc_host_maxframe_size(vhost->host) = be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff; vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ; vhost->host->max_sectors = npiv_max_sectors; if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) { ibmvfc_channel_enquiry(vhost); } else { vhost->do_enquiry = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); wake_up(&vhost->work_wait_q); } } /** * ibmvfc_npiv_login - Sends NPIV login * @vhost: ibmvfc host struct * **/ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) { struct ibmvfc_npiv_login_mad *mad; struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); ibmvfc_gather_partition_info(vhost); ibmvfc_set_login_info(vhost); ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT); memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info)); mad = &evt->iu.npiv_login; memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN); mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad)); mad->buffer.va = cpu_to_be64(vhost->login_buf_dma); mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf)); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); if (!ibmvfc_send_event(evt, vhost, default_timeout)) ibmvfc_dbg(vhost, "Sent NPIV login\n"); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); } /** * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout * @evt: ibmvfc event struct * **/ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status); ibmvfc_free_event(evt); switch (mad_status) { case IBMVFC_MAD_SUCCESS: if (list_empty(&vhost->crq.sent) && vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { ibmvfc_init_host(vhost); return; } break; case IBMVFC_MAD_FAILED: case IBMVFC_MAD_NOT_SUPPORTED: case IBMVFC_MAD_CRQ_ERROR: case IBMVFC_MAD_DRIVER_FAILED: default: ibmvfc_dbg(vhost, "NPIV Logout failed. 
0x%X\n", mad_status); break; } ibmvfc_hard_reset_host(vhost); } /** * ibmvfc_npiv_logout - Issue an NPIV Logout * @vhost: ibmvfc host struct * **/ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) { struct ibmvfc_npiv_logout_mad *mad; struct ibmvfc_event *evt; evt = ibmvfc_get_event(&vhost->crq); ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); mad = &evt->iu.npiv_logout; memset(mad, 0, sizeof(*mad)); mad->common.version = cpu_to_be32(1); mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT); mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad)); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); if (!ibmvfc_send_event(evt, vhost, default_timeout)) ibmvfc_dbg(vhost, "Sent NPIV logout\n"); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); } /** * ibmvfc_dev_init_to_do - Is there target initialization work to do? * @vhost: ibmvfc host struct * * Returns: * 1 if work to do / 0 if not **/ static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost) { struct ibmvfc_target *tgt; list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_INIT || tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 1; } return 0; } /** * ibmvfc_dev_logo_to_do - Is there target logout work to do? * @vhost: ibmvfc host struct * * Returns: * 1 if work to do / 0 if not **/ static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost) { struct ibmvfc_target *tgt; list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT || tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT) return 1; } return 0; } /** * __ibmvfc_work_to_do - Is there task level work to do? (no locking) * @vhost: ibmvfc host struct * * Returns: * 1 if work to do / 0 if not **/ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) { struct ibmvfc_target *tgt; if (kthread_should_stop()) return 1; switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_INIT_WAIT: case IBMVFC_HOST_ACTION_LOGO_WAIT: return 0; case IBMVFC_HOST_ACTION_TGT_INIT: case IBMVFC_HOST_ACTION_QUERY_TGTS: if (vhost->discovery_threads == disc_threads) return 0; list_for_each_entry(tgt, &vhost->targets, queue) if (tgt->action == IBMVFC_TGT_ACTION_INIT) return 1; list_for_each_entry(tgt, &vhost->targets, queue) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 0; return 1; case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: if (vhost->discovery_threads == disc_threads) return 0; list_for_each_entry(tgt, &vhost->targets, queue) if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) return 1; list_for_each_entry(tgt, &vhost->targets, queue) if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT) return 0; return 1; case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: case IBMVFC_HOST_ACTION_QUERY: case IBMVFC_HOST_ACTION_RESET: case IBMVFC_HOST_ACTION_REENABLE: default: break; } return 1; } /** * ibmvfc_work_to_do - Is there task level work to do? 
* @vhost: ibmvfc host struct * * Returns: * 1 if work to do / 0 if not **/ static int ibmvfc_work_to_do(struct ibmvfc_host *vhost) { unsigned long flags; int rc; spin_lock_irqsave(vhost->host->host_lock, flags); rc = __ibmvfc_work_to_do(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); return rc; } /** * ibmvfc_log_ae - Log async events if necessary * @vhost: ibmvfc host struct * @events: events to log * **/ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) { if (events & IBMVFC_AE_RSCN) fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0); if ((events & IBMVFC_AE_LINKDOWN) && vhost->state >= IBMVFC_HALTED) fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); if ((events & IBMVFC_AE_LINKUP) && vhost->state == IBMVFC_INITIALIZING) fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0); } /** * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; struct fc_rport *rport; unsigned long flags; tgt_dbg(tgt, "Adding rport\n"); rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); spin_lock_irqsave(vhost->host->host_lock, flags); if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { tgt_dbg(tgt, "Deleting rport\n"); list_del(&tgt->queue); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); spin_unlock_irqrestore(vhost->host->host_lock, flags); fc_remote_port_delete(rport); del_timer_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { tgt_dbg(tgt, "Deleting rport with outstanding I/O\n"); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT); tgt->rport = NULL; tgt->init_retries = 0; spin_unlock_irqrestore(vhost->host->host_lock, flags); fc_remote_port_delete(rport); return; } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) { spin_unlock_irqrestore(vhost->host->host_lock, flags); return; } if (rport) { tgt_dbg(tgt, "rport add succeeded\n"); tgt->rport = rport; rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff; rport->supported_classes = 0; tgt->target_id = rport->scsi_target_id; if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS1; if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS2; if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS3; if (rport->rqst_q) blk_queue_max_segments(rport->rqst_q, 1); } else tgt_dbg(tgt, "rport add failed\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); } /** * ibmvfc_do_work - Do task level work * @vhost: ibmvfc host struct * **/ static void ibmvfc_do_work(struct ibmvfc_host *vhost) { struct ibmvfc_target *tgt; unsigned long flags; struct fc_rport *rport; LIST_HEAD(purge); int rc; ibmvfc_log_ae(vhost, vhost->events_to_log); spin_lock_irqsave(vhost->host->host_lock, flags); vhost->events_to_log = 0; switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_LOGO_WAIT: case IBMVFC_HOST_ACTION_INIT_WAIT: break; case IBMVFC_HOST_ACTION_RESET: list_splice_init(&vhost->purge, &purge); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_complete_purge(&purge); rc = ibmvfc_reset_crq(vhost); spin_lock_irqsave(vhost->host->host_lock, flags); if (!rc || rc == H_CLOSED) 
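			/*
			 * H_CLOSED just means the partner adapter is not ready
			 * yet; the CRQ itself registered, so interrupts can be
			 * re-enabled while we wait for the init exchange.
			 */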
vio_enable_interrupts(to_vio_dev(vhost->dev)); if (vhost->action == IBMVFC_HOST_ACTION_RESET) { /* * The only action we could have changed to would have * been reenable, in which case, we skip the rest of * this path and wait until we've done the re-enable * before sending the crq init. */ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; if (rc || (rc = ibmvfc_send_crq_init(vhost)) || (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); } } break; case IBMVFC_HOST_ACTION_REENABLE: list_splice_init(&vhost->purge, &purge); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_complete_purge(&purge); rc = ibmvfc_reenable_crq_queue(vhost); spin_lock_irqsave(vhost->host->host_lock, flags); if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) { /* * The only action we could have changed to would have * been reset, in which case, we skip the rest of this * path and wait until we've done the reset before * sending the crq init. */ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; if (rc || (rc = ibmvfc_send_crq_init(vhost))) { ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc); } } break; case IBMVFC_HOST_ACTION_LOGO: vhost->job_step(vhost); break; case IBMVFC_HOST_ACTION_INIT: BUG_ON(vhost->state != IBMVFC_INITIALIZING); if (vhost->delay_init) { vhost->delay_init = 0; spin_unlock_irqrestore(vhost->host->host_lock, flags); ssleep(15); return; } else vhost->job_step(vhost); break; case IBMVFC_HOST_ACTION_QUERY: list_for_each_entry(tgt, &vhost->targets, queue) ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS); break; case IBMVFC_HOST_ACTION_QUERY_TGTS: list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_INIT) { tgt->job_step(tgt); break; } } if (!ibmvfc_dev_init_to_do(vhost)) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); break; case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) { tgt->job_step(tgt); break; } } if (ibmvfc_dev_logo_to_do(vhost)) { spin_unlock_irqrestore(vhost->host->host_lock, flags); return; } list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { tgt_dbg(tgt, "Deleting rport\n"); rport = tgt->rport; tgt->rport = NULL; list_del(&tgt->queue); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); del_timer_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { tgt_dbg(tgt, "Deleting rport with I/O outstanding\n"); rport = tgt->rport; tgt->rport = NULL; tgt->init_retries = 0; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT); /* * If fast fail is enabled, we wait for it to fire and then clean up * the old port, since we expect the fast fail timer to clean up the * outstanding I/O faster than waiting for normal command timeouts. * However, if fast fail is disabled, any I/O outstanding to the * rport LUNs will stay outstanding indefinitely, since the EH handlers * won't get invoked for I/O's timing out. If this is a NPIV failover * scenario, the better alternative is to use the move login. 
*/ if (rport && rport->fast_io_fail_tmo == -1) tgt->move_login = 1; spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); return; } } if (vhost->state == IBMVFC_INITIALIZING) { if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { if (vhost->reinit) { vhost->reinit = 0; scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); spin_unlock_irqrestore(vhost->host->host_lock, flags); } else { ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); wake_up(&vhost->init_wait_q); schedule_work(&vhost->rport_add_work_q); vhost->init_retries = 0; spin_unlock_irqrestore(vhost->host->host_lock, flags); scsi_unblock_requests(vhost->host); } return; } else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_discover_targets; } } else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); spin_unlock_irqrestore(vhost->host->host_lock, flags); scsi_unblock_requests(vhost->host); wake_up(&vhost->init_wait_q); return; } break; case IBMVFC_HOST_ACTION_ALLOC_TGTS: ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_alloc_targets(vhost); spin_lock_irqsave(vhost->host->host_lock, flags); break; case IBMVFC_HOST_ACTION_TGT_INIT: list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_INIT) { tgt->job_step(tgt); break; } } if (!ibmvfc_dev_init_to_do(vhost)) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); break; default: break; } spin_unlock_irqrestore(vhost->host->host_lock, flags); } /** * ibmvfc_work - Do task level work * @data: ibmvfc host struct * * Returns: * zero **/ static int ibmvfc_work(void *data) { struct ibmvfc_host *vhost = data; int rc; set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(vhost->work_wait_q, ibmvfc_work_to_do(vhost)); BUG_ON(rc); if (kthread_should_stop()) break; ibmvfc_do_work(vhost); } ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n"); return 0; } /** * ibmvfc_alloc_queue - Allocate queue * @vhost: ibmvfc host struct * @queue: ibmvfc queue to allocate * @fmt: queue format to allocate * * Returns: * 0 on success / non-zero on failure **/ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, struct ibmvfc_queue *queue, enum ibmvfc_msg_fmt fmt) { struct device *dev = vhost->dev; size_t fmt_size; unsigned int pool_size = 0; ENTER; spin_lock_init(&queue->_lock); queue->q_lock = &queue->_lock; switch (fmt) { case IBMVFC_CRQ_FMT: fmt_size = sizeof(*queue->msgs.crq); pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ; break; case IBMVFC_ASYNC_FMT: fmt_size = sizeof(*queue->msgs.async); break; case IBMVFC_SUB_CRQ_FMT: fmt_size = sizeof(*queue->msgs.scrq); /* We need one extra event for Cancel Commands */ pool_size = max_requests + 1; break; default: dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt); return -EINVAL; } if (ibmvfc_init_event_pool(vhost, queue, pool_size)) { dev_err(dev, "Couldn't initialize event pool.\n"); return -ENOMEM; } queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL); if (!queue->msgs.handle) return -ENOMEM; queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, queue->msg_token)) { free_page((unsigned long)queue->msgs.handle); queue->msgs.handle = NULL; return -ENOMEM; } queue->cur = 0; queue->fmt = fmt; queue->size = PAGE_SIZE / fmt_size; queue->vhost = vhost; return 0; } /** 
* ibmvfc_init_crq - Initializes and registers CRQ with hypervisor * @vhost: ibmvfc host struct * * Allocates a page for messages, maps it for dma, and registers * the crq with the hypervisor. * * Return value: * zero on success / other on failure **/ static int ibmvfc_init_crq(struct ibmvfc_host *vhost) { int rc, retrc = -ENOMEM; struct device *dev = vhost->dev; struct vio_dev *vdev = to_vio_dev(dev); struct ibmvfc_queue *crq = &vhost->crq; ENTER; if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT)) return -ENOMEM; retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, crq->msg_token, PAGE_SIZE); if (rc == H_RESOURCE) /* maybe kexecing and resource is busy. try a reset */ retrc = rc = ibmvfc_reset_crq(vhost); if (rc == H_CLOSED) dev_warn(dev, "Partner adapter not ready\n"); else if (rc) { dev_warn(dev, "Error %d opening adapter\n", rc); goto reg_crq_failed; } retrc = 0; tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost); if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) { dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc); goto req_irq_failed; } if ((rc = vio_enable_interrupts(vdev))) { dev_err(dev, "Error %d enabling interrupts\n", rc); goto req_irq_failed; } LEAVE; return retrc; req_irq_failed: tasklet_kill(&vhost->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); reg_crq_failed: ibmvfc_free_queue(vhost, crq); return retrc; } static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, int index) { struct device *dev = vhost->dev; struct vio_dev *vdev = to_vio_dev(dev); struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index]; int rc = -ENOMEM; ENTER; rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, &scrq->cookie, &scrq->hw_irq); /* H_CLOSED indicates successful register, but no CRQ partner */ if (rc && rc != H_CLOSED) { dev_warn(dev, "Error registering sub-crq: %d\n", rc); if (rc == H_PARAMETER) dev_warn_once(dev, "Firmware may not support MQ\n"); goto reg_failed; } scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); if (!scrq->irq) { rc = -EINVAL; dev_err(dev, "Error mapping sub-crq[%d] irq\n", index); goto irq_failed; } snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d", vdev->unit_address, index); rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq); if (rc) { dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index); irq_dispose_mapping(scrq->irq); goto irq_failed; } scrq->hwq_id = index; LEAVE; return 0; irq_failed: do { rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); } while (rtas_busy_delay(rc)); reg_failed: LEAVE; return rc; } static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) { struct device *dev = vhost->dev; struct vio_dev *vdev = to_vio_dev(dev); struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index]; long rc; ENTER; free_irq(scrq->irq, scrq); irq_dispose_mapping(scrq->irq); scrq->irq = 0; do { rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); if (rc) dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); /* Clean out the queue */ memset(scrq->msgs.crq, 0, PAGE_SIZE); scrq->cur = 0; LEAVE; } static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost) { int i, j; ENTER; if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) return; for (i = 0; i < nr_scsi_hw_queues; i++) { if (ibmvfc_register_scsi_channel(vhost, i)) { for (j = i; j > 0; j--) 
ibmvfc_deregister_scsi_channel(vhost, j - 1); vhost->do_enquiry = 0; return; } } LEAVE; } static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost) { int i; ENTER; if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) return; for (i = 0; i < nr_scsi_hw_queues; i++) ibmvfc_deregister_scsi_channel(vhost, i); LEAVE; } static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) { struct ibmvfc_queue *scrq; int i, j; ENTER; if (!vhost->mq_enabled) return; vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues, sizeof(*vhost->scsi_scrqs.scrqs), GFP_KERNEL); if (!vhost->scsi_scrqs.scrqs) { vhost->do_enquiry = 0; return; } for (i = 0; i < nr_scsi_hw_queues; i++) { scrq = &vhost->scsi_scrqs.scrqs[i]; if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) { for (j = i; j > 0; j--) { scrq = &vhost->scsi_scrqs.scrqs[j - 1]; ibmvfc_free_queue(vhost, scrq); } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; vhost->do_enquiry = 0; vhost->mq_enabled = 0; return; } } ibmvfc_reg_sub_crqs(vhost); LEAVE; } static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) { struct ibmvfc_queue *scrq; int i; ENTER; if (!vhost->scsi_scrqs.scrqs) return; ibmvfc_dereg_sub_crqs(vhost); for (i = 0; i < nr_scsi_hw_queues; i++) { scrq = &vhost->scsi_scrqs.scrqs[i]; ibmvfc_free_queue(vhost, scrq); } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; LEAVE; } /** * ibmvfc_free_mem - Free memory for vhost * @vhost: ibmvfc host struct * * Return value: * none **/ static void ibmvfc_free_mem(struct ibmvfc_host *vhost) { struct ibmvfc_queue *async_q = &vhost->async_crq; ENTER; mempool_destroy(vhost->tgt_pool); kfree(vhost->trace); dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf, vhost->disc_buf_dma); dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), vhost->login_buf, vhost->login_buf_dma); dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf), vhost->channel_setup_buf, vhost->channel_setup_dma); dma_pool_destroy(vhost->sg_pool); ibmvfc_free_queue(vhost, async_q); LEAVE; } /** * ibmvfc_alloc_mem - Allocate memory for vhost * @vhost: ibmvfc host struct * * Return value: * 0 on success / non-zero on failure **/ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) { struct ibmvfc_queue *async_q = &vhost->async_crq; struct device *dev = vhost->dev; ENTER; if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) { dev_err(dev, "Couldn't allocate/map async queue.\n"); goto nomem; } vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev, SG_ALL * sizeof(struct srp_direct_buf), sizeof(struct srp_direct_buf), 0); if (!vhost->sg_pool) { dev_err(dev, "Failed to allocate sg pool\n"); goto unmap_async_crq; } vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf), &vhost->login_buf_dma, GFP_KERNEL); if (!vhost->login_buf) { dev_err(dev, "Couldn't allocate NPIV login buffer\n"); goto free_sg_pool; } vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets; vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz, &vhost->disc_buf_dma, GFP_KERNEL); if (!vhost->disc_buf) { dev_err(dev, "Couldn't allocate Discover Targets buffer\n"); goto free_login_buffer; } vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES, sizeof(struct ibmvfc_trace_entry), GFP_KERNEL); atomic_set(&vhost->trace_index, -1); if (!vhost->trace) goto free_disc_buffer; vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, sizeof(struct ibmvfc_target)); if (!vhost->tgt_pool) { dev_err(dev, "Couldn't allocate target 
memory pool\n"); goto free_trace; } vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf), &vhost->channel_setup_dma, GFP_KERNEL); if (!vhost->channel_setup_buf) { dev_err(dev, "Couldn't allocate Channel Setup buffer\n"); goto free_tgt_pool; } LEAVE; return 0; free_tgt_pool: mempool_destroy(vhost->tgt_pool); free_trace: kfree(vhost->trace); free_disc_buffer: dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf, vhost->disc_buf_dma); free_login_buffer: dma_free_coherent(dev, sizeof(*vhost->login_buf), vhost->login_buf, vhost->login_buf_dma); free_sg_pool: dma_pool_destroy(vhost->sg_pool); unmap_async_crq: ibmvfc_free_queue(vhost, async_q); nomem: LEAVE; return -ENOMEM; } /** * ibmvfc_rport_add_thread - Worker thread for rport adds * @work: work struct * **/ static void ibmvfc_rport_add_thread(struct work_struct *work) { struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, rport_add_work_q); struct ibmvfc_target *tgt; struct fc_rport *rport; unsigned long flags; int did_work; ENTER; spin_lock_irqsave(vhost->host->host_lock, flags); do { did_work = 0; if (vhost->state != IBMVFC_ACTIVE) break; list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->add_rport) { did_work = 1; tgt->add_rport = 0; kref_get(&tgt->kref); rport = tgt->rport; if (!rport) { spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_tgt_add_rport(tgt); } else if (get_device(&rport->dev)) { spin_unlock_irqrestore(vhost->host->host_lock, flags); tgt_dbg(tgt, "Setting rport roles\n"); fc_remote_port_rolechg(rport, tgt->ids.roles); put_device(&rport->dev); } else { spin_unlock_irqrestore(vhost->host->host_lock, flags); } kref_put(&tgt->kref, ibmvfc_release_tgt); spin_lock_irqsave(vhost->host->host_lock, flags); break; } } } while(did_work); if (vhost->state == IBMVFC_ACTIVE) vhost->scan_complete = 1; spin_unlock_irqrestore(vhost->host->host_lock, flags); LEAVE; } /** * ibmvfc_probe - Adapter hot plug add entry point * @vdev: vio device struct * @id: vio device id struct * * Return value: * 0 on success / non-zero on failure **/ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct ibmvfc_host *vhost; struct Scsi_Host *shost; struct device *dev = &vdev->dev; int rc = -ENOMEM; unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES; ENTER; shost = scsi_host_alloc(&driver_template, sizeof(*vhost)); if (!shost) { dev_err(dev, "Couldn't allocate host data\n"); goto out; } shost->transportt = ibmvfc_transport_template; shost->can_queue = max_requests; shost->max_lun = max_lun; shost->max_id = max_targets; shost->max_sectors = IBMVFC_MAX_SECTORS; shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; shost->unique_id = shost->host_no; shost->nr_hw_queues = mq_enabled ? 
min(max_scsi_queues, nr_scsi_hw_queues) : 1; vhost = shost_priv(shost); INIT_LIST_HEAD(&vhost->targets); INIT_LIST_HEAD(&vhost->purge); sprintf(vhost->name, IBMVFC_NAME); vhost->host = shost; vhost->dev = dev; vhost->partition_number = -1; vhost->log_level = log_level; vhost->task_set = 1; vhost->mq_enabled = mq_enabled; vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels); vhost->using_channels = 0; vhost->do_enquiry = 1; vhost->scan_timeout = 0; strcpy(vhost->partition_name, "UNKNOWN"); init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); mutex_init(&vhost->passthru_mutex); if ((rc = ibmvfc_alloc_mem(vhost))) goto free_scsi_host; vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME, shost->host_no); if (IS_ERR(vhost->work_thread)) { dev_err(dev, "Couldn't create kernel thread: %ld\n", PTR_ERR(vhost->work_thread)); rc = PTR_ERR(vhost->work_thread); goto free_host_mem; } if ((rc = ibmvfc_init_crq(vhost))) { dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); goto kill_kthread; } if ((rc = scsi_add_host(shost, dev))) goto release_crq; fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO; if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj, &ibmvfc_trace_attr))) { dev_err(dev, "Failed to create trace file. rc=%d\n", rc); goto remove_shost; } ibmvfc_init_sub_crqs(vhost); if (shost_to_fc_host(shost)->rqst_q) blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); dev_set_drvdata(dev, vhost); spin_lock(&ibmvfc_driver_lock); list_add_tail(&vhost->queue, &ibmvfc_head); spin_unlock(&ibmvfc_driver_lock); ibmvfc_send_crq_init(vhost); scsi_scan_host(shost); return 0; remove_shost: scsi_remove_host(shost); release_crq: ibmvfc_release_crq_queue(vhost); kill_kthread: kthread_stop(vhost->work_thread); free_host_mem: ibmvfc_free_mem(vhost); free_scsi_host: scsi_host_put(shost); out: LEAVE; return rc; } /** * ibmvfc_remove - Adapter hot plug remove entry point * @vdev: vio device struct * * Return value: * 0 **/ static void ibmvfc_remove(struct vio_dev *vdev) { struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev); LIST_HEAD(purge); unsigned long flags; ENTER; ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_wait_while_resetting(vhost); kthread_stop(vhost->work_thread); fc_remove_host(vhost->host); scsi_remove_host(vhost->host); spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_purge_requests(vhost, DID_ERROR); list_splice_init(&vhost->purge, &purge); spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_complete_purge(&purge); ibmvfc_release_sub_crqs(vhost); ibmvfc_release_crq_queue(vhost); ibmvfc_free_mem(vhost); spin_lock(&ibmvfc_driver_lock); list_del(&vhost->queue); spin_unlock(&ibmvfc_driver_lock); scsi_host_put(vhost->host); LEAVE; } /** * ibmvfc_resume - Resume from suspend * @dev: device struct * * We may have lost an interrupt across suspend/resume, so kick the * interrupt handler * */ static int ibmvfc_resume(struct device *dev) { unsigned long flags; struct ibmvfc_host *vhost = dev_get_drvdata(dev); struct vio_dev *vdev = to_vio_dev(dev); spin_lock_irqsave(vhost->host->host_lock, flags); vio_disable_interrupts(vdev); tasklet_schedule(&vhost->tasklet); spin_unlock_irqrestore(vhost->host->host_lock, flags); return 0; } /** * ibmvfc_get_desired_dma - 
Calculate DMA resources needed by the driver * @vdev: vio device struct * * Return value: * Number of bytes the driver will need to DMA map at the same time in * order to perform well. */ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev) { unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu); return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun); } static const struct vio_device_id ibmvfc_device_table[] = { {"fcp", "IBM,vfc-client"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); static const struct dev_pm_ops ibmvfc_pm_ops = { .resume = ibmvfc_resume }; static struct vio_driver ibmvfc_driver = { .id_table = ibmvfc_device_table, .probe = ibmvfc_probe, .remove = ibmvfc_remove, .get_desired_dma = ibmvfc_get_desired_dma, .name = IBMVFC_NAME, .pm = &ibmvfc_pm_ops, }; static struct fc_function_template ibmvfc_transport_functions = { .show_host_fabric_name = 1, .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_port_type = 1, .show_host_port_id = 1, .show_host_maxframe_size = 1, .get_host_port_state = ibmvfc_get_host_port_state, .show_host_port_state = 1, .get_host_speed = ibmvfc_get_host_speed, .show_host_speed = 1, .issue_fc_host_lip = ibmvfc_issue_fc_host_lip, .terminate_rport_io = ibmvfc_terminate_rport_io, .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_node_name = ibmvfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = ibmvfc_get_starget_port_name, .show_starget_port_name = 1, .get_starget_port_id = ibmvfc_get_starget_port_id, .show_starget_port_id = 1, .bsg_request = ibmvfc_bsg_request, .bsg_timeout = ibmvfc_bsg_timeout, }; /** * ibmvfc_module_init - Initialize the ibmvfc module * * Return value: * 0 on success / other on failure **/ static int __init ibmvfc_module_init(void) { int rc; if (!firmware_has_feature(FW_FEATURE_VIO)) return -ENODEV; printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); if (!ibmvfc_transport_template) return -ENOMEM; rc = vio_register_driver(&ibmvfc_driver); if (rc) fc_release_transport(ibmvfc_transport_template); return rc; } /** * ibmvfc_module_exit - Teardown the ibmvfc module * * Return value: * nothing **/ static void __exit ibmvfc_module_exit(void) { vio_unregister_driver(&ibmvfc_driver); fc_release_transport(ibmvfc_transport_template); } module_init(ibmvfc_module_init); module_exit(ibmvfc_module_exit);
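Editor's note: the ibmvfc_alloc_mem()/ibmvfc_probe() error paths above all use the same goto-based unwind ladder, where each successful allocation gains a cleanup label and a later failure jumps to the label that releases everything acquired so far, in reverse order. Below is a minimal, standalone userspace sketch of that idiom only; it is not part of ibmvfc.c, the names demo_host and demo_alloc are hypothetical, and plain malloc()/free() stand in for the driver's DMA buffers and mempools.

/*
 * Hypothetical sketch of the goto unwind ladder used by
 * ibmvfc_alloc_mem() above.  Not kernel code: userspace C with
 * malloc()/free() standing in for dma_alloc_coherent()/mempools.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_host {
	void *login_buf;
	void *disc_buf;
	void *trace;
};

static int demo_alloc(struct demo_host *h)
{
	h->login_buf = malloc(64);
	if (!h->login_buf)
		goto nomem;

	h->disc_buf = malloc(128);
	if (!h->disc_buf)
		goto free_login_buf;

	h->trace = calloc(16, sizeof(long));
	if (!h->trace)
		goto free_disc_buf;

	return 0;

	/* cleanup labels run in reverse allocation order */
free_disc_buf:
	free(h->disc_buf);
free_login_buf:
	free(h->login_buf);
nomem:
	return -1;	/* the real function returns -ENOMEM here */
}

int main(void)
{
	struct demo_host h = { 0 };

	if (demo_alloc(&h)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("all buffers allocated\n");
	free(h.trace);
	free(h.disc_buf);
	free(h.login_buf);
	return 0;
}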
linux-master
drivers/scsi/ibmvscsi/ibmvfc.c
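Editor's note: ibmvfc_init_sub_crqs() in the file above shows a second common cleanup idiom: when setting up entry i of an array of queues fails, the entries already set up ([0, i)) are torn down in reverse with a `for (j = i; j > 0; j--)` loop before the array itself is freed. The following standalone sketch illustrates that rollback pattern under the same caveats as before; demo_queue, demo_queue_setup and demo_queue_teardown are hypothetical stand-ins for ibmvfc_alloc_queue()/ibmvfc_free_queue().

/*
 * Hypothetical sketch of the partial-rollback loop used by
 * ibmvfc_init_sub_crqs() above.  Userspace C, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_NR_QUEUES 4

struct demo_queue {
	void *msgs;
};

static int demo_queue_setup(struct demo_queue *q, size_t nmsgs)
{
	q->msgs = calloc(nmsgs, sizeof(long));
	return q->msgs ? 0 : -1;
}

static void demo_queue_teardown(struct demo_queue *q)
{
	free(q->msgs);
	q->msgs = NULL;
}

static struct demo_queue *demo_init_queues(void)
{
	struct demo_queue *queues;
	int i, j;

	queues = calloc(DEMO_NR_QUEUES, sizeof(*queues));
	if (!queues)
		return NULL;

	for (i = 0; i < DEMO_NR_QUEUES; i++) {
		if (demo_queue_setup(&queues[i], 256)) {
			/* roll back queues [0, i) in reverse order */
			for (j = i; j > 0; j--)
				demo_queue_teardown(&queues[j - 1]);
			free(queues);
			return NULL;
		}
	}
	return queues;
}

int main(void)
{
	struct demo_queue *queues = demo_init_queues();
	int i;

	if (!queues)
		return 1;
	printf("%d queues ready\n", DEMO_NR_QUEUES);
	for (i = 0; i < DEMO_NR_QUEUES; i++)
		demo_queue_teardown(&queues[i]);
	free(queues);
	return 0;
}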
/* * Adaptec AIC79xx device driver for Linux. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $ * * -------------------------------------------------------------------------- * Copyright (c) 1994-2000 Justin T. Gibbs. * Copyright (c) 1997-1999 Doug Ledford * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include <scsi/scsicam.h> static struct scsi_transport_template *ahd_linux_transport_template = NULL; #include <linux/init.h> /* __setup */ #include <linux/mm.h> /* For fetching system memory size */ #include <linux/blkdev.h> /* For block_size() */ #include <linux/delay.h> /* For ssleep/msleep */ #include <linux/device.h> #include <linux/slab.h> /* * Bucket size for counting good commands in between bad ones. */ #define AHD_LINUX_ERR_THRESH 1000 /* * Set this to the delay in seconds after SCSI bus reset. * Note, we honor this only for the initial bus reset. * The scsi error recovery code performs its own bus settle * delay handling for error recovery actions. */ #ifdef CONFIG_AIC79XX_RESET_DELAY_MS #define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS #else #define AIC79XX_RESET_DELAY 5000 #endif /* * To change the default number of tagged transactions allowed per-device, * add a line to the lilo.conf file like: * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" * which will result in the first four devices on the first two * controllers being set to a tagged queue depth of 32. * * The tag_commands is an array of 16 to allow for wide and twin adapters. * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15 * for channel 1. */ typedef struct { uint16_t tag_commands[16]; /* Allow for wide/twin adapters. 
*/ } adapter_tag_info_t; /* * Modify this as you see fit for your system. * * 0 tagged queuing disabled * 1 <= n <= 253 n == max tags ever dispatched. * * The driver will throttle the number of commands dispatched to a * device if it returns queue full. For devices with a fixed maximum * queue depth, the driver will eventually determine this depth and * lock it in (a console message is printed to indicate that a lock * has occurred). On some devices, queue full is returned for a temporary * resource shortage. These devices will return queue full at varying * depths. The driver will throttle back when the queue fulls occur and * attempt to slowly increase the depth over time as the device recovers * from the resource shortage. * * In this example, the first line will disable tagged queueing for all * the devices on the first probed aic79xx adapter. * * The second line enables tagged queueing with 4 commands/LUN for IDs * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the * driver to attempt to use up to 64 tags for ID 1. * * The third line is the same as the first line. * * The fourth line disables tagged queueing for devices 0 and 3. It * enables tagged queueing for the other IDs, with 16 commands/LUN * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for * IDs 2, 5-7, and 9-15. */ /* * NOTE: The below structure is for reference only, the actual structure * to modify in order to change things is just below this comment block. adapter_tag_info_t aic79xx_tag_info[] = { {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}}, {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} }; */ #ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE #define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE #else #define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE #endif #define AIC79XX_CONFIGED_TAG_COMMANDS { \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \ AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE \ } /* * By default, use the number of commands specified by * the users kernel configuration. */ static adapter_tag_info_t aic79xx_tag_info[] = { {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS}, {AIC79XX_CONFIGED_TAG_COMMANDS} }; /* * The I/O cell on the chip is very configurable in respect to its analog * characteristics. Set the defaults here; they can be overriden with * the proper insmod parameters. 
*/ struct ahd_linux_iocell_opts { uint8_t precomp; uint8_t slewrate; uint8_t amplitude; }; #define AIC79XX_DEFAULT_PRECOMP 0xFF #define AIC79XX_DEFAULT_SLEWRATE 0xFF #define AIC79XX_DEFAULT_AMPLITUDE 0xFF #define AIC79XX_DEFAULT_IOOPTS \ { \ AIC79XX_DEFAULT_PRECOMP, \ AIC79XX_DEFAULT_SLEWRATE, \ AIC79XX_DEFAULT_AMPLITUDE \ } #define AIC79XX_PRECOMP_INDEX 0 #define AIC79XX_SLEWRATE_INDEX 1 #define AIC79XX_AMPLITUDE_INDEX 2 static struct ahd_linux_iocell_opts aic79xx_iocell_info[] __ro_after_init = { AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS, AIC79XX_DEFAULT_IOOPTS }; /* * There should be a specific return value for this in scsi.h, but * it seems that most drivers ignore it. */ #define DID_UNDERFLOW DID_ERROR void ahd_print_path(struct ahd_softc *ahd, struct scb *scb) { printk("(scsi%d:%c:%d:%d): ", ahd->platform_data->host->host_no, scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X', scb != NULL ? SCB_GET_TARGET(ahd, scb) : -1, scb != NULL ? SCB_GET_LUN(scb) : -1); } /* * XXX - these options apply unilaterally to _all_ adapters * cards in the system. This should be fixed. Exceptions to this * rule are noted in the comments. */ /* * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This * has no effect on any later resets that might occur due to things like * SCSI bus timeouts. */ static uint32_t aic79xx_no_reset; /* * Should we force EXTENDED translation on a controller. * 0 == Use whatever is in the SEEPROM or default to off * 1 == Use whatever is in the SEEPROM or default to on */ static uint32_t aic79xx_extended; /* * PCI bus parity checking of the Adaptec controllers. This is somewhat * dubious at best. To my knowledge, this option has never actually * solved a PCI parity problem, but on certain machines with broken PCI * chipset configurations, it can generate tons of false error messages. * It's included in the driver for completeness. * 0 = Shut off PCI parity check * non-0 = Enable PCI parity check * * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this * variable to -1 you would actually want to simply pass the variable * name without a number. That will invert the 0 which will result in * -1. */ static uint32_t aic79xx_pci_parity = ~0; /* * There are lots of broken chipsets in the world. Some of them will * violate the PCI spec when we issue byte sized memory writes to our * controller. I/O mapped register access, if allowed by the given * platform, will work in almost all cases. */ uint32_t aic79xx_allow_memio = ~0; /* * So that we can set how long each device is given as a selection timeout. * The table of values goes like this: * 0 - 256ms * 1 - 128ms * 2 - 64ms * 3 - 32ms * We default to 256ms because some older devices need a longer time * to respond to initial selection. */ static uint32_t aic79xx_seltime; /* * Certain devices do not perform any aging on commands. Should the * device be saturated by commands in one portion of the disk, it is * possible for transactions on far away sectors to never be serviced. * To handle these devices, we can periodically send an ordered tag to * force all outstanding transactions to be serviced prior to a new * transaction. 
*/ static uint32_t aic79xx_periodic_otag; /* Some storage boxes are using an LSI chip which has a bug making it * impossible to use aic79xx Rev B chip in 320 speeds. The following * storage boxes have been reported to be buggy: * EonStor 3U 16-Bay: U16U-G3A3 * EonStor 2U 12-Bay: U12U-G3A3 * SentinelRAID: 2500F R5 / R6 * SentinelRAID: 2500F R1 * SentinelRAID: 2500F/1500F * SentinelRAID: 150F * * To get around this LSI bug, you can set your board to 160 mode * or you can enable the SLOWCRC bit. */ uint32_t aic79xx_slowcrc; /* * Module information and settable options. */ static char *aic79xx = NULL; MODULE_AUTHOR("Maintainer: Hannes Reinecke <[email protected]>"); MODULE_DESCRIPTION("Adaptec AIC790X U320 SCSI Host Bus Adapter driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(AIC79XX_DRIVER_VERSION); module_param(aic79xx, charp, 0444); MODULE_PARM_DESC(aic79xx, "period-delimited options string:\n" " verbose Enable verbose/diagnostic logging\n" " allow_memio Allow device registers to be memory mapped\n" " debug Bitmask of debug values to enable\n" " no_reset Suppress initial bus resets\n" " extended Enable extended geometry on all controllers\n" " periodic_otag Send an ordered tagged transaction\n" " periodically to prevent tag starvation.\n" " This may be required by some older disk\n" " or drives/RAID arrays.\n" " tag_info:<tag_str> Set per-target tag depth\n" " global_tag_depth:<int> Global tag depth for all targets on all buses\n" " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" " precomp:<pcomp_list> Set the signal precompensation (0-7).\n" " amplitude:<int> Set the signal amplitude (0-7).\n" " seltime:<int> Selection Timeout:\n" " (0/256ms,1/128ms,2/64ms,3/32ms)\n" " slowcrc Turn on the SLOWCRC bit (Rev B only)\n" "\n" " Sample modprobe configuration file:\n" " # Enable verbose logging\n" " # Set tag depth on Controller 2/Target 2 to 10 tags\n" " # Shorten the selection timeout to 128ms\n" "\n" " options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" ); static void ahd_linux_handle_scsi_status(struct ahd_softc *, struct scsi_device *, struct scb *); static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd); static int ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd); static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd); static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_linux_device_queue_depth(struct scsi_device *); static int ahd_linux_run_command(struct ahd_softc*, struct ahd_linux_device *, struct scsi_cmnd *); static void ahd_linux_setup_tag_info_global(char *p); static int aic79xx_setup(char *c); static void ahd_freeze_simq(struct ahd_softc *ahd); static void ahd_release_simq(struct ahd_softc *ahd); static int ahd_linux_unit; /************************** OS Utility Wrappers *******************************/ void ahd_delay(long); void ahd_delay(long usec) { /* * udelay on Linux can have problems for * multi-millisecond waits. Wait at most * 1024us per call. 
*/ while (usec > 0) { udelay(usec % 1024); usec -= 1024; } } /***************************** Low Level I/O **********************************/ uint8_t ahd_inb(struct ahd_softc * ahd, long port); void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val); void ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val); void ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *, int count); void ahd_insb(struct ahd_softc * ahd, long port, uint8_t *, int count); uint8_t ahd_inb(struct ahd_softc * ahd, long port) { uint8_t x; if (ahd->tags[0] == BUS_SPACE_MEMIO) { x = readb(ahd->bshs[0].maddr + port); } else { x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF)); } mb(); return (x); } #if 0 /* unused */ static uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port) { uint8_t x; if (ahd->tags[0] == BUS_SPACE_MEMIO) { x = readw(ahd->bshs[0].maddr + port); } else { x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF)); } mb(); return (x); } #endif void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val) { if (ahd->tags[0] == BUS_SPACE_MEMIO) { writeb(val, ahd->bshs[0].maddr + port); } else { outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF)); } mb(); } void ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val) { if (ahd->tags[0] == BUS_SPACE_MEMIO) { writew(val, ahd->bshs[0].maddr + port); } else { outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF)); } mb(); } void ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count) { int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. */ for (i = 0; i < count; i++) ahd_outb(ahd, port, *array++); } void ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count) { int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. */ for (i = 0; i < count; i++) *array++ = ahd_inb(ahd, port); } /******************************* PCI Routines *********************************/ uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width) { switch (width) { case 1: { uint8_t retval; pci_read_config_byte(pci, reg, &retval); return (retval); } case 2: { uint16_t retval; pci_read_config_word(pci, reg, &retval); return (retval); } case 4: { uint32_t retval; pci_read_config_dword(pci, reg, &retval); return (retval); } default: panic("ahd_pci_read_config: Read size too big"); /* NOTREACHED */ return (0); } } void ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width) { switch (width) { case 1: pci_write_config_byte(pci, reg, value); break; case 2: pci_write_config_word(pci, reg, value); break; case 4: pci_write_config_dword(pci, reg, value); break; default: panic("ahd_pci_write_config: Write size too big"); /* NOTREACHED */ } } /****************************** Inlines ***************************************/ static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*); static void ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) { struct scsi_cmnd *cmd; cmd = scb->io_ctx; ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE); scsi_dma_unmap(cmd); } /******************************** Macros **************************************/ #define BUILD_SCSIID(ahd, cmd) \ (((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id) /* * Return a string describing the driver. 
*/ static const char * ahd_linux_info(struct Scsi_Host *host) { static char buffer[512]; char ahd_info[256]; char *bp; struct ahd_softc *ahd; bp = &buffer[0]; ahd = *(struct ahd_softc **)host->hostdata; memset(bp, 0, sizeof(buffer)); strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n" " <"); strcat(bp, ahd->description); strcat(bp, ">\n" " "); ahd_controller_info(ahd, ahd_info); strcat(bp, ahd_info); return (bp); } /* * Queue an SCB to the controller. */ static int ahd_linux_queue_lck(struct scsi_cmnd *cmd) { struct ahd_softc *ahd; struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); int rtn = SCSI_MLQUEUE_HOST_BUSY; ahd = *(struct ahd_softc **)cmd->device->host->hostdata; cmd->result = CAM_REQ_INPROG << 16; rtn = ahd_linux_run_command(ahd, dev, cmd); return rtn; } static DEF_SCSI_QCMD(ahd_linux_queue) static struct scsi_target ** ahd_linux_target_in_softc(struct scsi_target *starget) { struct ahd_softc *ahd = *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata); unsigned int target_offset; target_offset = starget->id; if (starget->channel != 0) target_offset += 8; return &ahd->platform_data->starget[target_offset]; } static int ahd_linux_target_alloc(struct scsi_target *starget) { struct ahd_softc *ahd = *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata); struct seeprom_config *sc = ahd->seep_config; unsigned long flags; struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget); struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; char channel = starget->channel + 'A'; ahd_lock(ahd, &flags); BUG_ON(*ahd_targp != NULL); *ahd_targp = starget; if (sc) { int flags = sc->device_flags[starget->id]; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, starget->id, &tstate); if ((flags & CFPACKETIZED) == 0) { /* don't negotiate packetized (IU) transfers */ spi_max_iu(starget) = 0; } else { if ((ahd->features & AHD_RTI) == 0) spi_rti(starget) = 0; } if ((flags & CFQAS) == 0) spi_max_qas(starget) = 0; /* Transinfo values have been set to BIOS settings */ spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; spi_min_period(starget) = tinfo->user.period; spi_max_offset(starget) = tinfo->user.offset; } tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, starget->id, &tstate); ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahd_set_syncrate(ahd, &devinfo, 0, 0, 0, AHD_TRANS_GOAL, /*paused*/FALSE); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_GOAL, /*paused*/FALSE); ahd_unlock(ahd, &flags); return 0; } static void ahd_linux_target_destroy(struct scsi_target *starget) { struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget); *ahd_targp = NULL; } static int ahd_linux_slave_alloc(struct scsi_device *sdev) { struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata); struct ahd_linux_device *dev; if (bootverbose) printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id); dev = scsi_transport_device_data(sdev); memset(dev, 0, sizeof(*dev)); /* * We start out life using untagged * transactions of which we allow one. */ dev->openings = 1; /* * Set maxtags to 0. This will be changed if we * later determine that we are dealing with * a tagged queuing capable device. 
*/ dev->maxtags = 0; return (0); } static int ahd_linux_slave_configure(struct scsi_device *sdev) { if (bootverbose) sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); ahd_linux_device_queue_depth(sdev); /* Initial Domain Validation */ if (!spi_initial_dv(sdev->sdev_target)) spi_dv_device(sdev); return 0; } #if defined(__i386__) /* * Return the disk geometry for the given SCSI device. */ static int ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads; int sectors; int cylinders; int extended; struct ahd_softc *ahd; ahd = *((struct ahd_softc **)sdev->host->hostdata); if (scsi_partsize(bdev, capacity, geom)) return 0; heads = 64; sectors = 32; cylinders = aic_sector_div(capacity, heads, sectors); if (aic79xx_extended != 0) extended = 1; else extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0; if (extended && cylinders >= 1024) { heads = 255; sectors = 63; cylinders = aic_sector_div(capacity, heads, sectors); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return (0); } #endif /* * Abort the current SCSI command(s). */ static int ahd_linux_abort(struct scsi_cmnd *cmd) { return ahd_linux_queue_abort_cmd(cmd); } /* * Attempt to send a target reset message to the device that timed out. */ static int ahd_linux_dev_reset(struct scsi_cmnd *cmd) { struct ahd_softc *ahd; struct ahd_linux_device *dev; struct scb *reset_scb; u_int cdb_byte; int retval = SUCCESS; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; unsigned long flags; DECLARE_COMPLETION_ONSTACK(done); reset_scb = NULL; ahd = *(struct ahd_softc **)cmd->device->host->hostdata; scmd_printk(KERN_INFO, cmd, "Attempting to queue a TARGET RESET message:"); printk("CDB:"); for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) printk(" 0x%x", cmd->cmnd[cdb_byte]); printk("\n"); /* * Determine if we currently own this command. */ dev = scsi_transport_device_data(cmd->device); if (dev == NULL) { /* * No target device for this command exists, * so we must not still own the command. 
*/ scmd_printk(KERN_INFO, cmd, "Is not an active device\n"); return SUCCESS; } /* * Generate us a new SCB */ reset_scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX); if (!reset_scb) { scmd_printk(KERN_INFO, cmd, "No SCB available\n"); return FAILED; } tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, cmd->device->id, &tstate); reset_scb->io_ctx = cmd; reset_scb->platform_data->dev = dev; reset_scb->sg_count = 0; ahd_set_residual(reset_scb, 0); ahd_set_sense_residual(reset_scb, 0); reset_scb->platform_data->xfer_len = 0; reset_scb->hscb->control = 0; reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd); reset_scb->hscb->lun = cmd->device->lun; reset_scb->hscb->cdb_len = 0; reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET; reset_scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE; if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { reset_scb->flags |= SCB_PACKETIZED; } else { reset_scb->hscb->control |= MK_MESSAGE; } dev->openings--; dev->active++; dev->commands_issued++; ahd_lock(ahd, &flags); LIST_INSERT_HEAD(&ahd->pending_scbs, reset_scb, pending_links); ahd_queue_scb(ahd, reset_scb); ahd->platform_data->eh_done = &done; ahd_unlock(ahd, &flags); printk("%s: Device reset code sleeping\n", ahd_name(ahd)); if (!wait_for_completion_timeout(&done, 5 * HZ)) { ahd_lock(ahd, &flags); ahd->platform_data->eh_done = NULL; ahd_unlock(ahd, &flags); printk("%s: Device reset timer expired (active %d)\n", ahd_name(ahd), dev->active); retval = FAILED; } printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval); return (retval); } /* * Reset the SCSI bus. */ static int ahd_linux_bus_reset(struct scsi_cmnd *cmd) { struct ahd_softc *ahd; int found; unsigned long flags; ahd = *(struct ahd_softc **)cmd->device->host->hostdata; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printk("%s: Bus reset called for cmd %p\n", ahd_name(ahd), cmd); #endif ahd_lock(ahd, &flags); found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A', /*initiate reset*/TRUE); ahd_unlock(ahd, &flags); if (bootverbose) printk("%s: SCSI bus reset delivered. " "%d SCBs aborted.\n", ahd_name(ahd), found); return (SUCCESS); } struct scsi_host_template aic79xx_driver_template = { .module = THIS_MODULE, .name = "aic79xx", .proc_name = "aic79xx", .show_info = ahd_linux_show_info, .write_info = ahd_proc_write_seeprom, .info = ahd_linux_info, .queuecommand = ahd_linux_queue, .eh_abort_handler = ahd_linux_abort, .eh_device_reset_handler = ahd_linux_dev_reset, .eh_bus_reset_handler = ahd_linux_bus_reset, #if defined(__i386__) .bios_param = ahd_linux_biosparam, #endif .can_queue = AHD_MAX_QUEUE, .this_id = -1, .max_sectors = 8192, .cmd_per_lun = 2, .slave_alloc = ahd_linux_slave_alloc, .slave_configure = ahd_linux_slave_configure, .target_alloc = ahd_linux_target_alloc, .target_destroy = ahd_linux_target_destroy, }; /******************************** Bus DMA *************************************/ int ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, dma_addr_t lowaddr, dma_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag) { bus_dma_tag_t dmat; dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC); if (dmat == NULL) return (ENOMEM); /* * Linux is very simplistic about DMA memory. For now don't * maintain all specification information. Once Linux supplies * better facilities for doing these operations, or the * needs of this particular driver change, we might need to do * more here. 
*/ dmat->alignment = alignment; dmat->boundary = boundary; dmat->maxsize = maxsize; *ret_tag = dmat; return (0); } void ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat) { kfree(dmat); } int ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { *vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp, GFP_ATOMIC); if (*vaddr == NULL) return (ENOMEM); return(0); } void ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map) { dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map); } int ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, void *cb_arg, int flags) { /* * Assume for now that this will only be used during * initialization and not for per-transaction buffer mapping. */ bus_dma_segment_t stack_sg; stack_sg.ds_addr = map; stack_sg.ds_len = dmat->maxsize; cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); return (0); } void ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) { } int ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) { /* Nothing to do */ return (0); } /********************* Platform Dependent Functions ***************************/ static void ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value) { if ((instance >= 0) && (instance < ARRAY_SIZE(aic79xx_iocell_info))) { uint8_t *iocell_info; iocell_info = (uint8_t*)&aic79xx_iocell_info[instance]; iocell_info[index] = value & 0xFFFF; if (bootverbose) printk("iocell[%d:%ld] = %d\n", instance, index, value); } } static void ahd_linux_setup_tag_info_global(char *p) { int tags, i, j; tags = simple_strtoul(p + 1, NULL, 0) & 0xff; printk("Setting Global Tags= %d\n", tags); for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) { for (j = 0; j < AHD_NUM_TARGETS; j++) { aic79xx_tag_info[i].tag_commands[j] = tags; } } } static void ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value) { if ((instance >= 0) && (targ >= 0) && (instance < ARRAY_SIZE(aic79xx_tag_info)) && (targ < AHD_NUM_TARGETS)) { aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF; if (bootverbose) printk("tag_info[%d:%d] = %d\n", instance, targ, value); } } static char * ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth, void (*callback)(u_long, int, int, int32_t), u_long callback_arg) { char *tok_end; char *tok_end2; int i; int instance; int targ; int done; char tok_list[] = {'.', ',', '{', '}', '\0'}; /* All options use a ':' name/arg separator */ if (*opt_arg != ':') return (opt_arg); opt_arg++; instance = -1; targ = -1; done = FALSE; /* * Restore separator that may be in * the middle of our option argument. 
*/ tok_end = strchr(opt_arg, '\0'); if (tok_end < end) *tok_end = ','; while (!done) { switch (*opt_arg) { case '{': if (instance == -1) { instance = 0; } else { if (depth > 1) { if (targ == -1) targ = 0; } else { printk("Malformed Option %s\n", opt_name); done = TRUE; } } opt_arg++; break; case '}': if (targ != -1) targ = -1; else if (instance != -1) instance = -1; opt_arg++; break; case ',': case '.': if (instance == -1) done = TRUE; else if (targ >= 0) targ++; else if (instance >= 0) instance++; opt_arg++; break; case '\0': done = TRUE; break; default: tok_end = end; for (i = 0; tok_list[i]; i++) { tok_end2 = strchr(opt_arg, tok_list[i]); if ((tok_end2) && (tok_end2 < tok_end)) tok_end = tok_end2; } callback(callback_arg, instance, targ, simple_strtol(opt_arg, NULL, 0)); opt_arg = tok_end; break; } } return (opt_arg); } /* * Handle Linux boot parameters. This routine allows for assigning a value * to a parameter with a ':' between the parameter and the value. * ie. aic79xx=stpwlev:1,extended */ static int aic79xx_setup(char *s) { int i, n; char *p; char *end; static const struct { const char *name; uint32_t *flag; } options[] = { { "extended", &aic79xx_extended }, { "no_reset", &aic79xx_no_reset }, { "verbose", &aic79xx_verbose }, { "allow_memio", &aic79xx_allow_memio}, #ifdef AHD_DEBUG { "debug", &ahd_debug }, #endif { "periodic_otag", &aic79xx_periodic_otag }, { "pci_parity", &aic79xx_pci_parity }, { "seltime", &aic79xx_seltime }, { "tag_info", NULL }, { "global_tag_depth", NULL}, { "slewrate", NULL }, { "precomp", NULL }, { "amplitude", NULL }, { "slowcrc", &aic79xx_slowcrc }, }; end = strchr(s, '\0'); /* * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE * will never be 0 in this case. */ n = 0; while ((p = strsep(&s, ",.")) != NULL) { if (*p == '\0') continue; for (i = 0; i < ARRAY_SIZE(options); i++) { n = strlen(options[i].name); if (strncmp(options[i].name, p, n) == 0) break; } if (i == ARRAY_SIZE(options)) continue; if (strncmp(p, "global_tag_depth", n) == 0) { ahd_linux_setup_tag_info_global(p + n); } else if (strncmp(p, "tag_info", n) == 0) { s = ahd_parse_brace_option("tag_info", p + n, end, 2, ahd_linux_setup_tag_info, 0); } else if (strncmp(p, "slewrate", n) == 0) { s = ahd_parse_brace_option("slewrate", p + n, end, 1, ahd_linux_setup_iocell_info, AIC79XX_SLEWRATE_INDEX); } else if (strncmp(p, "precomp", n) == 0) { s = ahd_parse_brace_option("precomp", p + n, end, 1, ahd_linux_setup_iocell_info, AIC79XX_PRECOMP_INDEX); } else if (strncmp(p, "amplitude", n) == 0) { s = ahd_parse_brace_option("amplitude", p + n, end, 1, ahd_linux_setup_iocell_info, AIC79XX_AMPLITUDE_INDEX); } else if (p[n] == ':') { *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); } else if (!strncmp(p, "verbose", n)) { *(options[i].flag) = 1; } else { *(options[i].flag) ^= 0xFFFFFFFF; } } return 1; } __setup("aic79xx=", aic79xx_setup); uint32_t aic79xx_verbose; int ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template) { char buf[80]; struct Scsi_Host *host; char *new_name; u_long s; int retval; template->name = ahd->description; host = scsi_host_alloc(template, sizeof(struct ahd_softc *)); if (host == NULL) return (ENOMEM); *((struct ahd_softc **)host->hostdata) = ahd; ahd->platform_data->host = host; host->can_queue = AHD_MAX_QUEUE; host->cmd_per_lun = 2; host->sg_tablesize = AHD_NSEG; host->this_id = ahd->our_id; host->irq = ahd->platform_data->irq; host->max_id = (ahd->features & AHD_WIDE) ? 
16 : 8; host->max_lun = AHD_NUM_LUNS; host->max_channel = 0; host->sg_tablesize = AHD_NSEG; ahd_lock(ahd, &s); ahd_set_unit(ahd, ahd_linux_unit++); ahd_unlock(ahd, &s); sprintf(buf, "scsi%d", host->host_no); new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); if (new_name != NULL) { strcpy(new_name, buf); ahd_set_name(ahd, new_name); } host->unique_id = ahd->unit; ahd_linux_initialize_scsi_bus(ahd); ahd_intr_enable(ahd, TRUE); host->transportt = ahd_linux_transport_template; retval = scsi_add_host(host, &ahd->dev_softc->dev); if (retval) { printk(KERN_WARNING "aic79xx: scsi_add_host failed\n"); scsi_host_put(host); return retval; } scsi_scan_host(host); return 0; } /* * Place the SCSI bus into a known state by either resetting it, * or forcing transfer negotiations on the next command to any * target. */ static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd) { u_int target_id; u_int numtarg; unsigned long s; target_id = 0; numtarg = 0; if (aic79xx_no_reset != 0) ahd->flags &= ~AHD_RESET_BUS_A; if ((ahd->flags & AHD_RESET_BUS_A) != 0) ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE); else numtarg = (ahd->features & AHD_WIDE) ? 16 : 8; ahd_lock(ahd, &s); /* * Force negotiation to async for all targets that * will not see an initial bus reset. */ for (; target_id < numtarg; target_id++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, target_id, &tstate); ahd_compile_devinfo(&devinfo, ahd->our_id, target_id, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_update_neg_request(ahd, &devinfo, tstate, tinfo, AHD_NEG_ALWAYS); } ahd_unlock(ahd, &s); /* Give the bus some time to recover */ if ((ahd->flags & AHD_RESET_BUS_A) != 0) { ahd_freeze_simq(ahd); msleep(AIC79XX_RESET_DELAY); ahd_release_simq(ahd); } } int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) { ahd->platform_data = kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC); if (ahd->platform_data == NULL) return (ENOMEM); ahd->platform_data->irq = AHD_LINUX_NOIRQ; ahd_lockinit(ahd); ahd->seltime = (aic79xx_seltime & 0x3) << 4; return (0); } void ahd_platform_free(struct ahd_softc *ahd) { struct scsi_target *starget; int i; if (ahd->platform_data != NULL) { /* destroy all of the device and target objects */ for (i = 0; i < AHD_NUM_TARGETS; i++) { starget = ahd->platform_data->starget[i]; if (starget != NULL) { ahd->platform_data->starget[i] = NULL; } } if (ahd->platform_data->irq != AHD_LINUX_NOIRQ) free_irq(ahd->platform_data->irq, ahd); if (ahd->tags[0] == BUS_SPACE_PIO && ahd->bshs[0].ioport != 0) release_region(ahd->bshs[0].ioport, 256); if (ahd->tags[1] == BUS_SPACE_PIO && ahd->bshs[1].ioport != 0) release_region(ahd->bshs[1].ioport, 256); if (ahd->tags[0] == BUS_SPACE_MEMIO && ahd->bshs[0].maddr != NULL) { iounmap(ahd->bshs[0].maddr); release_mem_region(ahd->platform_data->mem_busaddr, 0x1000); } if (ahd->platform_data->host) scsi_host_put(ahd->platform_data->host); kfree(ahd->platform_data); } } void ahd_platform_init(struct ahd_softc *ahd) { /* * Lookup and commit any modified IO Cell options. 
*/ if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { const struct ahd_linux_iocell_opts *iocell_opts; iocell_opts = &aic79xx_iocell_info[ahd->unit]; if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP) AHD_SET_PRECOMP(ahd, iocell_opts->precomp); if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE) AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate); if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE) AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude); } } void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb) { ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ); } void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev, struct ahd_devinfo *devinfo, ahd_queue_alg alg) { struct ahd_linux_device *dev; int was_queuing; int now_queuing; if (sdev == NULL) return; dev = scsi_transport_device_data(sdev); if (dev == NULL) return; was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED); switch (alg) { default: case AHD_QUEUE_NONE: now_queuing = 0; break; case AHD_QUEUE_BASIC: now_queuing = AHD_DEV_Q_BASIC; break; case AHD_QUEUE_TAGGED: now_queuing = AHD_DEV_Q_TAGGED; break; } if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0 && (was_queuing != now_queuing) && (dev->active != 0)) { dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY; dev->qfrozen++; } dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG); if (now_queuing) { u_int usertags; usertags = ahd_linux_user_tagdepth(ahd, devinfo); if (!was_queuing) { /* * Start out aggressively and allow our * dynamic queue depth algorithm to take * care of the rest. */ dev->maxtags = usertags; dev->openings = dev->maxtags - dev->active; } if (dev->maxtags == 0) { /* * Queueing is disabled by the user. */ dev->openings = 1; } else if (alg == AHD_QUEUE_TAGGED) { dev->flags |= AHD_DEV_Q_TAGGED; if (aic79xx_periodic_otag != 0) dev->flags |= AHD_DEV_PERIODIC_OTAG; } else dev->flags |= AHD_DEV_Q_BASIC; } else { /* We can only have one opening. */ dev->maxtags = 0; dev->openings = 1 - dev->active; } switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) { case AHD_DEV_Q_BASIC: case AHD_DEV_Q_TAGGED: scsi_change_queue_depth(sdev, dev->openings + dev->active); break; default: /* * We allow the OS to queue 2 untagged transactions to * us at any time even though we can only execute them * serially on the controller/device. This should * remove some latency. */ scsi_change_queue_depth(sdev, 1); break; } } int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { return 0; } static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { static int warned_user; u_int tags; tags = 0; if ((ahd->user_discenable & devinfo->target_mask) != 0) { if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) { if (warned_user == 0) { printk(KERN_WARNING "aic79xx: WARNING: Insufficient tag_info instances\n" "aic79xx: for installed controllers. Using defaults\n" "aic79xx: Please update the aic79xx_tag_info array in\n" "aic79xx: the aic79xx_osm.c source file.\n"); warned_user++; } tags = AHD_MAX_QUEUE; } else { adapter_tag_info_t *tag_info; tag_info = &aic79xx_tag_info[ahd->unit]; tags = tag_info->tag_commands[devinfo->target_offset]; if (tags > AHD_MAX_QUEUE) tags = AHD_MAX_QUEUE; } } return (tags); } /* * Determines the queue depth for a given device. 
*/ static void ahd_linux_device_queue_depth(struct scsi_device *sdev) { struct ahd_devinfo devinfo; u_int tags; struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata); ahd_compile_devinfo(&devinfo, ahd->our_id, sdev->sdev_target->id, sdev->lun, sdev->sdev_target->channel == 0 ? 'A' : 'B', ROLE_INITIATOR); tags = ahd_linux_user_tagdepth(ahd, &devinfo); if (tags != 0 && sdev->tagged_supported != 0) { ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED); ahd_send_async(ahd, devinfo.channel, devinfo.target, devinfo.lun, AC_TRANSFER_NEG); ahd_print_devinfo(ahd, &devinfo); printk("Tagged Queuing enabled. Depth %d\n", tags); } else { ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE); ahd_send_async(ahd, devinfo.channel, devinfo.target, devinfo.lun, AC_TRANSFER_NEG); } } static int ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev, struct scsi_cmnd *cmd) { struct scb *scb; struct hardware_scb *hscb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int col_idx; uint16_t mask; unsigned long flags; int nseg; nseg = scsi_dma_map(cmd); if (nseg < 0) return SCSI_MLQUEUE_HOST_BUSY; ahd_lock(ahd, &flags); /* * Get an scb to use. */ tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, cmd->device->id, &tstate); if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { col_idx = AHD_NEVER_COL_IDX; } else { col_idx = AHD_BUILD_COL_IDX(cmd->device->id, cmd->device->lun); } if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) { ahd->flags |= AHD_RESOURCE_SHORTAGE; ahd_unlock(ahd, &flags); scsi_dma_unmap(cmd); return SCSI_MLQUEUE_HOST_BUSY; } scb->io_ctx = cmd; scb->platform_data->dev = dev; hscb = scb->hscb; cmd->host_scribble = (char *)scb; /* * Fill out basics of the HSCB. */ hscb->control = 0; hscb->scsiid = BUILD_SCSIID(ahd, cmd); hscb->lun = cmd->device->lun; scb->hscb->task_management = 0; mask = SCB_GET_TARGET_MASK(ahd, scb); if ((ahd->user_discenable & mask) != 0) hscb->control |= DISCENB; if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) scb->flags |= SCB_PACKETIZED; if ((tstate->auto_negotiate & mask) != 0) { scb->flags |= SCB_AUTO_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) { if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH && (dev->flags & AHD_DEV_Q_TAGGED) != 0) { hscb->control |= ORDERED_QUEUE_TAG; dev->commands_since_idle_or_otag = 0; } else { hscb->control |= SIMPLE_QUEUE_TAG; } } hscb->cdb_len = cmd->cmd_len; memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len); scb->platform_data->xfer_len = 0; ahd_set_residual(scb, 0); ahd_set_sense_residual(scb, 0); scb->sg_count = 0; if (nseg > 0) { void *sg = scb->sg_list; struct scatterlist *cur_seg; int i; scb->platform_data->xfer_len = 0; scsi_for_each_sg(cmd, cur_seg, nseg, i) { dma_addr_t addr; bus_size_t len; addr = sg_dma_address(cur_seg); len = sg_dma_len(cur_seg); scb->platform_data->xfer_len += len; sg = ahd_sg_setup(ahd, scb, sg, addr, len, i == (nseg - 1)); } } LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); dev->openings--; dev->active++; dev->commands_issued++; if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0) dev->commands_since_idle_or_otag++; scb->flags |= SCB_ACTIVE; ahd_queue_scb(ahd, scb); ahd_unlock(ahd, &flags); return 0; } /* * SCSI controller interrupt handler. 
*/ irqreturn_t ahd_linux_isr(int irq, void *dev_id) { struct ahd_softc *ahd; u_long flags; int ours; ahd = (struct ahd_softc *) dev_id; ahd_lock(ahd, &flags); ours = ahd_intr(ahd); ahd_unlock(ahd, &flags); return IRQ_RETVAL(ours); } void ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, u_int lun, ac_code code) { switch (code) { case AC_TRANSFER_NEG: { struct scsi_target *starget; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; unsigned int target_ppr_options; BUG_ON(target == CAM_TARGET_WILDCARD); tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, target, &tstate); /* * Don't bother reporting results while * negotiations are still pending. */ if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options) if (bootverbose == 0) break; /* * Don't bother reporting results that * are identical to those last reported. */ starget = ahd->platform_data->starget[target]; if (starget == NULL) break; target_ppr_options = (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0) + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0) + (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0) + (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0) + (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0) + (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0) + (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0); if (tinfo->curr.period == spi_period(starget) && tinfo->curr.width == spi_width(starget) && tinfo->curr.offset == spi_offset(starget) && tinfo->curr.ppr_options == target_ppr_options) if (bootverbose == 0) break; spi_period(starget) = tinfo->curr.period; spi_width(starget) = tinfo->curr.width; spi_offset(starget) = tinfo->curr.offset; spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0; spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0; spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0; spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0; spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0; spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0; spi_display_xfer_agreement(starget); break; } case AC_SENT_BDR: { WARN_ON(lun != CAM_LUN_WILDCARD); scsi_report_device_reset(ahd->platform_data->host, channel - 'A', target); break; } case AC_BUS_RESET: if (ahd->platform_data->host != NULL) { scsi_report_bus_reset(ahd->platform_data->host, channel - 'A'); } break; default: panic("ahd_send_async: Unexpected async event"); } } /* * Calls the higher level scsi done function and frees the scb. */ void ahd_done(struct ahd_softc *ahd, struct scb *scb) { struct scsi_cmnd *cmd; struct ahd_linux_device *dev; if ((scb->flags & SCB_ACTIVE) == 0) { printk("SCB %d done'd twice\n", SCB_GET_TAG(scb)); ahd_dump_card_state(ahd); panic("Stopping for safety"); } LIST_REMOVE(scb, pending_links); cmd = scb->io_ctx; dev = scb->platform_data->dev; dev->active--; dev->openings++; if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) { cmd->result &= ~(CAM_DEV_QFRZN << 16); dev->qfrozen--; } ahd_linux_unmap_scb(ahd, scb); /* * Guard against stale sense data. * The Linux mid-layer assumes that sense * was retrieved anytime the first byte of * the sense buffer looks "sane". 
*/ cmd->sense_buffer[0] = 0; if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) { #ifdef AHD_REPORT_UNDERFLOWS uint32_t amount_xferred; amount_xferred = ahd_get_transfer_length(scb) - ahd_get_residual(scb); #endif if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) { ahd_print_path(ahd, scb); printk("Set CAM_UNCOR_PARITY\n"); } #endif ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); #ifdef AHD_REPORT_UNDERFLOWS /* * This code is disabled by default as some * clients of the SCSI system do not properly * initialize the underflow parameter. This * results in spurious termination of commands * that complete as expected (e.g. underflow is * allowed as command can return variable amounts * of data. */ } else if (amount_xferred < scb->io_ctx->underflow) { u_int i; ahd_print_path(ahd, scb); printk("CDB:"); for (i = 0; i < scb->io_ctx->cmd_len; i++) printk(" 0x%x", scb->io_ctx->cmnd[i]); printk("\n"); ahd_print_path(ahd, scb); printk("Saw underflow (%ld of %ld bytes). " "Treated as error\n", ahd_get_residual(scb), ahd_get_transfer_length(scb)); ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); #endif } else { ahd_set_transaction_status(scb, CAM_REQ_CMP); } } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { ahd_linux_handle_scsi_status(ahd, cmd->device, scb); } if (dev->openings == 1 && ahd_get_transaction_status(scb) == CAM_REQ_CMP && ahd_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL) dev->tag_success_count++; /* * Some devices deal with temporary internal resource * shortages by returning queue full. When the queue * full occurrs, we throttle back. Slowly try to get * back to our previous queue depth. */ if ((dev->openings + dev->active) < dev->maxtags && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) { dev->tag_success_count = 0; dev->openings++; } if (dev->active == 0) dev->commands_since_idle_or_otag = 0; if ((scb->flags & SCB_RECOVERY_SCB) != 0) { printk("Recovery SCB completes\n"); if (ahd_get_transaction_status(scb) == CAM_BDR_SENT || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); if (ahd->platform_data->eh_done) complete(ahd->platform_data->eh_done); } ahd_free_scb(ahd, scb); ahd_linux_queue_cmd_complete(ahd, cmd); } static void ahd_linux_handle_scsi_status(struct ahd_softc *ahd, struct scsi_device *sdev, struct scb *scb) { struct ahd_devinfo devinfo; struct ahd_linux_device *dev = scsi_transport_device_data(sdev); ahd_compile_devinfo(&devinfo, ahd->our_id, sdev->sdev_target->id, sdev->lun, sdev->sdev_target->channel == 0 ? 'A' : 'B', ROLE_INITIATOR); /* * We don't currently trust the mid-layer to * properly deal with queue full or busy. So, * when one occurs, we tell the mid-layer to * unconditionally requeue the command to us * so that we can retry it ourselves. We also * implement our own throttling mechanism so * we don't clobber the device with too many * commands. */ switch (ahd_get_scsi_status(scb)) { default: break; case SAM_STAT_CHECK_CONDITION: case SAM_STAT_COMMAND_TERMINATED: { struct scsi_cmnd *cmd; /* * Copy sense information to the OS's cmd * structure if it is available. */ cmd = scb->io_ctx; if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) { struct scsi_status_iu_header *siu; u_int sense_size; u_int sense_offset; if (scb->flags & SCB_SENSE) { sense_size = min(sizeof(struct scsi_sense_data) - ahd_get_sense_residual(scb), (u_long)SCSI_SENSE_BUFFERSIZE); sense_offset = 0; } else { /* * Copy only the sense data into the provided * buffer. 
*/ siu = (struct scsi_status_iu_header *) scb->sense_data; sense_size = min_t(size_t, scsi_4btoul(siu->sense_length), SCSI_SENSE_BUFFERSIZE); sense_offset = SIU_SENSE_OFFSET(siu); } memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); memcpy(cmd->sense_buffer, ahd_get_sense_buf(ahd, scb) + sense_offset, sense_size); set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { int i; printk("Copied %d bytes of sense data at %d:", sense_size, sense_offset); for (i = 0; i < sense_size; i++) { if ((i & 0xF) == 0) printk("\n"); printk("0x%x ", cmd->sense_buffer[i]); } printk("\n"); } #endif } break; } case SAM_STAT_TASK_SET_FULL: /* * By the time the core driver has returned this * command, all other commands that were queued * to us but not the device have been returned. * This ensures that dev->active is equal to * the number of commands actually queued to * the device. */ dev->tag_success_count = 0; if (dev->active != 0) { /* * Drop our opening count to the number * of commands currently outstanding. */ dev->openings = 0; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_QFULL) != 0) { ahd_print_path(ahd, scb); printk("Dropping tag count to %d\n", dev->active); } #endif if (dev->active == dev->tags_on_last_queuefull) { dev->last_queuefull_same_count++; /* * If we repeatedly see a queue full * at the same queue depth, this * device has a fixed number of tag * slots. Lock in this tag depth * so we stop seeing queue fulls from * this device. */ if (dev->last_queuefull_same_count == AHD_LOCK_TAGS_COUNT) { dev->maxtags = dev->active; ahd_print_path(ahd, scb); printk("Locking max tag count at %d\n", dev->active); } } else { dev->tags_on_last_queuefull = dev->active; dev->last_queuefull_same_count = 0; } ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); ahd_set_scsi_status(scb, SAM_STAT_GOOD); ahd_platform_set_tags(ahd, sdev, &devinfo, (dev->flags & AHD_DEV_Q_BASIC) ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); break; } /* * Drop down to a single opening, and treat this * as if the target returned BUSY SCSI status. */ dev->openings = 1; ahd_platform_set_tags(ahd, sdev, &devinfo, (dev->flags & AHD_DEV_Q_BASIC) ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); ahd_set_scsi_status(scb, SAM_STAT_BUSY); } } static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd) { int status; int new_status = DID_OK; int do_fallback = 0; int scsi_status; struct scsi_sense_data *sense; /* * Map CAM error codes into Linux Error codes. We * avoid the conversion so that the DV code has the * full error information available when making * state change decisions. 
*/ status = ahd_cmd_get_transaction_status(cmd); switch (status) { case CAM_REQ_INPROG: case CAM_REQ_CMP: new_status = DID_OK; break; case CAM_AUTOSENSE_FAIL: new_status = DID_ERROR; fallthrough; case CAM_SCSI_STATUS_ERROR: scsi_status = ahd_cmd_get_scsi_status(cmd); switch(scsi_status) { case SAM_STAT_COMMAND_TERMINATED: case SAM_STAT_CHECK_CONDITION: sense = (struct scsi_sense_data *) cmd->sense_buffer; if (sense->extra_len >= 5 && (sense->add_sense_code == 0x47 || sense->add_sense_code == 0x48)) do_fallback = 1; break; default: break; } break; case CAM_REQ_ABORTED: new_status = DID_ABORT; break; case CAM_BUSY: new_status = DID_BUS_BUSY; break; case CAM_REQ_INVALID: case CAM_PATH_INVALID: new_status = DID_BAD_TARGET; break; case CAM_SEL_TIMEOUT: new_status = DID_NO_CONNECT; break; case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: new_status = DID_RESET; break; case CAM_UNCOR_PARITY: new_status = DID_PARITY; do_fallback = 1; break; case CAM_CMD_TIMEOUT: new_status = DID_TIME_OUT; do_fallback = 1; break; case CAM_REQ_CMP_ERR: case CAM_UNEXP_BUSFREE: case CAM_DATA_RUN_ERR: new_status = DID_ERROR; do_fallback = 1; break; case CAM_UA_ABORT: case CAM_NO_HBA: case CAM_SEQUENCE_FAIL: case CAM_CCB_LEN_ERR: case CAM_PROVIDE_FAIL: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_REQ_TOO_BIG: new_status = DID_ERROR; break; case CAM_REQUEUE_REQ: new_status = DID_REQUEUE; break; default: /* We should never get here */ new_status = DID_ERROR; break; } if (do_fallback) { printk("%s: device overrun (status %x) on %d:%d:%d\n", ahd_name(ahd), status, cmd->device->channel, cmd->device->id, (u8)cmd->device->lun); } ahd_cmd_set_transaction_status(cmd, new_status); scsi_done(cmd); } static void ahd_freeze_simq(struct ahd_softc *ahd) { scsi_block_requests(ahd->platform_data->host); } static void ahd_release_simq(struct ahd_softc *ahd) { scsi_unblock_requests(ahd->platform_data->host); } static int ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd) { struct ahd_softc *ahd; struct ahd_linux_device *dev; struct scb *pending_scb; u_int saved_scbptr; u_int active_scbptr; u_int last_phase; u_int cdb_byte; int retval = SUCCESS; int was_paused; int paused; int wait; int disconnected; ahd_mode_state saved_modes; unsigned long flags; pending_scb = NULL; paused = FALSE; wait = FALSE; ahd = *(struct ahd_softc **)cmd->device->host->hostdata; scmd_printk(KERN_INFO, cmd, "Attempting to queue an ABORT message:"); printk("CDB:"); for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) printk(" 0x%x", cmd->cmnd[cdb_byte]); printk("\n"); ahd_lock(ahd, &flags); /* * First determine if we currently own this command. * Start by searching the device queue. If not found * there, check the pending_scb list. If not found * at all, and the system wanted us to just abort the * command, return success. */ dev = scsi_transport_device_data(cmd->device); if (dev == NULL) { /* * No target device for this command exists, * so we must not still own the command. */ scmd_printk(KERN_INFO, cmd, "Is not an active device\n"); goto done; } /* * See if we can find a matching cmd in the pending list. */ LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { if (pending_scb->io_ctx == cmd) break; } if (pending_scb == NULL) { scmd_printk(KERN_INFO, cmd, "Command not found\n"); goto done; } if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) { /* * We can't queue two recovery actions using the same SCB */ retval = FAILED; goto done; } /* * Ensure that the card doesn't do anything * behind our back. 
Also make sure that we * didn't "just" miss an interrupt that would * affect this cmd. */ was_paused = ahd_is_paused(ahd); ahd_pause_and_flushwork(ahd); paused = TRUE; if ((pending_scb->flags & SCB_ACTIVE) == 0) { scmd_printk(KERN_INFO, cmd, "Command already completed\n"); goto done; } printk("%s: At time of recovery, card was %spaused\n", ahd_name(ahd), was_paused ? "" : "not "); ahd_dump_card_state(ahd); disconnected = TRUE; if (ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, ROLE_INITIATOR, CAM_REQ_ABORTED, SEARCH_COMPLETE) > 0) { printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", ahd_name(ahd), cmd->device->channel, cmd->device->id, (u8)cmd->device->lun); goto done; } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); last_phase = ahd_inb(ahd, LASTPHASE); saved_scbptr = ahd_get_scbptr(ahd); active_scbptr = saved_scbptr; if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) { struct scb *bus_scb; bus_scb = ahd_lookup_scb(ahd, active_scbptr); if (bus_scb == pending_scb) disconnected = FALSE; } /* * At this point, pending_scb is the scb associated with the * passed in command. That command is currently active on the * bus or is in the disconnected state. */ ahd_inb(ahd, SAVED_SCSIID); if (last_phase != P_BUSFREE && SCB_GET_TAG(pending_scb) == active_scbptr) { /* * We're active on the bus, so assert ATN * and hope that the target responds. */ pending_scb = ahd_lookup_scb(ahd, active_scbptr); pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, SCSISIGO, last_phase|ATNO); scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n"); wait = TRUE; } else if (disconnected) { /* * Actually re-queue this SCB in an attempt * to select the device before it reconnects. */ pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT; ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb)); pending_scb->hscb->cdb_len = 0; pending_scb->hscb->task_attribute = 0; pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK; if ((pending_scb->flags & SCB_PACKETIZED) != 0) { /* * Mark the SCB as having an outstanding * task management function. Should the command * complete normally before the task management * function can be sent, the host will be notified * to abort our requeued SCB. */ ahd_outb(ahd, SCB_TASK_MANAGEMENT, pending_scb->hscb->task_management); } else { /* * If non-packetized, set the MK_MESSAGE control * bit indicating that we desire to send a message. * We also set the disconnected flag since there is * no guarantee that our SCB control byte matches * the version on the card. We don't want the * sequencer to abort the command thinking an * unsolicited reselection occurred. */ pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED; /* * The sequencer will never re-reference the * in-core SCB. To make sure we are notified * during reselection, set the MK_MESSAGE flag in * the card's copy of the SCB. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE); } /* * Clear out any entries in the QINFIFO first * so we are the next SCB for this target * to run.
*/ ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahd_qinfifo_requeue_tail(ahd, pending_scb); ahd_set_scbptr(ahd, saved_scbptr); ahd_print_path(ahd, pending_scb); printk("Device is disconnected, re-queuing SCB\n"); wait = TRUE; } else { scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); retval = FAILED; } ahd_restore_modes(ahd, saved_modes); done: if (paused) ahd_unpause(ahd); if (wait) { DECLARE_COMPLETION_ONSTACK(done); ahd->platform_data->eh_done = &done; ahd_unlock(ahd, &flags); printk("%s: Recovery code sleeping\n", ahd_name(ahd)); if (!wait_for_completion_timeout(&done, 5 * HZ)) { ahd_lock(ahd, &flags); ahd->platform_data->eh_done = NULL; ahd_unlock(ahd, &flags); printk("%s: Timer Expired (active %d)\n", ahd_name(ahd), dev->active); retval = FAILED; } printk("Recovery code awake\n"); } else ahd_unlock(ahd, &flags); if (retval != SUCCESS) printk("%s: Command abort returning 0x%x\n", ahd_name(ahd), retval); return retval; } static void ahd_linux_set_width(struct scsi_target *starget, int width) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_devinfo devinfo; unsigned long flags; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_lock(ahd, &flags); ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_period(struct scsi_target *starget, int period) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options; unsigned int dt; unsigned long flags; unsigned long offset = tinfo->goal.offset; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: set period to %d\n", ahd_name(ahd), period); #endif if (offset == 0) offset = MAX_OFFSET; if (period < 8) period = 8; if (period < 10) { if (spi_max_width(starget)) { ppr_options |= MSG_EXT_PPR_DT_REQ; if (period == 8) ppr_options |= MSG_EXT_PPR_IU_REQ; } else period = 10; } dt = ppr_options & MSG_EXT_PPR_DT_REQ; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); /* all PPR requests apart from QAS require wide transfers */ if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) { if (spi_width(starget) == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; } ahd_find_syncrate(ahd, &period, &ppr_options, dt ? 
AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_offset(struct scsi_target *starget, int offset) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = 0; unsigned int period = 0; unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: set offset to %d\n", ahd_name(ahd), offset); #endif ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); if (offset != 0) { period = tinfo->goal.period; ppr_options = tinfo->goal.ppr_options; ahd_find_syncrate(ahd, &period, &ppr_options, dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); } ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_dt(struct scsi_target *starget, int dt) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_DT_REQ; unsigned int period = tinfo->goal.period; unsigned int width = tinfo->goal.width; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s DT\n", ahd_name(ahd), dt ? "enabling" : "disabling"); #endif if (dt && spi_max_width(starget)) { ppr_options |= MSG_EXT_PPR_DT_REQ; if (!width) ahd_linux_set_width(starget, 1); } else { if (period <= 9) period = 10; /* If resetting DT, period must be >= 25ns */ /* IU is invalid without DT set */ ppr_options &= ~MSG_EXT_PPR_IU_REQ; } ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_qas(struct scsi_target *starget, int qas) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_QAS_REQ; unsigned int period = tinfo->goal.period; unsigned int dt; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s QAS\n", ahd_name(ahd), qas ? "enabling" : "disabling"); #endif if (qas) { ppr_options |= MSG_EXT_PPR_QAS_REQ; } dt = ppr_options & MSG_EXT_PPR_DT_REQ; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? 
AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_iu(struct scsi_target *starget, int iu) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_IU_REQ; unsigned int period = tinfo->goal.period; unsigned int dt; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s IU\n", ahd_name(ahd), iu ? "enabling" : "disabling"); #endif if (iu && spi_max_width(starget)) { ppr_options |= MSG_EXT_PPR_IU_REQ; ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */ } dt = ppr_options & MSG_EXT_PPR_DT_REQ; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_RD_STRM; unsigned int period = tinfo->goal.period; unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s Read Streaming\n", ahd_name(ahd), rdstrm ? "enabling" : "disabling"); #endif if (rdstrm && spi_max_width(starget)) ppr_options |= MSG_EXT_PPR_RD_STRM; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_WR_FLOW; unsigned int period = tinfo->goal.period; unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s Write Flow Control\n", ahd_name(ahd), wrflow ? "enabling" : "disabling"); #endif if (wrflow && spi_max_width(starget)) ppr_options |= MSG_EXT_PPR_WR_FLOW; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? 
AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_rti(struct scsi_target *starget, int rti) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_RTI; unsigned int period = tinfo->goal.period; unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; unsigned long flags; if ((ahd->features & AHD_RTI) == 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: RTI not available\n", ahd_name(ahd)); #endif return; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s RTI\n", ahd_name(ahd), rti ? "enabling" : "disabling"); #endif if (rti && spi_max_width(starget)) ppr_options |= MSG_EXT_PPR_RTI; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_PCOMP_EN; unsigned int period = tinfo->goal.period; unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; unsigned long flags; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_DV) != 0) printk("%s: %s Precompensation\n", ahd_name(ahd), pcomp ? "Enable" : "Disable"); #endif if (pcomp && spi_max_width(starget)) { uint8_t precomp; if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) { const struct ahd_linux_iocell_opts *iocell_opts; iocell_opts = &aic79xx_iocell_info[ahd->unit]; precomp = iocell_opts->precomp; } else { precomp = AIC79XX_DEFAULT_PRECOMP; } ppr_options |= MSG_EXT_PPR_PCOMP_EN; AHD_SET_PRECOMP(ahd, precomp); } else { AHD_SET_PRECOMP(ahd, 0); } ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? 
AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata); struct ahd_tmode_tstate *tstate; struct ahd_initiator_tinfo *tinfo = ahd_fetch_transinfo(ahd, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahd_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_HOLD_MCS; unsigned int period = tinfo->goal.period; unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ; unsigned long flags; if (hold && spi_max_width(starget)) ppr_options |= MSG_EXT_PPR_HOLD_MCS; ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahd_find_syncrate(ahd, &period, &ppr_options, dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2); ahd_lock(ahd, &flags); ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset, ppr_options, AHD_TRANS_GOAL, FALSE); ahd_unlock(ahd, &flags); } static void ahd_linux_get_signalling(struct Scsi_Host *shost) { struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; unsigned long flags; u8 mode; ahd_lock(ahd, &flags); ahd_pause(ahd); mode = ahd_inb(ahd, SBLKCTL); ahd_unpause(ahd); ahd_unlock(ahd, &flags); if (mode & ENAB40) spi_signalling(shost) = SPI_SIGNAL_LVD; else if (mode & ENAB20) spi_signalling(shost) = SPI_SIGNAL_SE; else spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; } static struct spi_function_template ahd_linux_transport_functions = { .set_offset = ahd_linux_set_offset, .show_offset = 1, .set_period = ahd_linux_set_period, .show_period = 1, .set_width = ahd_linux_set_width, .show_width = 1, .set_dt = ahd_linux_set_dt, .show_dt = 1, .set_iu = ahd_linux_set_iu, .show_iu = 1, .set_qas = ahd_linux_set_qas, .show_qas = 1, .set_rd_strm = ahd_linux_set_rd_strm, .show_rd_strm = 1, .set_wr_flow = ahd_linux_set_wr_flow, .show_wr_flow = 1, .set_rti = ahd_linux_set_rti, .show_rti = 1, .set_pcomp_en = ahd_linux_set_pcomp_en, .show_pcomp_en = 1, .set_hold_mcs = ahd_linux_set_hold_mcs, .show_hold_mcs = 1, .get_signalling = ahd_linux_get_signalling, }; static int __init ahd_linux_init(void) { int error = 0; /* * If we've been passed any parameters, process them now. */ if (aic79xx) aic79xx_setup(aic79xx); ahd_linux_transport_template = spi_attach_transport(&ahd_linux_transport_functions); if (!ahd_linux_transport_template) return -ENODEV; scsi_transport_reserve_device(ahd_linux_transport_template, sizeof(struct ahd_linux_device)); error = ahd_linux_pci_init(); if (error) spi_release_transport(ahd_linux_transport_template); return error; } static void __exit ahd_linux_exit(void) { ahd_linux_pci_exit(); spi_release_transport(ahd_linux_transport_template); } module_init(ahd_linux_init); module_exit(ahd_linux_exit);
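/*
 * Illustrative sketch, not part of the driver source: the CAM-to-Linux
 * status translation performed by ahd_linux_queue_cmd_complete() above is
 * essentially a table-driven mapping from CAM completion codes to Linux
 * host bytes. The standalone, user-space program below mirrors that
 * mapping for a few representative codes so the control flow can be
 * exercised in isolation. The demo_* enumerators are local stand-ins for
 * illustration only, not the kernel's CAM_* / DID_* definitions.
 */
#include <stdio.h>

enum demo_cam_status { DEMO_CAM_REQ_CMP, DEMO_CAM_SEL_TIMEOUT, DEMO_CAM_UNCOR_PARITY, DEMO_CAM_REQUEUE_REQ };
enum demo_did_status { DEMO_DID_OK, DEMO_DID_NO_CONNECT, DEMO_DID_PARITY, DEMO_DID_REQUEUE, DEMO_DID_ERROR };

/* Map a (demo) CAM completion code to a (demo) Linux host byte. */
static enum demo_did_status demo_map_status(enum demo_cam_status s)
{
	switch (s) {
	case DEMO_CAM_REQ_CMP:      return DEMO_DID_OK;
	case DEMO_CAM_SEL_TIMEOUT:  return DEMO_DID_NO_CONNECT;
	case DEMO_CAM_UNCOR_PARITY: return DEMO_DID_PARITY;
	case DEMO_CAM_REQUEUE_REQ:  return DEMO_DID_REQUEUE;
	default:                    return DEMO_DID_ERROR;
	}
}

int main(void)
{
	/* Selection timeout maps to "no connect", matching the driver's intent. */
	printf("DEMO_CAM_SEL_TIMEOUT -> %d (expect DEMO_DID_NO_CONNECT = %d)\n",
	       demo_map_status(DEMO_CAM_SEL_TIMEOUT), DEMO_DID_NO_CONNECT);
	return 0;
}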
linux-master
drivers/scsi/aic7xxx/aic79xx_osm.c
/* * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * String handling code courtesy of Gerard Roudier's <[email protected]> * sym driver. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_proc.c#29 $ */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" static void ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m, u_int our_id, char channel, u_int target_id, u_int target_offset); static void ahc_dump_device_state(struct seq_file *m, struct scsi_device *dev); /* * Table of syncrates that don't follow the "divisible by 4" * rule. This table will be expanded in future SCSI specs. */ static const struct { u_int period_factor; u_int period; /* in 100ths of ns */ } scsi_syncrates[] = { { 0x08, 625 }, /* FAST-160 */ { 0x09, 1250 }, /* FAST-80 */ { 0x0a, 2500 }, /* FAST-40 40MHz */ { 0x0b, 3030 }, /* FAST-40 33MHz */ { 0x0c, 5000 } /* FAST-20 */ }; /* * Return the frequency in kHz corresponding to the given * sync period factor. */ static u_int ahc_calc_syncsrate(u_int period_factor) { int i; /* See if the period is in the "exception" table */ for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) { if (period_factor == scsi_syncrates[i].period_factor) { /* Period in kHz */ return (100000000 / scsi_syncrates[i].period); } } /* * Wasn't in the table, so use the standard * 4 times conversion. 
*/ return (10000000 / (period_factor * 4 * 10)); } static void ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo) { u_int speed; u_int freq; u_int mb; speed = 3300; freq = 0; if (tinfo->offset != 0) { freq = ahc_calc_syncsrate(tinfo->period); speed = freq; } speed *= (0x01 << tinfo->width); mb = speed / 1000; if (mb > 0) seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000); else seq_printf(m, "%dKB/s transfers", speed); if (freq != 0) { seq_printf(m, " (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? " DT" : "", tinfo->offset); } if (tinfo->width > 0) { if (freq != 0) { seq_puts(m, ", "); } else { seq_puts(m, " ("); } seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); } else if (freq != 0) { seq_putc(m, ')'); } seq_putc(m, '\n'); } static void ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m, u_int our_id, char channel, u_int target_id, u_int target_offset) { struct scsi_target *starget; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int lun; tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); if ((ahc->features & AHC_TWIN) != 0) seq_printf(m, "Channel %c ", channel); seq_printf(m, "Target %d Negotiation Settings\n", target_id); seq_puts(m, "\tUser: "); ahc_format_transinfo(m, &tinfo->user); starget = ahc->platform_data->starget[target_offset]; if (!starget) return; seq_puts(m, "\tGoal: "); ahc_format_transinfo(m, &tinfo->goal); seq_puts(m, "\tCurr: "); ahc_format_transinfo(m, &tinfo->curr); for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct scsi_device *sdev; sdev = scsi_device_lookup_by_target(starget, lun); if (sdev == NULL) continue; ahc_dump_device_state(m, sdev); } } static void ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev) { struct ahc_linux_device *dev = scsi_transport_device_data(sdev); seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", sdev->sdev_target->channel + 'A', sdev->sdev_target->id, (u8)sdev->lun); seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); seq_printf(m, "\t\tCommands Active %d\n", dev->active); seq_printf(m, "\t\tCommand Openings %d\n", dev->openings); seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags); seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); } int ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length) { struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; struct seeprom_descriptor sd; int have_seeprom; u_long s; int paused; int written; /* Default to failure. 
*/ written = -EINVAL; ahc_lock(ahc, &s); paused = ahc_is_paused(ahc); if (!paused) ahc_pause(ahc); if (length != sizeof(struct seeprom_config)) { printk("ahc_proc_write_seeprom: incorrect buffer size\n"); goto done; } have_seeprom = ahc_verify_cksum((struct seeprom_config*)buffer); if (have_seeprom == 0) { printk("ahc_proc_write_seeprom: cksum verification failed\n"); goto done; } sd.sd_ahc = ahc; #if AHC_PCI_CONFIG > 0 if ((ahc->chip & AHC_PCI) != 0) { sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; if (ahc->flags & AHC_LARGE_SEEPROM) sd.sd_chip = C56_66; else sd.sd_chip = C46; sd.sd_MS = SEEMS; sd.sd_RDY = SEERDY; sd.sd_CS = SEECS; sd.sd_CK = SEECK; sd.sd_DO = SEEDO; sd.sd_DI = SEEDI; have_seeprom = ahc_acquire_seeprom(ahc, &sd); } else #endif if ((ahc->chip & AHC_VL) != 0) { sd.sd_control_offset = SEECTL_2840; sd.sd_status_offset = STATUS_2840; sd.sd_dataout_offset = STATUS_2840; sd.sd_chip = C46; sd.sd_MS = 0; sd.sd_RDY = EEPROM_TF; sd.sd_CS = CS_2840; sd.sd_CK = CK_2840; sd.sd_DO = DO_2840; sd.sd_DI = DI_2840; have_seeprom = TRUE; } else { printk("ahc_proc_write_seeprom: unsupported adapter type\n"); goto done; } if (!have_seeprom) { printk("ahc_proc_write_seeprom: No Serial EEPROM\n"); goto done; } else { u_int start_addr; if (ahc->seep_config == NULL) { ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC); if (ahc->seep_config == NULL) { printk("aic7xxx: Unable to allocate serial " "eeprom buffer. Write failing\n"); goto done; } } printk("aic7xxx: Writing Serial EEPROM\n"); start_addr = 32 * (ahc->channel - 'A'); ahc_write_seeprom(&sd, (u_int16_t *)buffer, start_addr, sizeof(struct seeprom_config)/2); ahc_read_seeprom(&sd, (uint16_t *)ahc->seep_config, start_addr, sizeof(struct seeprom_config)/2); #if AHC_PCI_CONFIG > 0 if ((ahc->chip & AHC_VL) == 0) ahc_release_seeprom(&sd); #endif written = length; } done: if (!paused) ahc_unpause(ahc); ahc_unlock(ahc, &s); return (written); } /* * Return information to handle /proc support for the driver. */ int ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; char ahc_info[256]; u_int max_targ; u_int i; seq_printf(m, "Adaptec AIC7xxx driver version: %s\n", AIC7XXX_DRIVER_VERSION); seq_printf(m, "%s\n", ahc->description); ahc_controller_info(ahc, ahc_info); seq_printf(m, "%s\n", ahc_info); seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n", ahc->scb_data->numscbs, AHC_NSEG); if (ahc->seep_config == NULL) seq_puts(m, "No Serial EEPROM\n"); else { seq_puts(m, "Serial EEPROM:\n"); for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { if (((i % 8) == 0) && (i != 0)) { seq_putc(m, '\n'); } seq_printf(m, "0x%.4x ", ((uint16_t*)ahc->seep_config)[i]); } seq_putc(m, '\n'); } seq_putc(m, '\n'); max_targ = 16; if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) max_targ = 8; for (i = 0; i < max_targ; i++) { u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } ahc_dump_target_state(ahc, m, our_id, channel, target_id, i); } return 0; }
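/*
 * Illustrative sketch, not part of the driver source: ahc_calc_syncsrate()
 * above converts a SCSI sync period factor into a frequency in kHz, using a
 * small exception table for the FAST-20/40/80/160 factors and the standard
 * "period = factor * 4 ns" rule otherwise. The standalone program below
 * reproduces that arithmetic so both branches can be checked by hand; the
 * table contents are copied from the code above, everything else is a
 * local stand-in.
 */
#include <stdio.h>

static const struct { unsigned factor, period_100ths_ns; } demo_syncrates[] = {
	{ 0x08,  625 },		/* FAST-160 */
	{ 0x09, 1250 },		/* FAST-80 */
	{ 0x0a, 2500 },		/* FAST-40 40MHz */
	{ 0x0b, 3030 },		/* FAST-40 33MHz */
	{ 0x0c, 5000 }		/* FAST-20 */
};

/* Frequency in kHz for a given sync period factor. */
static unsigned demo_calc_syncsrate(unsigned factor)
{
	unsigned i;

	/* Exception table first: period stored in 100ths of a nanosecond. */
	for (i = 0; i < sizeof(demo_syncrates) / sizeof(demo_syncrates[0]); i++)
		if (factor == demo_syncrates[i].factor)
			return 100000000 / demo_syncrates[i].period_100ths_ns;

	/* Standard rule: period = factor * 4 ns, frequency = 1e6 / period_ns kHz. */
	return 10000000 / (factor * 4 * 10);
}

int main(void)
{
	/* 0x0c is FAST-20: 5000 * 0.01 ns = 50 ns period -> 20000 kHz. */
	printf("factor 0x0c -> %u kHz (expect 20000)\n", demo_calc_syncsrate(0x0c));
	/* 0x19 (25) uses the standard rule: 25 * 4 ns = 100 ns -> 10000 kHz. */
	printf("factor 0x19 -> %u kHz (expect 10000)\n", demo_calc_syncsrate(0x19));
	return 0;
}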
linux-master
drivers/scsi/aic7xxx/aic7xxx_proc.c
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $ */ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aicasm/aicasm_insformat.h" /***************************** Lookup Tables **********************************/ static const char *const ahd_chip_names[] = { "NONE", "aic7901", "aic7902", "aic7901A" }; /* * Hardware error codes. */ struct ahd_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahd_hard_error_entry ahd_hard_errors[] = { { DSCTMOUT, "Discard Timer has timed out" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); static const struct ahd_phase_table_entry ahd_phase_table[] = { { P_DATAOUT, NOP, "in Data-out phase" }, { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" }, { P_DATAOUT_DT, NOP, "in DT Data-out phase" }, { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" }, { P_COMMAND, NOP, "in Command phase" }, { P_MESGOUT, NOP, "in Message-out phase" }, { P_STATUS, INITIATOR_ERROR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, NOP, "while idle" }, { 0, NOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. 
*/ static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1; /* Our Sequencer Program */ #include "aic79xx_seq.h" /**************************** Function Declarations ***************************/ static void ahd_handle_transmission_error(struct ahd_softc *ahd); static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1); static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime); static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); static void ahd_handle_proto_violation(struct ahd_softc *ahd); static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static struct ahd_tmode_tstate* ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel); #ifdef AHD_TARGET_MODE static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force); #endif static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo); static void ahd_update_pending_scbs(struct ahd_softc *ahd); static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset); static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width); static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahd_clear_msg_state(struct ahd_softc *ahd); static void ahd_handle_message_phase(struct ahd_softc *ahd); typedef enum { AHDMSG_1B, AHDMSG_2B, AHDMSG_EXT } ahd_msgtype; static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full); static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level); #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); #endif static u_int ahd_sglist_size(struct ahd_softc *ahd); static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); static bus_dmamap_callback_t ahd_dmamap_cb; static void ahd_initialize_hscbs(struct ahd_softc *ahd); static int ahd_init_scbdata(struct ahd_softc *ahd); static void ahd_fini_scbdata(struct ahd_softc *ahd); static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); static void ahd_iocell_first_selection(struct ahd_softc *ahd); static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx); static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb); static void ahd_chip_init(struct ahd_softc *ahd); static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb); static int 
ahd_qinfifo_count(struct ahd_softc *ahd); static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid); static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next); static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid); static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); static void ahd_stat_timer(struct timer_list *t); #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif static void ahd_loadseq(struct ahd_softc *ahd); static int ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address); static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts); static int ahd_probe_stack_size(struct ahd_softc *ahd); static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb); static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb); #ifdef AHD_TARGET_MODE static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask); static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd); #endif static int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahd_alloc_scbs(struct ahd_softc *ahd); static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid); static void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb); static void ahd_clear_critical_section(struct ahd_softc *ahd); static void ahd_clear_intstat(struct ahd_softc *ahd); static void ahd_enable_coalescing(struct ahd_softc *ahd, int enable); static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); static void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); static void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb); static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); static void ahd_shutdown(void *arg); static void ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds); static int ahd_verify_vpd_cksum(struct vpd_config *vpd); static int ahd_wait_seeprom(struct ahd_softc *ahd); static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); static void ahd_reset_cmds_pending(struct ahd_softc *ahd); /*************************** Interrupt Services *******************************/ static void ahd_run_qoutfifo(struct ahd_softc *ahd); #ifdef AHD_TARGET_MODE static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); #endif static void ahd_handle_hwerrint(struct ahd_softc *ahd); static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); static void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat); /************************ Sequencer Execution Control *************************/ void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { if (ahd->src_mode == src && ahd->dst_mode == dst) return; #ifdef AHD_DEBUG if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) panic("Setting mode 
prior to saving it.\n"); if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printk("%s: Setting mode 0x%x\n", ahd_name(ahd), ahd_build_mode_state(ahd, src, dst)); #endif ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); ahd->src_mode = src; ahd->dst_mode = dst; } static void ahd_update_modes(struct ahd_softc *ahd) { ahd_mode_state mode_ptr; ahd_mode src; ahd_mode dst; mode_ptr = ahd_inb(ahd, MODE_PTR); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printk("Reading mode 0x%x\n", mode_ptr); #endif ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); ahd_known_modes(ahd, src, dst); } static void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line) { #ifdef AHD_DEBUG if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { panic("%s:%s:%d: Mode assertion failed.\n", ahd_name(ahd), file, line); } #endif } #define AHD_ASSERT_MODES(ahd, source, dest) \ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); ahd_mode_state ahd_save_modes(struct ahd_softc *ahd) { if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) ahd_update_modes(ahd); return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); } void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) { ahd_mode src; ahd_mode dst; ahd_extract_mode_state(ahd, state, &src, &dst); ahd_set_modes(ahd, src, dst); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ int ahd_is_paused(struct ahd_softc *ahd) { return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ void ahd_pause(struct ahd_softc *ahd) { ahd_outb(ahd, HCNTRL, ahd->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahd_is_paused(ahd) == 0) ; } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ void ahd_unpause(struct ahd_softc *ahd) { /* * Automatically restore our modes to those saved * prior to the first change of the mode. */ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) ahd_reset_cmds_pending(ahd); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); } if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) ahd_outb(ahd, HCNTRL, ahd->unpause); ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); } /*********************** Scatter Gather List Handling *************************/ void * ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, dma_addr_t addr, bus_size_t len, int last) { scb->sg_count++; if (sizeof(dma_addr_t) > 4 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)sgptr; sg->addr = ahd_htole64(addr); sg->len = ahd_htole32(len | (last ? 
AHD_DMA_LAST_SEG : 0)); return (sg + 1); } else { struct ahd_dma_seg *sg; sg = (struct ahd_dma_seg *)sgptr; sg->addr = ahd_htole32(addr & 0xFFFFFFFF); sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000) | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } } static void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb) { /* XXX Handle target mode SCBs. */ scb->crc_retry_count = 0; if ((scb->flags & SCB_PACKETIZED) != 0) { /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE; } else { if (ahd_get_transfer_length(scb) & 0x01) scb->hscb->task_attribute = SCB_XFERLEN_ODD; else scb->hscb->task_attribute = 0; } if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0) scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr = ahd_htole32(scb->sense_busaddr); } static void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb) { /* * Copy the first SG into the "current" data ponter area. */ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)scb->sg_list; scb->hscb->dataptr = sg->addr; scb->hscb->datacnt = sg->len; } else { struct ahd_dma_seg *sg; uint32_t *dataptr_words; sg = (struct ahd_dma_seg *)scb->sg_list; dataptr_words = (uint32_t*)&scb->hscb->dataptr; dataptr_words[0] = sg->addr; dataptr_words[1] = 0; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { uint64_t high_addr; high_addr = ahd_le32toh(sg->len) & 0x7F000000; scb->hscb->dataptr |= ahd_htole64(high_addr << 8); } scb->hscb->datacnt = sg->len; } /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs. */ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID); } static void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) { scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; } /************************** Memory mapping routines ***************************/ static void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) { dma_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); return ((uint8_t *)scb->sg_list + sg_offset); } static uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) { dma_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) - ahd_sg_size(ahd); return (scb->sg_list_busaddr + sg_offset); } static void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op) { ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, scb->hscb_map->dmamap, /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, /*len*/sizeof(*scb->hscb), op); } void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op) { if (scb->sg_count == 0) return; ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat, scb->sg_map->dmamap, /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd), /*len*/ahd_sg_size(ahd) * scb->sg_count, op); } static void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op) { ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat, scb->sense_map->dmamap, /*offset*/scb->sense_busaddr, /*len*/AHD_SENSE_BUFSIZE, op); } #ifdef AHD_TARGET_MODE static uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) { return (((uint8_t *)&ahd->targetcmds[index]) - (uint8_t *)ahd->qoutfifo); } #endif /*********************** 
Miscellaneous Support Functions ***********************/ /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahd->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } uint16_t ahd_inw(struct ahd_softc *ahd, u_int port) { /* * Read high byte first as some registers increment * or have other side effects when the low byte is * read. */ uint16_t r = ahd_inb(ahd, port+1) << 8; return r | ahd_inb(ahd, port); } void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) { /* * Write low byte first to accommodate registers * such as PRGMCNT where the order maters. */ ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); } uint32_t ahd_inl(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24)); } void ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) { ahd_outb(ahd, port, (value) & 0xFF); ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); } uint64_t ahd_inq(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24) | (((uint64_t)ahd_inb(ahd, port+4)) << 32) | (((uint64_t)ahd_inb(ahd, port+5)) << 40) | (((uint64_t)ahd_inb(ahd, port+6)) << 48) | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); } void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) { ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); ahd_outb(ahd, port+2, (value >> 16) & 0xFF); ahd_outb(ahd, port+3, (value >> 24) & 0xFF); ahd_outb(ahd, port+4, (value >> 32) & 0xFF); ahd_outb(ahd, port+5, (value >> 40) & 0xFF); ahd_outb(ahd, port+6, (value >> 48) & 0xFF); ahd_outb(ahd, port+7, (value >> 56) & 0xFF); } u_int ahd_get_scbptr(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); } void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); ahd_outb(ahd, SCBPTR, scbptr & 0xFF); ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); } #if 0 /* unused */ static u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd) { return (ahd_inw_atomic(ahd, HNSCB_QOFF)); } #endif static void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outw_atomic(ahd, HNSCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_hescb_qoff(struct ahd_softc *ahd) { return (ahd_inb(ahd, HESCB_QOFF)); } #endif static void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outb(ahd, HESCB_QOFF, value); } static u_int ahd_get_snscb_qoff(struct ahd_softc *ahd) { u_int oldvalue; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); oldvalue = ahd_inw(ahd, SNSCB_QOFF); ahd_outw(ahd, SNSCB_QOFF, oldvalue); return (oldvalue); } static void 
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outw(ahd, SNSCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_sescb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SESCB_QOFF)); } #endif static void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SESCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); } #endif static void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SDSCB_QOFF, value & 0xFF); ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); } u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset) { u_int value; /* * Workaround PCI-X Rev A. hardware bug. * After a host read of SCB memory, the chip * may become confused into thinking prefetch * was required. This starts the discard timer * running and can cause an unexpected discard * timer interrupt. The work around is to read * a normal register prior to the exhaustion of * the discard timer. The mode pointer register * has no side effects and so serves well for * this purpose. * * Razor #528 */ value = ahd_inb(ahd, offset); if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0) ahd_inb(ahd, MODE_PTR); return (value); } u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inb_scbram(ahd, offset) | (ahd_inb_scbram(ahd, offset+1) << 8)); } static uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inw_scbram(ahd, offset) | (ahd_inw_scbram(ahd, offset+2) << 16)); } static uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inl_scbram(ahd, offset) | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32); } struct scb * ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) { struct scb* scb; if (tag >= AHD_SCB_MAX) return (NULL); scb = ahd->scb_data.scbindex[tag]; if (scb != NULL) ahd_sync_scb(ahd, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *q_hscb; struct map_node *q_hscb_map; uint32_t saved_hscb_busaddr; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB (by address) to download, * and we can't disappoint it. To achieve this, the next * HSCB to download is saved off in ahd->next_queued_hscb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahd->next_queued_hscb; q_hscb_map = ahd->next_queued_hscb_map; saved_hscb_busaddr = q_hscb->hscb_busaddr; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); q_hscb->hscb_busaddr = saved_hscb_busaddr; q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; /* Now swap HSCB pointers. 
*/ ahd->next_queued_hscb = scb->hscb; ahd->next_queued_hscb_map = scb->hscb_map; scb->hscb = q_hscb; scb->hscb_map = q_hscb_map; /* Now define the mapping from tag to SCB in the scbindex */ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; } /* * Tell the sequencer about a new transaction to execute. */ void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) { ahd_swap_with_next_hscb(ahd, scb); if (SCBID_IS_NULL(SCB_GET_TAG(scb))) panic("Attempt to queue invalid SCB tag %x\n", SCB_GET_TAG(scb)); /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; if (scb->sg_count != 0) ahd_setup_data_scb(ahd, scb); else ahd_setup_noxfer_scb(ahd, scb); ahd_setup_scb_common(ahd, scb); /* * Make sure our data is consistent from the * perspective of the adapter. */ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { uint64_t host_dataptr; host_dataptr = ahd_le64toh(scb->hscb->dataptr); printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n", ahd_name(ahd), SCB_GET_TAG(scb), scb->hscb->scsiid, ahd_le32toh(scb->hscb->hscb_busaddr), (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), (u_int)(host_dataptr & 0xFFFFFFFF), ahd_le32toh(scb->hscb->datacnt)); } #endif /* Tell the adapter about the newly queued SCB */ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); } /************************** Interrupt Processing ******************************/ static void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/0, /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op); } static void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op) { #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, 0), sizeof(struct target_cmd) * AHD_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHD_RUN_QOUTFIFO 0x1 #define AHD_RUN_TQINFIFO 0x2 static u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) { u_int retval; retval = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo), /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD); if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag == ahd->qoutfifonext_valid_tag) retval |= AHD_RUN_QOUTFIFO; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) retval |= AHD_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ int ahd_intr(struct ahd_softc *ahd) { u_int intstat; if ((ahd->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. 
	 */
	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
	 && (ahd_check_cmdcmpltqueues(ahd) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahd_inb(ahd, INTSTAT);

	if ((intstat & INT_PEND) == 0)
		return (0);

	if (intstat & CMDCMPLT) {
		ahd_outb(ahd, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
			if (ahd_is_paused(ahd)) {
				/*
				 * Potentially lost SEQINT.
				 * If SEQINTCODE is non-zero,
				 * simulate the SEQINT.
				 */
				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
					intstat |= SEQINT;
			}
		} else {
			ahd_flush_device_writes(ahd);
		}
		ahd_run_qoutfifo(ahd);
		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
		ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
		if ((ahd->flags & AHD_TARGETROLE) != 0)
			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
	} else {
		if ((intstat & SEQINT) != 0)
			ahd_handle_seqint(ahd, intstat);

		if ((intstat & SCSIINT) != 0)
			ahd_handle_scsiint(ahd, intstat);
	}
	return (1);
}

/******************************** Private Inlines *****************************/
static inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}

/*
 * Determine if the current connection has a packetized
 * agreement.  This does not necessarily mean that we
 * are currently in a packetized transfer.  We could
 * just as easily be sending or receiving a message.
 */
static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
	ahd_mode_state	 saved_modes;
	int		 packetized;

	saved_modes = ahd_save_modes(ahd);
	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
		/*
		 * The packetized bit refers to the last
		 * connection, not the current one.  Check
		 * for non-zero LQISTATE instead.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		packetized = ahd_inb(ahd, LQISTATE) != 0;
	} else {
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
	}
	ahd_restore_modes(ahd, saved_modes);
	return (packetized);
}

static inline int
ahd_set_active_fifo(struct ahd_softc *ahd)
{
	u_int active_fifo;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	switch (active_fifo) {
	case 0:
	case 1:
		ahd_set_modes(ahd, active_fifo, active_fifo);
		return (1);
	default:
		return (0);
	}
}

static inline void
ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
}

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
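 * SG_STATUS_VALID in the HSCB's sgptr indicates that the sequencer
 * copied residual information back, so the residual is only
 * calculated in that case.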
*/ static inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = ahd_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_calc_residual(ahd, scb); } static inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = ahd_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_handle_scb_status(ahd, scb); else ahd_done(ahd, scb); } /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ static void ahd_restart(struct ahd_softc *ahd) { ahd_pause(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* No more pending messages */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ ahd_outb(ahd, MSG_OUT, NOP); /* No message to send */ ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); ahd_outb(ahd, SEQINTCTL, 0); ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SAVED_SCSIID, 0xFF); ahd_outb(ahd, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); /* Always allow reselection */ ahd_outb(ahd, SCSISEQ1, ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. */ ahd_outb(ahd, CLRINT, CLRSEQINT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); ahd_unpause(ahd); } static void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) { ahd_mode_state saved_modes; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_FIFOS) != 0) printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); #endif saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, fifo, fifo); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, CCSGRESET); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_restore_modes(ahd, saved_modes); } /************************* Input/Output Queues ********************************/ /* * Flush and completed commands that are sitting in the command * complete queues down on the chip but have yet to be dma'ed back up. */ static void ahd_flush_qoutfifo(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int saved_scbptr; u_int ccscbctl; u_int scbid; u_int next_scbid; saved_modes = ahd_save_modes(ahd); /* * Flush the good status FIFO for completed packetized commands. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scbptr = ahd_get_scbptr(ahd); while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) { u_int fifo_mode; u_int i; scbid = ahd_inw(ahd, GSFIFO); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - GSFIFO SCB %d invalid\n", ahd_name(ahd), scbid); continue; } /* * Determine if this transaction is still active in * any FIFO. If it is, we must flush that FIFO to * the host before completing the command. */ fifo_mode = 0; rescan_fifos: for (i = 0; i < 2; i++) { /* Toggle to the other mode. 
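			 * Each pass flips between DFF0 and DFF1, and the loop
			 * below restarts whenever the SCB is found active so
			 * that both FIFOs are rechecked before we move on.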
*/ fifo_mode ^= 1; ahd_set_modes(ahd, fifo_mode, fifo_mode); if (ahd_scb_active_in_fifo(ahd, scb) == 0) continue; ahd_run_data_fifo(ahd, scb); /* * Running this FIFO may cause a CFG4DATA for * this same transaction to assert in the other * FIFO or a new snapshot SAVEPTRS interrupt * in this FIFO. Even running a FIFO may not * clear the transaction if we are still waiting * for data to drain to the host. We must loop * until the transaction is not active in either * FIFO just to be sure. Reset our loop counter * so we will visit both FIFOs again before * declaring this transaction finished. We * also delay a bit so that status has a chance * to change before we look at this FIFO again. */ ahd_delay(200); goto rescan_fifos; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_set_scbptr(ahd, scbid); if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR) & SG_LIST_NULL) != 0)) { u_int comp_head; /* * The transfer completed with a residual. * Place this SCB on the complete DMA list * so that we update our in-core copy of the * SCB before completing the command. */ ahd_outb(ahd, SCB_SCSI_STATUS, 0); ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_STATUS_VALID); ahd_outw(ahd, SCB_TAG, scbid); ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL); comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); if (SCBID_IS_NULL(comp_head)) { ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); } else { u_int tail; tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL); ahd_set_scbptr(ahd, tail); ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); ahd_set_scbptr(ahd, scbid); } } else ahd_complete_scb(ahd, scb); } ahd_set_scbptr(ahd, saved_scbptr); /* * Setup for command channel portion of flush. */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Wait for any inprogress DMA to complete and clear DMA state * if this is for an SCB in the qinfifo. */ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { if ((ccscbctl & ARRDONE) != 0) break; } else if ((ccscbctl & CCSCBDONE) != 0) break; ahd_delay(200); } /* * We leave the sequencer to cleanup in the case of DMA's to * update the qoutfifo. In all other cases (DMA's to the * chip or a push of an SCB from the COMPLETE_DMA_SCB list), * we disable the DMA engine so that the sequencer will not * attempt to handle the DMA completion. */ if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0) ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN)); /* * Complete any SCBs that just finished * being DMA'ed into the qoutfifo. */ ahd_run_qoutfifo(ahd); saved_scbptr = ahd_get_scbptr(ahd); /* * Manually update/complete any completed SCBs that are waiting to be * DMA'ed back up to the host. 
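	 * Their in-core hardware SCB images are refreshed byte by byte
	 * from SCB RAM, since the DMA that would normally update them
	 * has been shut down above.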
*/ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { uint8_t *hscb_ptr; u_int i; ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); continue; } hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++) *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - Complete Qfrz SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - Complete SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); /* * Restore state. */ ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_UPDATE_PEND_CMDS; } /* * Determine if an SCB for a packetized transaction * is active in a FIFO. */ static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) { /* * The FIFO is only active for our transaction if * the SCBPTR matches the SCB's ID and the firmware * has installed a handler for the FIFO or we have * a pending SAVEPTRS or CFG4DATA interrupt. */ if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb) || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0 && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0)) return (0); return (1); } /* * Run a data fifo to completion for a transaction we know * has completed across the SCSI bus (good status has been * received). We are already set to the correct FIFO mode * on entry to this routine. * * This function attempts to operate exactly as the firmware * would when running this FIFO. Care must be taken to update * this routine any time the firmware's FIFO algorithm is * changed. */ static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb) { u_int seqintsrc; seqintsrc = ahd_inb(ahd, SEQINTSRC); if ((seqintsrc & CFG4DATA) != 0) { uint32_t datacnt; uint32_t sgptr; /* * Clear full residual flag. */ sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID; ahd_outb(ahd, SCB_SGPTR, sgptr); /* * Load datacnt and address. */ datacnt = ahd_inl_scbram(ahd, SCB_DATACNT); if ((datacnt & AHD_DMA_LAST_SEG) != 0) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } else ahd_outb(ahd, SG_STATE, LOADING_NEEDED); ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR)); ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr); ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); /* * Initialize Residual Fields. */ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK); /* * Mark the SCB as having a FIFO in use. */ ahd_outb(ahd, SCB_FIFO_USE_COUNT, ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1); /* * Install a "fake" handler for this FIFO. 
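		 * A non-invalid LONGJMP_ADDR makes ahd_scb_active_in_fifo()
		 * treat this FIFO as still owned by the transaction until
		 * the channel is cleared below.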
*/ ahd_outw(ahd, LONGJMP_ADDR, 0); /* * Notify the hardware that we have satisfied * this sequencer interrupt. */ ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA); } else if ((seqintsrc & SAVEPTRS) != 0) { uint32_t sgptr; uint32_t resid; if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { /* * Snapshot Save Pointers. All that * is necessary to clear the snapshot * is a CLRCHN. */ goto clrchn; } /* * Disable S/G fetch so the DMA engine * is available to future users. */ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, 0); ahd_outb(ahd, SG_STATE, 0); /* * Flush the data FIFO. Strickly only * necessary for Rev A parts. */ ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH); /* * Calculate residual. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); resid = ahd_inl(ahd, SHCNT); resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24; ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { /* * Must back up to the correct S/G element. * Typically this just means resetting our * low byte to the offset in the SG_CACHE, * but if we wrapped, we have to correct * the other bytes of the sgptr too. */ if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0 && (sgptr & 0x80) == 0) sgptr -= 0x100; sgptr &= ~0xFF; sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW) & SG_ADDR_MASK; ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0); } else if ((resid & AHD_SG_LEN_MASK) == 0) { ahd_outb(ahd, SCB_RESIDUAL_SGPTR, sgptr | SG_LIST_NULL); } /* * Save Pointers. */ ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR)); ahd_outl(ahd, SCB_DATACNT, resid); ahd_outl(ahd, SCB_SGPTR, sgptr); ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS); ahd_outb(ahd, SEQIMODE, ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); /* * If the data is to the SCSI bus, we are * done, otherwise wait for FIFOEMP. */ if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) goto clrchn; } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) { uint32_t sgptr; uint64_t data_addr; uint32_t data_len; u_int dfcntrl; /* * Disable S/G fetch so the DMA engine * is available to future users. We won't * be using the DMA engine to load segments. */ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) { ahd_outb(ahd, CCSGCTL, 0); ahd_outb(ahd, SG_STATE, LOADING_NEEDED); } /* * Wait for the DMA engine to notice that the * host transfer is enabled and that there is * space in the S/G FIFO for new segments before * loading more segments. */ if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) { /* * Determine the offset of the next S/G * element to load. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK; data_addr <<= 8; data_addr |= sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } /* * Update residual information. */ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); /* * Load the S/G. */ if (data_len & AHD_DMA_LAST_SEG) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } ahd_outq(ahd, HADDR, data_addr); ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF); /* * Advertise the segment to the hardware. 
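			 * PRELOADEN lets the DMA engine pick up the address
			 * and count written to HADDR/HCNT above as its next
			 * segment.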
*/ dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN; if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN * is never modified by this * operation. */ dfcntrl |= SCSIENWRDIS; } ahd_outb(ahd, DFCNTRL, dfcntrl); } } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) { /* * Transfer completed to the end of SG list * and has flushed to the host. */ ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); goto clrchn; } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) { clrchn: /* * Clear any handler for this FIFO, decrement * the FIFO use count for the SCB, and release * the FIFO. */ ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SCB_FIFO_USE_COUNT, ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1); ahd_outb(ahd, DFFSXFRCTL, CLRCHN); } } /* * Look for entries in the QoutFIFO that have completed. * The valid_tag completion field indicates the validity * of the entry - the valid value toggles each time through * the queue. We use the sg_status field in the completion * entry to avoid referencing the hscb if the completion * occurred with no errors and no residual. sg_status is * a copy of the first byte (little endian) of the sgptr * hscb field. */ static void ahd_run_qoutfifo(struct ahd_softc *ahd) { struct ahd_completion *completion; struct scb *scb; u_int scb_index; if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0) panic("ahd_run_qoutfifo recursion"); ahd->flags |= AHD_RUNNING_QOUTFIFO; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD); for (;;) { completion = &ahd->qoutfifo[ahd->qoutfifonext]; if (completion->valid_tag != ahd->qoutfifonext_valid_tag) break; scb_index = ahd_le16toh(completion->tag); scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { printk("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahd_name(ahd), scb_index, ahd->qoutfifonext); ahd_dump_card_state(ahd); } else if ((completion->sg_status & SG_STATUS_VALID) != 0) { ahd_handle_scb_status(ahd, scb); } else { ahd_done(ahd, scb); } ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1); if (ahd->qoutfifonext == 0) ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID; } ahd->flags &= ~AHD_RUNNING_QOUTFIFO; } /************************* Interrupt Handling *********************************/ static void ahd_handle_hwerrint(struct ahd_softc *ahd) { /* * Some catastrophic hardware error has occurred. * Print it for the user and disable the controller. */ int i; int error; error = ahd_inb(ahd, ERROR); for (i = 0; i < num_errors; i++) { if ((error & ahd_hard_errors[i].errno) != 0) printk("%s: hwerrint, %s\n", ahd_name(ahd), ahd_hard_errors[i].errmesg); } ahd_dump_card_state(ahd); panic("BRKADRINT"); /* Tell everyone that this HBA is no longer available */ ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Tell the system that this controller has gone away. */ ahd_free(ahd); } #ifdef AHD_DEBUG static void ahd_dump_sglist(struct scb *scb) { int i; if (scb->sg_count > 0) { if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg_list; sg_list = (struct ahd_dma64_seg*)scb->sg_list; for (i = 0; i < scb->sg_count; i++) { uint64_t addr; addr = ahd_le64toh(sg_list[i].addr); printk("sg[%d] - Addr 0x%x%x : Length %d%s\n", i, (uint32_t)((addr >> 32) & 0xFFFFFFFF), (uint32_t)(addr & 0xFFFFFFFF), sg_list[i].len & AHD_SG_LEN_MASK, (sg_list[i].len & AHD_DMA_LAST_SEG) ? 
" Last" : ""); } } else { struct ahd_dma_seg *sg_list; sg_list = (struct ahd_dma_seg*)scb->sg_list; for (i = 0; i < scb->sg_count; i++) { uint32_t len; len = ahd_le32toh(sg_list[i].len); printk("sg[%d] - Addr 0x%x%x : Length %d%s\n", i, (len & AHD_SG_HIGH_ADDR_MASK) >> 24, ahd_le32toh(sg_list[i].addr), len & AHD_SG_LEN_MASK, len & AHD_DMA_LAST_SEG ? " Last" : ""); } } } } #endif /* AHD_DEBUG */ static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) { u_int seqintcode; /* * Save the sequencer interrupt code and clear the SEQINT * bit. We will unpause the sequencer, if appropriate, * after servicing the request. */ seqintcode = ahd_inb(ahd, SEQINTCODE); ahd_outb(ahd, CLRINT, CLRSEQINT); if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { /* * Unpause the sequencer and let it clear * SEQINT by writing NO_SEQINT to it. This * will cause the sequencer to be paused again, * which is the expected state of this routine. */ ahd_unpause(ahd); while (!ahd_is_paused(ahd)) ; ahd_outb(ahd, CLRINT, CLRSEQINT); } ahd_update_modes(ahd); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: Handle Seqint Called for code %d\n", ahd_name(ahd), seqintcode); #endif switch (seqintcode) { case ENTERING_NONPACK: { struct scb *scb; u_int scbid; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { /* * Somehow need to know if this * is from a selection or reselection. * From that, we can determine target * ID so we at least have an I_T nexus. */ } else { ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); ahd_outb(ahd, SAVED_LUN, scb->hscb->lun); ahd_outb(ahd, SEQ_FLAGS, 0x0); } if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * Phase change after read stream with * CRC error with P0 asserted on last * packet. 
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printk("%s: Assuming LQIPHASE_NLQ with " "P0 assertion\n", ahd_name(ahd)); #endif } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printk("%s: Entering NONPACK\n", ahd_name(ahd)); #endif break; } case INVALID_SEQINT: printk("%s: Invalid Sequencer interrupt occurred, " "resetting channel.\n", ahd_name(ahd)); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) ahd_dump_card_state(ahd); #endif ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; case STATUS_OVERRUN: { struct scb *scb; u_int scbid; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) ahd_print_path(ahd, scb); else printk("%s: ", ahd_name(ahd)); printk("SCB %d Packetized Status Overrun", scbid); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; } case CFG4ISTAT_INTR: { struct scb *scb; u_int scbid; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { ahd_dump_card_state(ahd); printk("CFG4ISTAT: Free SCB %d referenced", scbid); panic("For safety"); } ahd_outq(ahd, HADDR, scb->sense_busaddr); ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); ahd_outb(ahd, HCNT + 2, 0); ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); break; } case ILLEGAL_PHASE: { u_int bus_phase; bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; printk("%s: ILLEGAL_PHASE 0x%x\n", ahd_name(ahd), bus_phase); switch (bus_phase) { case P_DATAOUT: case P_DATAIN: case P_DATAOUT_DT: case P_DATAIN_DT: case P_MESGOUT: case P_STATUS: case P_MESGIN: ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); printk("%s: Issued Bus Reset.\n", ahd_name(ahd)); break; case P_COMMAND: { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; /* * If a target takes us into the command phase * assume that it has been externally reset and * has thus lost our previous packetized negotiation * agreement. Since we have not sent an identify * message and may not have fully qualified the * connection, we change our command to TUR, assert * ATN and ABORT the task when we go to message in * phase. The OSM will see the REQUEUE_REQUEST * status and retry the command. */ scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("Invalid phase with no valid SCB. " "Resetting bus.\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; } ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), ROLE_INITIATOR); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE, /*paused*/TRUE); /* Hand-craft TUR command */ ahd_outb(ahd, SCB_CDB_STORE, 0); ahd_outb(ahd, SCB_CDB_STORE+1, 0); ahd_outb(ahd, SCB_CDB_STORE+2, 0); ahd_outb(ahd, SCB_CDB_STORE+3, 0); ahd_outb(ahd, SCB_CDB_STORE+4, 0); ahd_outb(ahd, SCB_CDB_STORE+5, 0); ahd_outb(ahd, SCB_CDB_LEN, 6); scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); scb->hscb->control |= MK_MESSAGE; ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); /* * The lun is 0, regardless of the SCB's lun * as we have not sent an identify message. 
*/ ahd_outb(ahd, SAVED_LUN, 0); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_assert_atn(ahd); scb->flags &= ~SCB_PACKETIZED; scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET; ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); ahd_freeze_scb(scb); /* Notify XPT */ ahd_send_async(ahd, devinfo.channel, devinfo.target, CAM_LUN_WILDCARD, AC_SENT_BDR); /* * Allow the sequencer to continue with * non-pack processing. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT1, 0); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { ahd_print_path(ahd, scb); printk("Unexpected command phase from " "packetized target\n"); } #endif break; } } break; } case CFG4OVERRUN: { struct scb *scb; u_int scb_index; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), ahd_inb(ahd, MODE_PTR)); } #endif scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { /* * Attempt to transfer to an SCB that is * not outstanding. */ ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd->msgout_buf[0] = ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; /* * Clear status received flag to prevent any * attempt to complete this bogus SCB. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~STATUS_RCVD); } break; } case DUMP_CARD_STATE: { ahd_dump_card_state(ahd); break; } case PDATA_REINIT: { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printk("%s: PDATA_REINIT - DFCNTRL = 0x%x " "SG_CACHE_SHADOW = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, DFCNTRL), ahd_inb(ahd, SG_CACHE_SHADOW)); } #endif ahd_reinitialize_dataptrs(ahd); break; } case HOST_MSG_LOOP: { struct ahd_devinfo devinfo; /* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is * transferred so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. */ ahd_fetch_devinfo(ahd, &devinfo); if (ahd->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printk("ahd_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message. 
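				 * Dump state for diagnosis, clear the
				 * interrupt, and restart the sequencer
				 * instead of running the message loop.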
*/ ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_restart(ahd); return; } scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (bus_phase == P_MESGOUT) ahd_setup_initiator_msgout(ahd, &devinfo, scb); else { ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; } } #ifdef AHD_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd->msgin_index = 0; } else ahd_setup_target_msgin(ahd, &devinfo, scb); } #endif } ahd_handle_message_phase(ahd); break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); printk("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "REG0 == 0x%x ACCUM = 0x%x\n", ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), ahd_find_busy_tcl(ahd, BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN))), ahd_inw(ahd, SINDEX)); printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_CONTROL == 0x%x\n", ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), ahd_inb_scbram(ahd, SCB_LUN), ahd_inb_scbram(ahd, SCB_CONTROL)); printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); ahd_dump_card_state(ahd); ahd->msgout_buf[0] = TARGET_RESET; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_assert_atn(ahd); break; } case PROTO_VIOLATION: { ahd_handle_proto_violation(ahd); break; } case IGN_WIDE_RES: { struct ahd_devinfo devinfo; ahd_fetch_devinfo(ahd, &devinfo); ahd_handle_ign_wide_residue(ahd, &devinfo); break; } case BAD_PHASE: { u_int lastphase; lastphase = ahd_inb(ahd, LASTPHASE); printk("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahd_name(ahd), 'A', SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), lastphase, ahd_inb(ahd, SCSISIGI)); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahd_inb(ahd, LASTPHASE); printk("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahd_name(ahd), 'A', SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), lastphase, ahd_inb(ahd, SCSISIGI)); ahd_restart(ahd); return; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. */ struct scb *scb; u_int scbindex; #ifdef AHD_DEBUG u_int lastphase; #endif scbindex = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbindex); #ifdef AHD_DEBUG lastphase = ahd_inb(ahd, LASTPHASE); if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { ahd_print_path(ahd, scb); printk("data overrun detected %s. Tag == 0x%x.\n", ahd_lookup_phase_entry(lastphase)->phasemsg, SCB_GET_TAG(scb)); ahd_print_path(ahd, scb); printk("%s seen Data Phase. Length = %ld. " "NumSGs = %d.\n", ahd_inb(ahd, SEQ_FLAGS) & DPHASE ? 
"Have" : "Haven't", ahd_get_transfer_length(scb), scb->sg_count); ahd_dump_sglist(scb); } #endif /* * Set this and it will take effect when the * target does a command complete. */ ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); ahd_freeze_scb(scb); break; } case MKMSG_FAILED: { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; ahd_fetch_devinfo(ahd, &devinfo); printk("%s:%c:%d:%d: Attempt to issue message failed\n", ahd_name(ahd), devinfo.channel, devinfo.target, devinfo.lun); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); break; } case TASKMGMT_FUNC_COMPLETE: { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) { u_int lun; u_int tag; cam_status error; ahd_print_path(ahd, scb); printk("Task Management Func 0x%x Complete\n", scb->hscb->task_management); lun = CAM_LUN_WILDCARD; tag = SCB_LIST_NULL; switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK: tag = SCB_GET_TAG(scb); fallthrough; case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET: lun = scb->hscb->lun; error = CAM_REQ_ABORTED; ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', lun, tag, ROLE_INITIATOR, error); break; case SIU_TASKMGMT_LUN_RESET: lun = scb->hscb->lun; fallthrough; case SIU_TASKMGMT_TARGET_RESET: { struct ahd_devinfo devinfo; ahd_scb_devinfo(ahd, &devinfo, scb); error = CAM_BDR_SENT; ahd_handle_devreset(ahd, &devinfo, lun, CAM_BDR_SENT, lun != CAM_LUN_WILDCARD ? "Lun Reset" : "Target Reset", /*verbose_level*/0); break; } default: panic("Unexpected TaskMgmt Func\n"); break; } } break; } case TASKMGMT_CMD_CMPLT_OKAY: { u_int scbid; struct scb *scb; /* * An ABORT TASK TMF failed to be delivered before * the targeted command completed normally. */ scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) { /* * Remove the second instance of this SCB from * the QINFIFO if it is still there. */ ahd_print_path(ahd, scb); printk("SCB completes before TMF\n"); /* * Handle losing the race. Wait until any * current selection completes. We will then * set the TMF back to zero in this SCB so that * the sequencer doesn't bother to issue another * sequencer interrupt for its completion. */ while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) ; ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); } break; } case TRACEPOINT0: case TRACEPOINT1: case TRACEPOINT2: case TRACEPOINT3: printk("%s: Tracepoint %d\n", ahd_name(ahd), seqintcode - TRACEPOINT0); break; case NO_SEQINT: break; case SAW_HWERR: ahd_handle_hwerrint(ahd); break; default: printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), seqintcode); break; } /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. 
*/ ahd_unpause(ahd); } static void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) { struct scb *scb; u_int status0; u_int status3; u_int status; u_int lqistat1; u_int lqostat0; u_int scbid; u_int busfreetime; ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); lqistat1 = ahd_inb(ahd, LQISTAT1); lqostat0 = ahd_inb(ahd, LQOSTAT0); busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; /* * Ignore external resets after a bus reset. */ if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) { ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); return; } /* * Clear bus reset flag */ ahd->flags &= ~AHD_BUS_RESET_ACTIVE; if ((status0 & (SELDI|SELDO)) != 0) { u_int simode0; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; if ((status0 & IOERR) != 0) { u_int now_lvd; now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; printk("%s: Transceiver State Has Changed to %s mode\n", ahd_name(ahd), now_lvd ? "LVD" : "SE"); ahd_outb(ahd, CLRSINT0, CLRIOERR); /* * A change in I/O mode is equivalent to a bus reset. */ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); ahd_pause(ahd); ahd_setup_iocell_workaround(ahd); ahd_unpause(ahd); } else if ((status0 & OVERRUN) != 0) { printk("%s: SCSI offset overrun detected. Resetting bus.\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } else if ((status & SCSIRSTI) != 0) { printk("%s: Someone reset channel A\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); } else if ((status & SCSIPERR) != 0) { /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); ahd_handle_transmission_error(ahd); } else if (lqostat0 != 0) { printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); ahd_outb(ahd, CLRLQOINT0, lqostat0); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) ahd_outb(ahd, CLRLQOINT1, 0); } else if ((status & SELTO) != 0) { /* Stop the selection */ ahd_outb(ahd, SCSISEQ0, 0); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); /* No more pending messages */ ahd_clear_msg_state(ahd); /* Clear interrupt state */ ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy * LED does. SELINGO is only cleared by a successful * selection, so we must manually clear it to insure * the LED turns off just incase no future successful * selections occur (e.g. no devices on the bus). 
*/ ahd_outb(ahd, CLRSINT0, CLRSELINGO); scbid = ahd_inw(ahd, WAITING_TID_HEAD); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: ahd_intr - referenced scb not " "valid during SELTO scb(0x%x)\n", ahd_name(ahd), scbid); ahd_dump_card_state(ahd); } else { struct ahd_devinfo devinfo; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SELTO) != 0) { ahd_print_path(ahd, scb); printk("Saw Selection Timeout for SCB 0x%x\n", scbid); } #endif ahd_scb_devinfo(ahd, &devinfo, scb); ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahd_freeze_devq(ahd, scb); /* * Cancel any pending transactions on the device * now that it seems to be missing. This will * also revert us to async/narrow transfers until * we can renegotiate with the device. */ ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_SEL_TIMEOUT, "Selection Timeout", /*verbose_level*/1); } ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if ((status0 & (SELDI|SELDO)) != 0) { ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if (status3 != 0) { printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", ahd_name(ahd), status3); ahd_outb(ahd, CLRSINT3, status3); } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); ahd_handle_lqiphase_error(ahd, lqistat1); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * This status can be delayed during some * streaming operations. The SCSIPHASE * handler has already dealt with this case * so just clear the error. */ ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); } else if ((status & BUSFREE) != 0 || (lqistat1 & LQOBUSFREE) != 0) { u_int lqostat1; int restart; int clear_fifo; int packetized; u_int mode; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahd_outb(ahd, SCSISEQ0, 0); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); /* * Determine what we were up to at the time of * the busfree. */ mode = AHD_MODE_SCSI; busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; lqostat1 = ahd_inb(ahd, LQOSTAT1); switch (busfreetime) { case BUSFREE_DFF0: case BUSFREE_DFF1: { mode = busfreetime == BUSFREE_DFF0 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; ahd_set_modes(ahd, mode, mode); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Invalid SCB %d in DFF%d " "during unexpected busfree\n", ahd_name(ahd), scbid, mode); packetized = 0; } else packetized = (scb->flags & SCB_PACKETIZED) != 0; clear_fifo = 1; break; } case BUSFREE_LQO: clear_fifo = 0; packetized = 1; break; default: clear_fifo = 0; packetized = (lqostat1 & LQOBUSFREE) != 0; if (!packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE && (ahd_inb(ahd, SSTAT0) & SELDI) == 0 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) /* * Assume packetized if we are not * on the bus in a non-packetized * capacity and any pending selection * was a packetized selection. */ packetized = 1; break; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("Saw Busfree. Busfreetime = 0x%x.\n", busfreetime); #endif /* * Busfrees that occur in non-packetized phases are * handled by the nonpkt_busfree handler. 
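		 * Only a busfree that looks packetized and left LASTPHASE
		 * at P_BUSFREE goes to the packetized handler; everything
		 * else is treated as a non-packetized busfree.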
*/ if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { restart = ahd_handle_pkt_busfree(ahd, busfreetime); } else { packetized = 0; restart = ahd_handle_nonpkt_busfree(ahd); } /* * Clear the busfree interrupt status. The setting of * the interrupt is a pulse, so in a perfect world, we * would not need to muck with the ENBUSFREE logic. This * would ensure that if the bus moves on to another * connection, busfree protection is still in force. If * BUSFREEREV is broken, however, we must manually clear * the ENBUSFREE if the busfree occurred during a non-pack * connection so that we don't get false positives during * future, packetized, connections. */ ahd_outb(ahd, CLRSINT1, CLRBUSFREE); if (packetized == 0 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); if (clear_fifo) ahd_clear_fifo(ahd, mode); ahd_clear_msg_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); if (restart) { ahd_restart(ahd); } else { ahd_unpause(ahd); } } else { printk("%s: Missing case in ahd_handle_scsiint. status = %x\n", ahd_name(ahd), status); ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_unpause(ahd); } } static void ahd_handle_transmission_error(struct ahd_softc *ahd) { struct scb *scb; u_int scbid; u_int lqistat1; u_int msg_out; u_int curphase; u_int lastphase; u_int perrdiag; u_int cur_col; int silent; scb = NULL; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); ahd_inb(ahd, LQISTAT2); if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { u_int lqistate; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); lqistate = ahd_inb(ahd, LQISTATE); if ((lqistate >= 0x1E && lqistate <= 0x24) || (lqistate == 0x29)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printk("%s: NLQCRC found via LQISTATE\n", ahd_name(ahd)); } #endif lqistat1 |= LQICRCI_NLQ; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } ahd_outb(ahd, CLRLQIINT1, lqistat1); lastphase = ahd_inb(ahd, LASTPHASE); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; perrdiag = ahd_inb(ahd, PERRDIAG); msg_out = INITIATOR_ERROR; ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); /* * Try to find the SCB associated with this error. */ silent = FALSE; if (lqistat1 == 0 || (lqistat1 & LQICRCI_NLQ) != 0) { if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) ahd_set_active_fifo(ahd); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && SCB_IS_SILENT(scb)) silent = TRUE; } cur_col = 0; if (silent == FALSE) { printk("%s: Transmission error detected\n", ahd_name(ahd)); ahd_lqistat1_print(lqistat1, &cur_col, 50); ahd_lastphase_print(lastphase, &cur_col, 50); ahd_scsisigi_print(curphase, &cur_col, 50); ahd_perrdiag_print(perrdiag, &cur_col, 50); printk("\n"); ahd_dump_card_state(ahd); } if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { if (silent == FALSE) { printk("%s: Gross protocol error during incoming " "packet. lqistat1 == 0x%x. Resetting bus.\n", ahd_name(ahd), lqistat1); } ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((lqistat1 & LQICRCI_LQ) != 0) { /* * A CRC error has been detected on an incoming LQ. * The bus is currently hung on the last ACK. * Hit LQIRETRY to release the last ack, and * wait for the sequencer to determine that ATNO * is asserted while in message out to take us * to our host message loop. No NONPACKREQ or * LQIPHASE type errors will occur in this * scenario. 
After this first LQIRETRY, the LQI * manager will be in ISELO where it will * happily sit until another packet phase begins. * Unexpected bus free detection is enabled * through any phases that occur after we release * this last ack until the LQI manager sees a * packet phase. This implies we may have to * ignore a perfectly valid "unexected busfree" * after our "initiator detected error" message is * sent. A busfree is the expected response after * we tell the target that it's L_Q was corrupted. * (SPI4R09 10.7.3.3.3) */ ahd_outb(ahd, LQCTL2, LQIRETRY); printk("LQIRetry for LQICRCI_LQ to release ACK\n"); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * We detected a CRC error in a NON-LQ packet. * The hardware has varying behavior in this situation * depending on whether this packet was part of a * stream or not. * * PKT by PKT mode: * The hardware has already acked the complete packet. * If the target honors our outstanding ATN condition, * we should be (or soon will be) in MSGOUT phase. * This will trigger the LQIPHASE_LQ status bit as the * hardware was expecting another LQ. Unexpected * busfree detection is enabled. Once LQIPHASE_LQ is * true (first entry into host message loop is much * the same), we must clear LQIPHASE_LQ and hit * LQIRETRY so the hardware is ready to handle * a future LQ. NONPACKREQ will not be asserted again * once we hit LQIRETRY until another packet is * processed. The target may either go busfree * or start another packet in response to our message. * * Read Streaming P0 asserted: * If we raise ATN and the target completes the entire * stream (P0 asserted during the last packet), the * hardware will ack all data and return to the ISTART * state. When the target reponds to our ATN condition, * LQIPHASE_LQ will be asserted. We should respond to * this with an LQIRETRY to prepare for any future * packets. NONPACKREQ will not be asserted again * once we hit LQIRETRY until another packet is * processed. The target may either go busfree or * start another packet in response to our message. * Busfree detection is enabled. * * Read Streaming P0 not asserted: * If we raise ATN and the target transitions to * MSGOUT in or after a packet where P0 is not * asserted, the hardware will assert LQIPHASE_NLQ. * We should respond to the LQIPHASE_NLQ with an * LQIRETRY. Should the target stay in a non-pkt * phase after we send our message, the hardware * will assert LQIPHASE_LQ. Recovery is then just as * listed above for the read streaming with P0 asserted. * Busfree detection is enabled. */ if (silent == FALSE) printk("LQICRC_NLQ\n"); if (scb == NULL) { printk("%s: No SCB valid for LQICRC_NLQ. " "Resetting bus\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } } else if ((lqistat1 & LQIBADLQI) != 0) { printk("Need to handle BADLQI!\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { if ((curphase & ~P_DATAIN_DT) != 0) { /* Ack the byte. So we can continue. */ if (silent == FALSE) printk("Acking %s to clear perror\n", ahd_lookup_phase_entry(curphase)->phasemsg); ahd_inb(ahd, SCSIDAT); } if (curphase == P_MESGIN) msg_out = MSG_PARITY_ERROR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than NOP. 
*/ ahd->send_msg_perror = msg_out; if (scb != NULL && msg_out == INITIATOR_ERROR) scb->flags |= SCB_TRANSMISSION_ERROR; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) { /* * Clear the sources of the interrupts. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQIINT1, lqistat1); /* * If the "illegal" phase changes were in response * to our ATN to flag a CRC error, AND we ended up * on packet boundaries, clear the error, restart the * LQI manager as appropriate, and go on our merry * way toward sending the message. Otherwise, reset * the bus to clear the error. */ ahd_set_active_fifo(ahd); if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { if ((lqistat1 & LQIPHASE_LQ) != 0) { printk("LQIRETRY for LQIPHASE_LQ\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { printk("LQIRETRY for LQIPHASE_NLQ\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } else panic("ahd_handle_lqiphase_error: No phase errors\n"); ahd_dump_card_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } else { printk("Resetting Channel for LQI Phase error\n"); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } } /* * Packetized unexpected or expected busfree. * Entered in mode based on busfreetime. */ static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) { u_int lqostat1; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); lqostat1 = ahd_inb(ahd, LQOSTAT1); if ((lqostat1 & LQOBUSFREE) != 0) { struct scb *scb; u_int scbid; u_int saved_scbptr; u_int waiting_h; u_int waiting_t; u_int next; /* * The LQO manager detected an unexpected busfree * either: * * 1) During an outgoing LQ. * 2) After an outgoing LQ but before the first * REQ of the command packet. * 3) During an outgoing command packet. * * In all cases, CURRSCB is pointing to the * SCB that encountered the failure. Clean * up the queue, clear SELDO and LQOBUSFREE, * and allow the sequencer to restart the select * out at its lesure. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); scbid = ahd_inw(ahd, CURRSCB); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) panic("SCB not valid during LQOBUSFREE"); /* * Clear the status. */ ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) ahd_outb(ahd, CLRLQOINT1, 0); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_flush_device_writes(ahd); ahd_outb(ahd, CLRSINT0, CLRSELDO); /* * Return the LQO manager to its idle loop. It will * not do this automatically if the busfree occurs * after the first REQ of either the LQ or command * packet or between the LQ and command packet. */ ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); /* * Update the waiting for selection queue so * we restart on the correct SCB. 
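		 * If CURRSCB is not already at WAITING_TID_HEAD, it is
		 * relinked at the head of the queue so the retried
		 * selection goes out first.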
*/ waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); saved_scbptr = ahd_get_scbptr(ahd); if (waiting_h != scbid) { ahd_outw(ahd, WAITING_TID_HEAD, scbid); waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); if (waiting_t == waiting_h) { ahd_outw(ahd, WAITING_TID_TAIL, scbid); next = SCB_LIST_NULL; } else { ahd_set_scbptr(ahd, waiting_h); next = ahd_inw_scbram(ahd, SCB_NEXT2); } ahd_set_scbptr(ahd, scbid); ahd_outw(ahd, SCB_NEXT2, next); } ahd_set_scbptr(ahd, saved_scbptr); if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { if (SCB_IS_SILENT(scb) == FALSE) { ahd_print_path(ahd, scb); printk("Probable outgoing LQ CRC error. " "Retrying command\n"); } scb->crc_retry_count++; } else { ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); ahd_freeze_scb(scb); ahd_freeze_devq(ahd, scb); } /* Return unpausing the sequencer. */ return (0); } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { /* * Ignore what are really parity errors that * occur on the last REQ of a free running * clock prior to going busfree. Some drives * do not properly active negate just before * going busfree resulting in a parity glitch. */ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) printk("%s: Parity on last REQ detected " "during busfree phase.\n", ahd_name(ahd)); #endif /* Return unpausing the sequencer. */ return (0); } if (ahd->src_mode != AHD_MODE_SCSI) { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); ahd_print_path(ahd, scb); printk("Unexpected PKT busfree condition\n"); ahd_dump_card_state(ahd); ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, CAM_UNEXP_BUSFREE); /* Return restarting the sequencer. */ return (1); } printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); ahd_dump_card_state(ahd); /* Restart the sequencer. */ return (1); } /* * Non-packetized unexpected or expected busfree. */ static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; u_int scbid; u_int ppr_busfree; int printerror; /* * Look at what phase we were last in. If its message out, * chances are pretty good that the busfree was in response * to one of our abort requests. */ lastphase = ahd_inb(ahd, LASTPHASE); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); saved_lun = ahd_inb(ahd, SAVED_LUN); target = SCSIID_TARGET(ahd, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); ahd_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, 'A', ROLE_INITIATOR); printerror = 1; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK, TRUE) || ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK_SET, TRUE)) { int found; int sent_msg; if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); printk("Abort for unidentified " "connection completed.\n"); /* restart the sequencer. */ return (1); } sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; ahd_print_path(ahd, scb); printk("SCB %d - Abort%s Completed.\n", SCB_GET_TAG(scb), sent_msg == ABORT_TASK ? 
"" : " Tag"); if (sent_msg == ABORT_TASK) tag = SCB_GET_TAG(scb); if ((scb->flags & SCB_EXTERNAL_RESET) != 0) { /* * This abort is in response to an * unexpected switch to command phase * for a packetized connection. Since * the identify message was never sent, * "saved lun" is 0. We really want to * abort only the SCB that encountered * this error, which could have a different * lun. The SCB will be retried so the OS * will see the UA after renegotiating to * packetized. */ tag = SCB_GET_TAG(scb); saved_lun = scb->hscb->lun; } found = ahd_abort_scbs(ahd, target, 'A', saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printk("found == 0x%x\n", found); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_1B, TARGET_RESET, TRUE)) { ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, FALSE) && ppr_busfree == 0) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; /* * PPR Rejected. * * If the previous negotiation was packetized, * this could be because the device has been * reset without our knowledge. Force our * current negotiation to async and retry the * negotiation. Otherwise retry the command * with non-ppr negotiation. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR negotiation rejected busfree.\n"); #endif tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); /* * The expect PPR busfree handler below * will effect the retry and necessary * abort. */ } else { tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; if (scb != NULL) { /* * Remove any SCBs in the waiting * for selection queue that may * also be for this target so that * command ordering is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-narrow and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("WDTR negotiation rejected busfree.\n"); #endif ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); if (scb != NULL) { /* * Remove any SCBs in the waiting for * selection queue that may also be for * this target so that command ordering * is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-async and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("SDTR negotiation rejected busfree.\n"); #endif ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); if (scb != NULL) { /* * Remove any SCBs in the waiting for * selection queue that may also be for * this target so that command ordering * is preserved. 
*/ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 && ahd_sent_msg(ahd, AHDMSG_1B, INITIATOR_ERROR, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Expected IDE Busfree\n"); #endif printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) && ahd_sent_msg(ahd, AHDMSG_1B, MESSAGE_REJECT, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Expected QAS Reject Busfree\n"); #endif printerror = 0; } } /* * The busfree required flag is honored at the end of * the message phases. We check it last in case we * had to send some other message that caused a busfree. */ if (scb != NULL && printerror != 0 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); ahd_freeze_scb(scb); if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQ_ABORTED); } else { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR Negotiation Busfree.\n"); #endif ahd_done(ahd, scb); } printerror = 0; } if (printerror != 0) { int aborted; aborted = 0; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = SCB_GET_TAG(scb); else tag = SCB_LIST_NULL; ahd_print_path(ahd, scb); aborted = ahd_abort_scbs(ahd, target, 'A', SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printk("%s: ", ahd_name(ahd)); } printk("Unexpected busfree %s, %d SCBs aborted, " "PRGMCNT == 0x%x\n", ahd_lookup_phase_entry(lastphase)->phasemsg, aborted, ahd_inw(ahd, PRGMCNT)); ahd_dump_card_state(ahd); if (lastphase != P_BUSFREE) ahd_force_renegotiation(ahd, &devinfo); } /* Always restart the sequencer. */ return (1); } static void ahd_handle_proto_violation(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahd_fetch_devinfo(ahd, &devinfo); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); seq_flags = ahd_inb(ahd, SEQ_FLAGS); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; lastphase = ahd_inb(ahd, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahd_print_devinfo(ahd, &devinfo); printk("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. */ ahd_print_devinfo(ahd, &devinfo); printk("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahd_print_path(ahd, scb); printk("No or incomplete CDB sent to device.\n"); } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. 
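 * (If the bus phase allows it, the abort goes out via the common path at the end of this function: selection is disabled, ATN is asserted, and SCB_ABORT is set on the SCB so an abort message is built; otherwise the bus is reset.)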
*/ ahd_print_path(ahd, scb); printk("Completed command without status.\n"); } else { ahd_print_path(ahd, scb); printk("Unknown protocol violation.\n"); ahd_dump_card_state(ahd); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahd_reset_channel(ahd, 'A', TRUE); printk("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahd_name(ahd), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); ahd->msgout_buf[0] = ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahd_print_path(ahd, scb); scb->flags |= SCB_ABORT; } printk("Protocol violation %s. Attempting to abort.\n", ahd_lookup_phase_entry(curphase)->phasemsg); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. */ static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printk("Forcing renegotiation\n"); } #endif targ_info = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahd_update_neg_request(ahd, devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } #define AHD_MAX_STEPS 2000 static void ahd_clear_critical_section(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int stepping; int steps; int first_instr; u_int simode0; u_int simode1; u_int simode3; u_int lqimode0; u_int lqimode1; u_int lqomode0; u_int lqomode1; if (ahd->num_critical_sections == 0) return; stepping = FALSE; steps = 0; first_instr = 0; simode0 = 0; simode1 = 0; simode3 = 0; lqimode0 = 0; lqimode1 = 0; lqomode0 = 0; lqomode1 = 0; saved_modes = ahd_save_modes(ahd); for (;;) { struct cs *cs; u_int seqaddr; u_int i; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); seqaddr = ahd_inw(ahd, CURADDR); cs = ahd->critical_sections; for (i = 0; i < ahd->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahd->num_critical_sections) break; if (steps > AHD_MAX_STEPS) { printk("%s: Infinite loop in critical section\n" "%s: First Instruction 0x%x now 0x%x\n", ahd_name(ahd), ahd_name(ahd), first_instr, seqaddr); ahd_dump_card_state(ahd); panic("critical section loop"); } steps++; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: Single stepping at 0x%x\n", ahd_name(ahd), seqaddr); #endif if (stepping == FALSE) { first_instr = seqaddr; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); simode3 = ahd_inb(ahd, SIMODE3); lqimode0 = ahd_inb(ahd, LQIMODE0); lqimode1 = ahd_inb(ahd, LQIMODE1); lqomode0 = ahd_inb(ahd, LQOMODE0); lqomode1 = ahd_inb(ahd, LQOMODE1); ahd_outb(ahd, SIMODE0, 0); ahd_outb(ahd, SIMODE3, 0); ahd_outb(ahd, LQIMODE0, 0); ahd_outb(ahd, LQIMODE1, 0); ahd_outb(ahd, LQOMODE0, 0); ahd_outb(ahd, LQOMODE1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); simode1 = ahd_inb(ahd, SIMODE1); /* * We don't clear ENBUSFREE. 
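 * (ENBUSFREE is the only SIMODE1 interrupt left armed while stepping; note the 'simode1 & ENBUSFREE' write just below.)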
Unfortunately * we cannot re-enable busfree detection within * the current connection, so we must leave it * on while single stepping. */ ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE); ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); stepping = TRUE; } ahd_outb(ahd, CLRSINT1, CLRBUSFREE); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); ahd_outb(ahd, HCNTRL, ahd->unpause); while (!ahd_is_paused(ahd)) ahd_delay(200); ahd_update_modes(ahd); } if (stepping) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, SIMODE0, simode0); ahd_outb(ahd, SIMODE3, simode3); ahd_outb(ahd, LQIMODE0, lqimode0); ahd_outb(ahd, LQIMODE1, lqimode1); ahd_outb(ahd, LQOMODE0, lqomode0); ahd_outb(ahd, LQOMODE1, lqomode1); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); ahd_outb(ahd, SIMODE1, simode1); /* * SCSIINT seems to glitch occasionally when * the interrupt masks are restored. Clear SCSIINT * one more time so that only persistent errors * are seen as a real interrupt. */ ahd_outb(ahd, CLRINT, CLRSCSIINT); } ahd_restore_modes(ahd, saved_modes); } /* * Clear any pending interrupt status. */ static void ahd_clear_intstat(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); /* Clear any interrupt conditions this may have caused */ ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ |CLRLQOATNPKT|CLRLQOTCRC); ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT0, 0); ahd_outb(ahd, CLRLQOINT1, 0); } ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO |CLRIOERR|CLROVERRUN); ahd_outb(ahd, CLRINT, CLRSCSIINT); } /**************************** Debugging Routines ******************************/ #ifdef AHD_DEBUG uint32_t ahd_debug = AHD_DEBUG_OPTS; #endif #if 0 void ahd_print_scb(struct scb *scb) { struct hardware_scb *hscb; int i; hscb = scb->hscb; printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printk("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) printk("%#02x", hscb->shared_data.idata.cdb[i]); printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), ahd_le32toh(hscb->datacnt), ahd_le32toh(hscb->sgptr), SCB_GET_TAG(scb)); ahd_dump_sglist(scb); } #endif /* 0 */ /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. 
*/ static struct ahd_tmode_tstate * ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) { struct ahd_tmode_tstate *master_tstate; struct ahd_tmode_tstate *tstate; int i; master_tstate = ahd->enabled_targets[ahd->our_id]; if (ahd->enabled_targets[scsi_id] != NULL && ahd->enabled_targets[scsi_id] != master_tstate) panic("%s: ahd_alloc_tstate - Target already allocated", ahd_name(ahd)); tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); for (i = 0; i < 16; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahd->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHD_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) { struct ahd_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (scsi_id == ahd->our_id && force == FALSE) return; tstate = ahd->enabled_targets[scsi_id]; kfree(tstate); ahd->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest period to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahd_transinfo *transinfo; u_int maxsync; if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHD_SYNCRATE_PACED; } else { maxsync = AHD_SYNCRATE_ULTRA; /* Can't do DT related options on an SE bus */ *ppr_options &= MSG_EXT_PPR_QAS_REQ; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. */ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; } else { *period = max(*period, (u_int)transinfo->period); ahd_find_syncrate(ahd, period, ppr_options, maxsync); } } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. 
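 * For example, on a paced-capable LVD segment ahd_devlimited_syncrate() passes in maxsync == AHD_SYNCRATE_PACED, so any faster request is clamped up to that factor; a request slower than AHD_SYNCRATE_MIN is reported as 0 (asynchronous); and options such as DT and RTI are stripped when the resulting factor is too slow to support them.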
*/ void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, u_int *ppr_options, u_int maxsync) { if (*period < maxsync) *period = maxsync; if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 && *period > AHD_SYNCRATE_MIN_DT) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; if (*period > AHD_SYNCRATE_MIN) *period = 0; /* Honor PPR option conformance rules. */ if (*period > AHD_SYNCRATE_PACED) *ppr_options &= ~MSG_EXT_PPR_RTI; if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) *ppr_options &= MSG_EXT_PPR_QAS_REQ; /* Skip all PACED only entries if IU is not available */ if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 && *period < AHD_SYNCRATE_DT) *period = AHD_SYNCRATE_DT; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && *period < AHD_SYNCRATE_ULTRA2) *period = AHD_SYNCRATE_ULTRA2; } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ static void ahd_validate_offset(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int period, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (period == 0) maxoffset = 0; else if (period <= AHD_SYNCRATE_PACED) { if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) maxoffset = MAX_OFFSET_PACED_BUG; else maxoffset = MAX_OFFSET_PACED; } else maxoffset = MAX_OFFSET_NON_PACED; *offset = min(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = min(*offset, (u_int)tinfo->user.offset); else *offset = min(*offset, (u_int)tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ static void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahd->features & AHD_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } fallthrough; case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = min((u_int)tinfo->user.width, *bus_width); else *bus_width = min((u_int)tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_tmode_tstate *tstate, struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHD_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. 
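 * (Setting curr.width, curr.period and curr.offset to the UNKNOWN sentinel values guarantees the curr-vs-goal comparison below sees a mismatch, so this target's bit stays set in tstate->auto_negotiate until an actual negotiation refreshes the current parameters.)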
*/ if ((ahd->features & AHD_WIDE) != 0) tinfo->curr.width = AHD_WIDTH_UNKNOWN; tinfo->curr.period = AHD_PERIOD_UNKNOWN; tinfo->curr.offset = AHD_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHD_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; if (period == 0 || offset == 0) { period = 0; offset = 0; } tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHD_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHD_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { update_needed++; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { if (offset != 0) { int options; printk("%s: target %d synchronous with " "period = 0x%x, offset = 0x%x", ahd_name(ahd), devinfo->target, period, offset); options = 0; if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { printk("(RDSTRM"); options++; } if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { printk("%s", options ? "|DT" : "(DT"); options++; } if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { printk("%s", options ? "|IU" : "(IU"); options++; } if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { printk("%s", options ? "|RTI" : "(RTI"); options++; } if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { printk("%s", options ? "|QAS" : "(QAS"); options++; } if (options != 0) printk(")\n"); else printk("\n"); } else { printk("%s: target %d using " "asynchronous transfers%s\n", ahd_name(ahd), devinfo->target, (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 ? "(QAS)" : ""); } } } /* * Always refresh the neg-table to handle the case of the * sequencer setting the ENATNO bit for a MK_MESSAGE request. * We will always renegotiate in that case if this is a * packetized request. Also manage the busfree expected flag * from this common routine so that we catch changes due to * WDTR or SDTR messages. 
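 * (Concretely: when the IU_REQ PPR bit is changing while a message exchange is in flight, MSG_FLAG_EXPECT_PPR_BUSFREE and MSG_FLAG_IU_REQ_CHANGED are set below so the busfree that follows the PPR is treated as expected rather than as an error.)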
*/ if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); if (ahd->msg_type != MSG_TYPE_NONE) { if ((old_ppr & MSG_EXT_PPR_IU_REQ) != (ppr_options & MSG_EXT_PPR_IU_REQ)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printk("Expecting IU Change busfree\n"); } #endif ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; } if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR with IU_REQ outstanding\n"); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; } } } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHD_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { update_needed++; tinfo->curr.width = width; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { printk("%s: target %d using %dbit transfers\n", ahd_name(ahd), devinfo->target, 8 * (0x01 << width)); } } if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the current state of tagged queuing for a given target. 
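 * The alg argument is one of the ahd_queue_alg values (AHD_QUEUE_NONE and AHD_QUEUE_BASIC are used by the message-reject handler later in this file), and the AC_TRANSFER_NEG async event lets the upper layer pick up the change.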
*/ static void ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd, struct ahd_devinfo *devinfo, ahd_queue_alg alg) { struct scsi_device *sdev = cmd->device; ahd_platform_set_tags(ahd, sdev, devinfo, alg); ahd_send_async(ahd, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG); } static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo) { ahd_mode_state saved_modes; u_int period; u_int ppr_opts; u_int con_opts; u_int offset; u_int saved_negoaddr; uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_negoaddr = ahd_inb(ahd, NEGOADDR); ahd_outb(ahd, NEGOADDR, devinfo->target); period = tinfo->period; offset = tinfo->offset; memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); con_opts = 0; if (period == 0) period = AHD_SYNCRATE_ASYNC; if (period == AHD_SYNCRATE_160) { if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { /* * When the SPI4 spec was finalized, PACE transfers * was not made a configurable option in the PPR * message. Instead it is assumed to be enabled for * any syncrate faster than 80MHz. Nevertheless, * Harpoon2A4 allows this to be configurable. * * Harpoon2A4 also assumes at most 2 data bytes per * negotiated REQ/ACK offset. Paced transfers take * 4, so we must adjust our offset. */ ppr_opts |= PPROPT_PACE; offset *= 2; /* * Harpoon2A assumed that there would be a * fallback rate between 160MHz and 80MHz, * so 7 is used as the period factor rather * than 8 for 160MHz. */ period = AHD_SYNCRATE_REVA_160; } if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; } else { /* * Precomp should be disabled for non-paced transfers. */ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) { /* * Slow down our CRC interval to be * compatible with non-packetized * U160 devices that can't handle a * CRC at full speed. */ con_opts |= ENSLOWCRC; } if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { /* * On H2A4, revert to a slower slewrate * on non-paced transfers. */ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK; } } ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); ahd_outb(ahd, NEGPERIOD, period); ahd_outb(ahd, NEGPPROPTS, ppr_opts); ahd_outb(ahd, NEGOFFSET, offset); if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) con_opts |= WIDEXFER; /* * Slow down our CRC interval to be * compatible with packetized U320 devices * that can't handle a CRC at full speed */ if (ahd->features & AHD_AIC79XXB_SLOWCRC) { con_opts |= ENSLOWCRC; } /* * During packetized transfers, the target will * give us the opportunity to send command packets * without us asserting attention. */ if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) con_opts |= ENAUTOATNO; ahd_outb(ahd, NEGCONOPTS, con_opts); ahd_outb(ahd, NEGOADDR, saved_negoaddr); ahd_restore_modes(ahd, saved_modes); } /* * When the transfer settings for a connection change, setup for * negotiation in pending SCBs to effect the change as quickly as * possible. 
We also cancel any negotiations that are scheduled * for inflight SCBs that have not been started yet. */ static void ahd_update_pending_scbs(struct ahd_softc *ahd) { struct scb *pending_scb; int pending_scb_count; int paused; u_int saved_scbptr; ahd_mode_state saved_modes; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. We can only safely * clear the negotiation required flag (setting requires the * execution queue to be modified) and this is only possible * if we are not already attempting to select out for this * SCB. For this reason, all callers only call this routine * if we are changing the negotiation settings for the currently * active transaction on the bus. */ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { struct ahd_devinfo devinfo; struct ahd_tmode_tstate *tstate; ahd_scb_devinfo(ahd, &devinfo, pending_scb); ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_scb->hscb->control &= ~MK_MESSAGE; } ahd_sync_scb(ahd, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } /* * Force the sequencer to reinitialize the selection for * the command at the head of the execution queue if it * has already been setup. The negotiation changes may * effect whether we select-out with ATN. It is only * safe to clear ENSELO when the bus is not free and no * selection is in progres or completed. */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); saved_scbptr = ahd_get_scbptr(ahd); /* Ensure that the hscbs down on the card match the new information */ LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { u_int scb_tag; u_int control; scb_tag = SCB_GET_TAG(pending_scb); ahd_set_scbptr(ahd, scb_tag); control = ahd_inb_scbram(ahd, SCB_CONTROL); control &= ~MK_MESSAGE; control |= pending_scb->hscb->control & MK_MESSAGE; ahd_outb(ahd, SCB_CONTROL, control); } ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); if (paused == 0) ahd_unpause(ahd); } /**************************** Pathing Information *****************************/ static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { ahd_mode_state saved_modes; u_int saved_scsiid; role_t role; int our_id; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if (ahd_inb(ahd, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahd_inb(ahd, TARGIDIN) & OID; } else if (role == ROLE_TARGET) our_id = ahd_inb(ahd, TOWNID); else our_id = ahd_inb(ahd, IOWNID); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); ahd_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahd, saved_scsiid), ahd_inb(ahd, SAVED_LUN), SCSIID_CHANNEL(ahd, saved_scsiid), role); ahd_restore_modes(ahd, saved_modes); } void ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A', devinfo->target, 
devinfo->lun); } static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase) { const struct ahd_phase_table_entry *entry; const struct ahd_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. */ last_entry = &ahd_phase_table[num_phases]; for (entry = ahd_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->hscb->control & TARGET_SCB) != 0) role = ROLE_TARGET; ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); } /************************ Message Phase Processing ****************************/ /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and beging handing * the message phase(s) manually. */ static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (ahd_currently_packetized(ahd)) ahd->msg_flags |= MSG_FLAG_PACKETIZED; if (ahd->send_msg_perror && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Setting up for Parity Error delivery\n"); #endif return; } else if (scb == NULL) { printk("%s: WARNING. No pending message for " "I_T msgin. Issuing NO-OP\n", ahd_name(ahd)); ahd->msgout_buf[ahd->msgout_index++] = NOP; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && (scb->flags & SCB_PACKETIZED) == 0 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahd->msgout_buf[ahd->msgout_index++] = identify_msg; ahd->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); ahd->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahd->msgout_buf[ahd->msgout_index++] = TARGET_RESET; ahd->msgout_len++; ahd_print_path(ahd, scb); printk("Bus Device Reset Message Sent\n"); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. 
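 * (Writing 0 to SCSISEQ0 below also drops ENSELO, so the chip cannot start a new selection while the reset-induced busfree is being handled.)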
*/ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK; } else { ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK_SET; } ahd->msgout_len++; ahd_print_path(ahd, scb); printk("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahd_build_transfer_msg(ahd, devinfo); /* * Clear our selection hardware in advance of potential * PPR IU status change busfree. We may have an entry in * the waiting Q for this target, and we don't want to go * about selecting while we handle the busfree and blow * it away. */ ahd_outb(ahd, SCSISEQ0, 0); } else { printk("ahd_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. 
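 * (A REQ/ACK offset of zero means asynchronous transfers, which is why ahd_construct_sdtr() substitutes AHD_ASYNC_XFER_PERIOD whenever it is handed offset == 0.)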
*/ if ((ahd->features & AHD_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahd_print_devinfo(ahd, devinfo); printk("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahd_validate_offset(ahd, tinfo, period, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahd_construct_ppr(ahd, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahd_construct_sdtr(ahd, devinfo, period, offset); } } else { ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. */ static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHD_ASYNC_XFER_PERIOD; ahd->msgout_index += spi_populate_sync_msg( ahd->msgout_buf + ahd->msgout_index, period, offset); ahd->msgout_len += 5; if (bootverbose) { printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiateion message in our message * buffer based on the input parameters. */ static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width) { ahd->msgout_index += spi_populate_width_msg( ahd->msgout_buf + ahd->msgout_index, bus_width); ahd->msgout_len += 4; if (bootverbose) { printk("(%s:%c:%d:%d): Sending WDTR %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { /* * Always request precompensation from * the other target if we are running * at paced syncrates. */ if (period <= AHD_SYNCRATE_PACED) ppr_options |= MSG_EXT_PPR_PCOMP_EN; if (offset == 0) period = AHD_ASYNC_XFER_PERIOD; ahd->msgout_index += spi_populate_ppr_msg( ahd->msgout_buf + ahd->msgout_index, period, offset, bus_width, ppr_options); ahd->msgout_len += 8; if (bootverbose) { printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. */ static void ahd_clear_msg_state(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd->send_msg_perror = 0; ahd->msg_flags = MSG_FLAG_NONE; ahd->msgout_len = 0; ahd->msgin_index = 0; ahd->msg_type = MSG_TYPE_NONE; if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahd_outb(ahd, CLRSINT1, CLRATNO); } ahd_outb(ahd, MSG_OUT, NOP); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); ahd_restore_modes(ahd, saved_modes); } /* * Manual message loop handler. 
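 * The switch below runs one of four per-byte handlers selected by ahd->msg_type (initiator message-out/in and target message-in/out); each pass moves a single byte across the bus, and end_session is set on a phase change or once the message is complete.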
*/ static void ahd_handle_message_phase(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; u_int bus_phase; int end_session; ahd_fetch_devinfo(ahd, &devinfo); end_session = FALSE; bus_phase = ahd_inb(ahd, LASTPHASE); if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { printk("LQIRETRY for LQIPHASE_OUTPKT\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } reswitch: switch (ahd->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printk("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd_outb(ahd, CLRSINT1, CLRATNO); ahd->send_msg_perror = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahd->send_msg_perror) { ahd_outb(ahd, CLRSINT1, CLRATNO); ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahd->send_msg_perror); #endif /* * If we are notifying the target of a CRC error * during packetized operations, the target is * within its rights to acknowledge our message * with a busfree. */ if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 && ahd->send_msg_perror == INITIATOR_ERROR) ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahd->msgout_index = 0; ahd_assert_atn(ahd); } lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahd_outb(ahd, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. */ ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahd->msgout_buf[ahd->msgout_index]); #endif ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printk("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahd->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahd->send_msg_perror != 0 || (ahd->msgout_len != 0 && ahd->msgout_index == 0))) { ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahd->msgin_buf[ahd->msgin_index]); #endif message_done = ahd_parse_msg(ahd, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. 
*/ ahd->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahd->msgout_len != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printk("Asserting ATN for response\n"); } #endif ahd_assert_atn(ahd); } } else ahd->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahd_outb(ahd, CLRSINT1, CLRREQINIT); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); if (ahd->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 && ahd->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); ahd->msgin_index = 0; /* Dummy read to REQ for first byte */ ahd_inb(ahd, SCSIDAT); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); msgdone = ahd_parse_msg(ahd, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahd->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahd->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahd->msgout_len != 0) { ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd->msg_type = MSG_TYPE_TARGET_MSGIN; ahd->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { printk("%s: Returning to Idle Loop\n", ahd_name(ahd)); ahd_clear_msg_state(ahd); /* * Perform the equivalent of a clear_target_state. 
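 * (The writes below force LASTPHASE to bus-free, reset SEQ_FLAGS to the not-identified/no-CDB state, and the FASTMODE|SEQRESET write restarts the sequencer program, matching the "Returning to Idle Loop" message above.)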
*/ ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); } else { ahd_clear_msg_state(ahd); ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); } } } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. */ static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahd->msgout_len) { if (ahd->msgout_buf[index] == EXTENDED_MESSAGE) { u_int end_index; end_index = index + 1 + ahd->msgout_buf[index + 1]; if (ahd->msgout_buf[index+2] == msgval && type == AHDMSG_EXT) { if (full) { if (ahd->msgout_index > end_index) found = TRUE; } else if (ahd->msgout_index > index) found = TRUE; } index = end_index; } else if (ahd->msgout_buf[index] >= SIMPLE_QUEUE_TAG && ahd->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param*/ index += 2; } else { /* Single byte message */ if (type == AHDMSG_1B && ahd->msgout_index > index && (ahd->msgout_buf[index] == msgval || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 && msgval == MSG_IDENTIFYFLAG))) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int reject; int done; int response; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. */ switch (ahd->msgin_buf[0]) { case DISCONNECT: case SAVE_POINTERS: case COMMAND_COMPLETE: case RESTORE_POINTERS: case IGNORE_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own. */ done = MSGLOOP_TERMINATED; break; case MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); fallthrough; case NOP: done = MSGLOOP_MSGCOMPLETE; break; case EXTENDED_MESSAGE: { /* Wait for enough of the message to begin validation */ if (ahd->msgin_index < 2) break; switch (ahd->msgin_buf[2]) { case EXTENDED_SDTR: { u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. 
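 * (On the wire an SDTR is EXTENDED MESSAGE (0x01), a length byte of MSG_EXT_SDTR_LEN, the EXTENDED_SDTR code, the transfer period factor and the REQ/ACK offset, so msgin_buf[3] and msgin_buf[4] below are the period and offset arguments.)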
*/ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahd->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahd->msgin_buf[4]; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, tinfo->curr.width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, ahd->msgin_buf[3], saved_offset, period, offset); } ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_sdtr(ahd, devinfo, period, offset); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case EXTENDED_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahd->msgin_buf[3]; saved_width = bus_width; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printk("(%s:%c:%d:%d): requested %dBit " "transfers. Rejecting...\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_wdtr(ahd, devinfo, bus_width); ahd->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_ALWAYS); ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. 
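 * (The AHD_NEG_ALWAYS update above reset the current sync parameters to the unknown values, so ahd_build_transfer_msg() below will see a sync mismatch and construct a negotiation message.)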
*/ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case EXTENDED_PPR: { u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahd->msgin_buf[3]; offset = ahd->msgin_buf[5]; bus_width = ahd->msgin_buf[6]; saved_width = bus_width; ppr_options = ahd->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. */ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period <= 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Transfer options are only available if we * are negotiating wide. */ if (bus_width == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, bus_width, devinfo->role); if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. */ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; } } else { if (devinfo->role != ROLE_TARGET) printk("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); else printk("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_ppr(ahd, devinfo, period, offset, bus_width, ppr_options); ahd->msgout_index = 0; response = TRUE; } if (bootverbose) { printk("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahd->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. 
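 * The common reject path at the bottom of ahd_parse_msg() answers with a single-byte MESSAGE REJECT, the response required for an unsupported message.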
*/ reject = TRUE; break; } break; } #ifdef AHD_TARGET_MODE case TARGET_RESET: ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; case ABORT_TASK: case ABORT_TASK_SET: case CLEAR_TASK_SET: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahd->msgin_buf[0] == ABORT_TASK) tag = ahd_inb(ahd, INITIATOR_TAG); ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, ahd->msgin_buf[0], /*arg*/tag); ahd_send_lstate_events(ahd, lstate); } } ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; } #endif case QAS_REQUEST: #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("%s: QAS request. SCSISIGI == 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; fallthrough; case TERMINATE_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahd->msgout_index = 0; ahd->msgout_len = 1; ahd->msgout_buf[0] = MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahd->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. */ struct scb *scb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahd_inb(ahd, LAST_MSG); if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) { if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/TRUE) && tinfo->goal.period <= AHD_SYNCRATE_PACED) { /* * Target may not like our SPI-4 PPR Options. * Attempt to negotiate 80MHz which will turn * off these options. */ if (bootverbose) { printk("(%s:%c:%d:%d): PPR Rejected. " "Trying simple U160 PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.period = AHD_SYNCRATE_DT; tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; } else { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printk("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printk("(%s:%c:%d:%d): refuses WIDE negotiation. 
Using " "8bit transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); printk("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG); if (tag_type == SIMPLE_QUEUE_TAG) { printk("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE); mask = ~0x23; } else { printk("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, tag_type == ORDERED_QUEUE_TAG ? "ordered" : "head of queue"); ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & mask); scb->hscb->control &= mask; ahd_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/SIMPLE_QUEUE_TAG); ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); ahd_assert_atn(ahd); ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), SCB_GET_TAG(scb)); /* * Requeue all tagged commands for this target * currently in our possession so they can be * converted to untagged commands. */ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { /* * Most likely the device believes that we had * previously negotiated packetized. */ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; ahd_force_renegotiation(ahd, devinfo); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else { /* * Otherwise, we ignore it. */ printk("%s:%c:%d: Message reject for %x -- ignored\n", ahd_name(ahd), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. */ static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. 
*/ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. */ } else { uint32_t data_cnt; uint64_t data_addr; uint32_t sglen; /* Pull in the rest of the sgptr */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHD_SG_LEN_MASK; } data_addr = ahd_inq(ahd, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; sglen = ahd_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); data_addr = ahd_le64toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; sglen = ahd_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); data_addr = ahd_le32toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahd_outb(ahd, SCB_TASK_ATTRIBUTE, ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) ^ SCB_XFERLEN_ODD); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); /* * The FIFO's pointers will be updated if/when the * sequencer re-enters a data phase. */ } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. */ static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int scb_index; u_int wait; uint32_t sgptr; uint32_t resid; uint64_t dataptr; AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * Release and reacquire the FIFO so we * have a clean slate. 
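 *
 * The code below first clears the channel (CLRCHN) and then allows the
 * FIFO up to 1000 polls of MDFFSTAT, spaced by ahd_delay(100), to
 * report FIFOFREE before forcing it free with RSTCHN|CLRSHCNT.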
*/ ahd_outb(ahd, DFFSXFRCTL, CLRCHN); wait = 1000; while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) ahd_delay(100); if (wait == 0) { ahd_print_path(ahd, scb); printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, ahd_inb(ahd, DFFSTAT) | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); /* * Determine initial values for data_addr and data_cnt * for resuming the data phase. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le64toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outl(ahd, HADDR + 4, dataptr >> 32); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le32toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outb(ahd, HADDR + 4, (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); } ahd_outl(ahd, HADDR, dataptr); ahd_outb(ahd, HCNT + 2, resid >> 16); ahd_outb(ahd, HCNT + 1, resid >> 8); ahd_outb(ahd, HCNT, resid); } /* * Handle the effects of issuing a bus device reset message. */ static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level) { #ifdef AHD_TARGET_MODE struct ahd_tmode_tstate* tstate; #endif int found; found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, lun, SCB_LIST_NULL, devinfo->role, status); #ifdef AHD_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. */ tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { u_int cur_lun; u_int max_lun; if (lun != CAM_LUN_WILDCARD) { cur_lun = 0; max_lun = AHD_NUM_LUNS - 1; } else { cur_lun = lun; max_lun = lun; } for (;cur_lun <= max_lun; cur_lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[cur_lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, TARGET_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && bootverbose) printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), message, devinfo->channel, devinfo->target, found); } #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. 
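 *
 * As a sketch of that convention, a hypothetical helper queueing a
 * single message byte would append rather than overwrite (msg_byte is
 * only a placeholder here):
 *
 *	ahd->msgout_buf[ahd->msgout_index++] = msg_byte;
 *	ahd->msgout_len++;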
*/ ahd->msgout_index = 0; ahd->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahd_build_transfer_msg(ahd, devinfo); else panic("ahd_intr: AWAITING target message with no message"); ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ static u_int ahd_sglist_size(struct ahd_softc *ahd) { bus_size_t list_size; list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; return (list_size); } /* * Calculate the optimum S/G List allocation size. S/G elements used * for a given transaction must be physically contiguous. Assume the * OS will allocate full pages to us, so it doesn't make sense to request * less than a page. */ static u_int ahd_sglist_allocsize(struct ahd_softc *ahd) { bus_size_t sg_list_increment; bus_size_t sg_list_size; bus_size_t max_list_size; bus_size_t best_list_size; /* Start out with the minimum required for AHD_NSEG. */ sg_list_increment = ahd_sglist_size(ahd); sg_list_size = sg_list_increment; /* Get us as close as possible to a page in size. */ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) sg_list_size += sg_list_increment; /* * Try to reduce the amount of wastage by allocating * multiple pages. */ best_list_size = sg_list_size; max_list_size = roundup(sg_list_increment, PAGE_SIZE); if (max_list_size < 4 * PAGE_SIZE) max_list_size = 4 * PAGE_SIZE; if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); while ((sg_list_size + sg_list_increment) <= max_list_size && (sg_list_size % PAGE_SIZE) != 0) { bus_size_t new_mod; bus_size_t best_mod; sg_list_size += sg_list_increment; new_mod = sg_list_size % PAGE_SIZE; best_mod = best_list_size % PAGE_SIZE; if (new_mod > best_mod || new_mod == 0) { best_list_size = sg_list_size; } } return (best_list_size); } /* * Allocate a controller structure for a new device * and perform initial initialization. 
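 *
 * Note that this routine takes ownership of the caller-supplied name
 * string: on any allocation failure it frees the name (and any
 * partially constructed softc) and returns NULL.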
*/ struct ahd_softc * ahd_alloc(void *platform_arg, char *name) { struct ahd_softc *ahd; ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC); if (!ahd) { printk("aic7xxx: cannot malloc softc!\n"); kfree(name); return NULL; } ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC); if (ahd->seep_config == NULL) { kfree(ahd); kfree(name); return (NULL); } LIST_INIT(&ahd->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahd->name = name; ahd->unit = -1; ahd->description = NULL; ahd->bus_description = NULL; ahd->channel = 'A'; ahd->chip = AHD_NONE; ahd->features = AHD_FENONE; ahd->bugs = AHD_BUGNONE; ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; timer_setup(&ahd->stat_timer, ahd_stat_timer, 0); ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; ahd->int_coalescing_stop_threshold = AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { printk("%s: scb size = 0x%x, hscb size = 0x%x\n", ahd_name(ahd), (u_int)sizeof(struct scb), (u_int)sizeof(struct hardware_scb)); } #endif if (ahd_platform_alloc(ahd, platform_arg) != 0) { ahd_free(ahd); ahd = NULL; } return (ahd); } int ahd_softc_init(struct ahd_softc *ahd) { ahd->unpause = 0; ahd->pause = PAUSE; return (0); } void ahd_set_unit(struct ahd_softc *ahd, int unit) { ahd->unit = unit; } void ahd_set_name(struct ahd_softc *ahd, char *name) { kfree(ahd->name); ahd->name = name; } void ahd_free(struct ahd_softc *ahd) { int i; switch (ahd->init_level) { default: case 5: ahd_shutdown(ahd); fallthrough; case 4: ahd_dmamap_unload(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); fallthrough; case 3: ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, ahd->shared_data_map.dmamap); ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); fallthrough; case 2: ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); break; case 1: break; case 0: break; } ahd_platform_free(ahd); ahd_fini_scbdata(ahd); for (i = 0; i < AHD_NUM_TARGETS; i++) { struct ahd_tmode_tstate *tstate; tstate = ahd->enabled_targets[i]; if (tstate != NULL) { #ifdef AHD_TARGET_MODE int j; for (j = 0; j < AHD_NUM_LUNS; j++) { struct ahd_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); kfree(lstate); } } #endif kfree(tstate); } } #ifdef AHD_TARGET_MODE if (ahd->black_hole != NULL) { xpt_free_path(ahd->black_hole->path); kfree(ahd->black_hole); } #endif kfree(ahd->name); kfree(ahd->seep_config); kfree(ahd->saved_stack); kfree(ahd); return; } static void ahd_shutdown(void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc *)arg; /* * Stop periodic timer callbacks. */ del_timer_sync(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahd_intr_enable(). 
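 *
 * A full re-initialization therefore follows the sequence used by
 * ahd_resume() (shown here only as a sketch):
 *
 *	ahd_reset(ahd, TRUE);
 *	ahd_intr_enable(ahd, TRUE);
 *	ahd_restart(ahd);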
*/ int ahd_reset(struct ahd_softc *ahd, int reinit) { u_int sxfrctl1; int wait; uint32_t cmd; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus. */ ahd_pause(ahd); ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sxfrctl1 = ahd_inb(ahd, SXFRCTL1); cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { uint32_t mod_cmd; /* * A4 Razor #632 * During the assertion of CHIPRST, the chip * does not disable its parity logic prior to * the start of the reset. This may cause a * parity error to be detected and thus a * spurious SERR or PERR assertion. Disable * PERR and SERR responses during the CHIPRST. */ mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, mod_cmd, /*bytes*/2); } ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. */ wait = 1000; do { ahd_delay(1000); } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printk("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahd_name(ahd)); } ahd_outb(ahd, HCNTRL, ahd->pause); if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { /* * Clear any latched PCI error status and restore * previous SERR and PERR response enables. */ ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, 0xFF, /*bytes*/1); ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); } /* * Mode should be SCSI after a chip reset, but lets * set it just to be safe. We touch the MODE_PTR * register directly so as to bypass the lazy update * code in ahd_set_modes(). */ ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); /* * Restore SXFRCTL1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); ahd_outb(ahd, SXFRCTL1, sxfrctl1); /* Determine chip configuration */ ahd->features &= ~AHD_WIDE; if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) ahd->features |= AHD_WIDE; /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking. */ if (reinit != 0) ahd_chip_init(ahd); return (0); } /* * Determine the number of SCBs available on the controller */ static int ahd_probe_scbs(struct ahd_softc *ahd) { int i; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); for (i = 0; i < AHD_SCB_MAX; i++) { int j; ahd_set_scbptr(ahd, i); ahd_outw(ahd, SCB_BASE, i); for (j = 2; j < 64; j++) ahd_outb(ahd, SCB_BASE+j, 0); /* Start out life as unallocated (needing an abort) */ ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); if (ahd_inw_scbram(ahd, SCB_BASE) != i) break; ahd_set_scbptr(ahd, 0); if (ahd_inw_scbram(ahd, SCB_BASE) != 0) break; } return (i); } static void ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { dma_addr_t *baddr; baddr = (dma_addr_t *)arg; *baddr = segs->ds_addr; } static void ahd_initialize_hscbs(struct ahd_softc *ahd) { int i; for (i = 0; i < ahd->scb_data.maxhscbs; i++) { ahd_set_scbptr(ahd, i); /* Clear the control byte. 
*/ ahd_outb(ahd, SCB_CONTROL, 0); /* Set the next pointer */ ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); } } static int ahd_init_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; int i; scb_data = &ahd->scb_data; TAILQ_INIT(&scb_data->free_scbs); for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) LIST_INIT(&scb_data->free_scb_lists[i]); LIST_INIT(&scb_data->any_dev_free_scb_list); SLIST_INIT(&scb_data->hscb_maps); SLIST_INIT(&scb_data->sg_maps); SLIST_INIT(&scb_data->sense_maps); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahd_probe_scbs(ahd); if (scb_data->maxhscbs == 0) { printk("%s: No SCB space found\n", ahd_name(ahd)); return (ENXIO); } ahd_initialize_hscbs(ahd); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* DMA tag for our S/G structures. */ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, ahd_sglist_allocsize(ahd), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), ahd_sglist_allocsize(ahd)); #endif scb_data->init_level++; /* DMA tag for our sense buffers. We allocate in page sized chunks */ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ ahd_alloc_scbs(ahd); if (scb_data->numscbs == 0) { printk("%s: ahd_init_scbdata - " "Unable to allocate initial scbs\n", ahd_name(ahd)); goto error_exit; } /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static struct scb * ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) { struct scb *scb; /* * Look on the pending list. */ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (SCB_GET_TAG(scb) == tag) return (scb); } /* * Then on all of the collision free lists. */ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; list_scb = scb; do { if (SCB_GET_TAG(list_scb) == tag) return (list_scb); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb); } /* * And finally on the generic free list. 
*/ LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (SCB_GET_TAG(scb) == tag) return (scb); } return (NULL); } static void ahd_fini_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; scb_data = &ahd->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct map_node *sns_map; while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); ahd_dmamap_unload(ahd, scb_data->sense_dmat, sns_map->dmamap); ahd_dmamem_free(ahd, scb_data->sense_dmat, sns_map->vaddr, sns_map->dmamap); kfree(sns_map); } ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); } fallthrough; case 6: { struct map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); ahd_dmamap_unload(ahd, scb_data->sg_dmat, sg_map->dmamap); ahd_dmamem_free(ahd, scb_data->sg_dmat, sg_map->vaddr, sg_map->dmamap); kfree(sg_map); } ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); } fallthrough; case 5: { struct map_node *hscb_map; while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); ahd_dmamap_unload(ahd, scb_data->hscb_dmat, hscb_map->dmamap); ahd_dmamem_free(ahd, scb_data->hscb_dmat, hscb_map->vaddr, hscb_map->dmamap); kfree(hscb_map); } ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); } fallthrough; case 4: case 3: case 2: case 1: case 0: break; } } /* * DSP filter Bypass must be enabled until the first selection * after a change in bus mode (Razor #491 and #493). */ static void ahd_setup_iocell_workaround(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: Setting up iocell workaround\n", ahd_name(ahd)); #endif ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_HAD_FIRST_SEL; } static void ahd_iocell_first_selection(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int sblkctl; if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) return; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sblkctl = ahd_inb(ahd, SBLKCTL); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: iocell first selection\n", ahd_name(ahd)); #endif if ((sblkctl & ENAB40) != 0) { ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: BYPASS now disabled\n", ahd_name(ahd)); #endif } ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_HAD_FIRST_SEL; } /*************************** SCB Management ***********************************/ static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; scb->flags |= SCB_ON_COL_LIST; AHD_SET_SCB_COL_IDX(scb, col_idx); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb != NULL) { LIST_INSERT_AFTER(first_scb, scb, collision_links); } else { LIST_INSERT_HEAD(free_list, scb, collision_links); TAILQ_INSERT_TAIL(free_tailq, scb, 
links.tqe); } } static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; u_int col_idx; scb->flags &= ~SCB_ON_COL_LIST; col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb == scb) { struct scb *next_scb; /* * Maintain order in the collision free * lists for fairness if this device has * other colliding tags active. */ next_scb = LIST_NEXT(scb, collision_links); if (next_scb != NULL) { TAILQ_INSERT_AFTER(free_tailq, scb, next_scb, links.tqe); } TAILQ_REMOVE(free_tailq, scb, links.tqe); } LIST_REMOVE(scb, collision_links); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ struct scb * ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) { struct scb *scb; int tries; tries = 0; look_again: TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { ahd_rem_col_list(ahd, scb); goto found; } } if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { if (tries++ != 0) return (NULL); ahd_alloc_scbs(ahd); goto look_again; } LIST_REMOVE(scb, links.le); if (col_idx != AHD_NEVER_COL_IDX && (scb->col_scb != NULL) && (scb->col_scb->flags & SCB_ACTIVE) == 0) { LIST_REMOVE(scb->col_scb, links.le); ahd_add_col_list(ahd, scb->col_scb, col_idx); } found: scb->flags |= SCB_ACTIVE; return (scb); } /* * Return an SCB resource to the free list. */ void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) { /* Clean up for the next user */ scb->flags = SCB_FLAG_NONE; scb->hscb->control = 0; ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; if (scb->col_scb == NULL) { /* * No collision possible. Just free normally. */ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { /* * The SCB we might have collided with is on * a free collision list. Put both SCBs on * the generic list. */ ahd_rem_col_list(ahd, scb->col_scb); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb->col_scb, links.le); } else if ((scb->col_scb->flags & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE && (scb->col_scb->hscb->control & TAG_ENB) != 0) { /* * The SCB we might collide with on the next allocation * is still active in a non-packetized, tagged, context. * Put us on the SCB collision list. */ ahd_add_col_list(ahd, scb, AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); } else { /* * The SCB we might collide with on the next allocation * is either active in a packetized context, or free. * Since we can't collide, put this SCB on the generic * free list. 
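 *
 * For reference, SCB tags are paired during allocation so that the
 * two partners differ only in bit 0x100 of the tag; see
 * ahd_alloc_scbs() for where col_scb is wired up.  The collision
 * bookkeeping above is what keeps track of those pairs.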
*/ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } ahd_platform_scb_free(ahd, scb); } static void ahd_alloc_scbs(struct ahd_softc *ahd) { struct scb_data *scb_data; struct scb *next_scb; struct hardware_scb *hscb; struct map_node *hscb_map; struct map_node *sg_map; struct map_node *sense_map; uint8_t *segs; uint8_t *sense_data; dma_addr_t hscb_busaddr; dma_addr_t sg_busaddr; dma_addr_t sense_busaddr; int newcount; int i; scb_data = &ahd->scb_data; if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) /* Can't allocate any more */ return; if (scb_data->scbs_left != 0) { int offset; offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; hscb_map = SLIST_FIRST(&scb_data->hscb_maps); hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); } else { hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC); if (hscb_map == NULL) return; /* Allocate the next batch of hardware SCBs */ if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, (void **)&hscb_map->vaddr, BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { kfree(hscb_map); return; } SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, &hscb_map->physaddr, /*flags*/0); hscb = (struct hardware_scb *)hscb_map->vaddr; hscb_busaddr = hscb_map->physaddr; scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); } if (scb_data->sgs_left != 0) { int offset; offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) - scb_data->sgs_left) * ahd_sglist_size(ahd); sg_map = SLIST_FIRST(&scb_data->sg_maps); segs = sg_map->vaddr + offset; sg_busaddr = sg_map->physaddr + offset; } else { sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); if (sg_map == NULL) return; /* Allocate the next batch of S/G lists */ if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, (void **)&sg_map->vaddr, BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { kfree(sg_map); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, sg_map->vaddr, ahd_sglist_allocsize(ahd), ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); segs = sg_map->vaddr; sg_busaddr = sg_map->physaddr; scb_data->sgs_left = ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printk("Mapped SG data\n"); #endif } if (scb_data->sense_left != 0) { int offset; offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); sense_map = SLIST_FIRST(&scb_data->sense_maps); sense_data = sense_map->vaddr + offset; sense_busaddr = sense_map->physaddr + offset; } else { sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC); if (sense_map == NULL) return; /* Allocate the next batch of sense buffers */ if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, (void **)&sense_map->vaddr, BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { kfree(sense_map); return; } SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, &sense_map->physaddr, /*flags*/0); sense_data = sense_map->vaddr; sense_busaddr = sense_map->physaddr; scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printk("Mapped sense data\n"); #endif } newcount = min(scb_data->sense_left, scb_data->scbs_left); newcount = min(newcount, scb_data->sgs_left); newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data 
*pdata; u_int col_tag; next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC); if (next_scb == NULL) break; pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); if (pdata == NULL) { kfree(next_scb); break; } next_scb->platform_data = pdata; next_scb->hscb_map = hscb_map; next_scb->sg_map = sg_map; next_scb->sense_map = sense_map; next_scb->sg_list = segs; next_scb->sense_data = sense_data; next_scb->sense_busaddr = sense_busaddr; memset(hscb, 0, sizeof(*hscb)); next_scb->hscb = hscb; hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. */ next_scb->sg_list_busaddr = sg_busaddr; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) next_scb->sg_list_busaddr += sizeof(struct ahd_dma64_seg); else next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); next_scb->ahd_softc = ahd; next_scb->flags = SCB_FLAG_NONE; next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); col_tag = scb_data->numscbs ^ 0x100; next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); if (next_scb->col_scb != NULL) next_scb->col_scb->col_scb = next_scb; ahd_free_scb(ahd, next_scb); hscb++; hscb_busaddr += sizeof(*hscb); segs += ahd_sglist_size(ahd); sg_busaddr += ahd_sglist_size(ahd); sense_data += AHD_SENSE_BUFSIZE; sense_busaddr += AHD_SENSE_BUFSIZE; scb_data->numscbs++; scb_data->sense_left--; scb_data->scbs_left--; scb_data->sgs_left--; } } void ahd_controller_info(struct ahd_softc *ahd, char *buf) { const char *speed; const char *type; int len; len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); buf += len; speed = "Ultra320 "; if ((ahd->features & AHD_WIDE) != 0) { type = "Wide "; } else { type = "Single "; } len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", speed, type, ahd->channel, ahd->our_id); buf += len; sprintf(buf, "%s, %d SCBs", ahd->bus_description, ahd->scb_data.maxhscbs); } static const char *channel_strings[] = { "Primary Low", "Primary High", "Secondary Low", "Secondary High" }; static const char *termstat_strings[] = { "Terminated Correctly", "Over Terminated", "Under Terminated", "Not Configured" }; /***************************** Timer Facilities *******************************/ static void ahd_timer_reset(struct timer_list *timer, int usec) { del_timer(timer); timer->expires = jiffies + (usec * HZ)/1000000; add_timer(timer); } /* * Start the board, ready for normal operation */ int ahd_init(struct ahd_softc *ahd) { uint8_t *next_vaddr; dma_addr_t next_baddr; size_t driver_data_size; int i; int error; u_int warn_user; uint8_t current_sensing; uint8_t fstat; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd->stack_size = ahd_probe_stack_size(ahd); ahd->saved_stack = kmalloc_array(ahd->stack_size, sizeof(uint16_t), GFP_ATOMIC); if (ahd->saved_stack == NULL) return (ENOMEM); /* * Verify that the compiler hasn't over-aggressively * padded important structures. */ if (sizeof(struct hardware_scb) != 64) panic("Hardware SCB size is incorrect"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) ahd->flags |= AHD_SEQUENCER_DEBUG; #endif /* * Default to allowing initiator operations. */ ahd->flags |= AHD_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) ahd->features &= ~AHD_TARGETMODE; ahd->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qoutfifo. 
When providing * for the target mode role, we must additionally provide space for * the incoming target command fifo. */ driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo) + sizeof(struct hardware_scb); if ((ahd->features & AHD_TARGETMODE) != 0) driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) driver_data_size += PKT_OVERRUN_BUFSIZE; if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahd->shared_data_dmat) != 0) { return (ENOMEM); } ahd->init_level++; /* Allocation of driver data */ if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat, (void **)&ahd->shared_data_map.vaddr, BUS_DMA_NOWAIT, &ahd->shared_data_map.dmamap) != 0) { return (ENOMEM); } ahd->init_level++; /* And permanently map it in */ ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd->shared_data_map.vaddr, driver_data_size, ahd_dmamap_cb, &ahd->shared_data_map.physaddr, /*flags*/0); ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE]; next_baddr = ahd->shared_data_map.physaddr + AHD_QOUT_SIZE*sizeof(struct ahd_completion); if ((ahd->features & AHD_TARGETMODE) != 0) { ahd->targetcmds = (struct target_cmd *)next_vaddr; next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); } if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { ahd->overrun_buf = next_vaddr; next_vaddr += PKT_OVERRUN_BUFSIZE; next_baddr += PKT_OVERRUN_BUFSIZE; } /* * We need one SCB to serve as the "next SCB". Since the * tag identifier in this SCB will never be used, there is * no point in using a valid HSCB tag from an SCB pulled from * the standard free pool. So, we allocate this "sentinel" * specially from the DMA safe memory chunk used for the QOUTFIFO. */ ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; ahd->next_queued_hscb_map = &ahd->shared_data_map; ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr); ahd->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahd_init_scbdata(ahd) != 0) return (ENOMEM); if ((ahd->flags & AHD_INITIATORROLE) == 0) ahd->flags &= ~AHD_RESET_BUS_A; /* * Before committing these settings to the chip, give * the OSM one last chance to modify our configuration. */ ahd_platform_init(ahd); /* Bring up the chip. */ ahd_chip_init(ahd); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if ((ahd->flags & AHD_CURRENT_SENSING) == 0) goto init_done; /* * Verify termination based on current draw and * warn user if the bus is over/under terminated. */ error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, CURSENSE_ENB); if (error != 0) { printk("%s: current sensing timeout 1\n", ahd_name(ahd)); goto init_done; } for (i = 20, fstat = FLX_FSTAT_BUSY; (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); if (error != 0) { printk("%s: current sensing timeout 2\n", ahd_name(ahd)); goto init_done; } } if (i == 0) { printk("%s: Timedout during current-sensing test\n", ahd_name(ahd)); goto init_done; } /* Latch Current Sensing status. 
*/ error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing); if (error != 0) { printk("%s: current sensing timeout 3\n", ahd_name(ahd)); goto init_done; } /* Diable current sensing. */ ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { printk("%s: current_sensing == 0x%x\n", ahd_name(ahd), current_sensing); } #endif warn_user = 0; for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { u_int term_stat; term_stat = (current_sensing & FLX_CSTAT_MASK); switch (term_stat) { case FLX_CSTAT_OVER: case FLX_CSTAT_UNDER: warn_user++; fallthrough; case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) break; printk("%s: %s Channel %s\n", ahd_name(ahd), channel_strings[i], termstat_strings[term_stat]); break; } } if (warn_user) { printk("%s: WARNING. Termination is not configured correctly.\n" "%s: WARNING. SCSI bus operations may FAIL.\n", ahd_name(ahd), ahd_name(ahd)); } init_done: ahd_restart(ahd); ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); return (0); } /* * (Re)initialize chip state after a chip reset. */ static void ahd_chip_init(struct ahd_softc *ahd) { uint32_t busaddr; u_int sxfrctl1; u_int scsiseq_template; u_int wait; u_int i; u_int target; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Take the LED out of diagnostic mode */ ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); /* * Return HS_MAILBOX to its default value. */ ahd->hs_mailbox = 0; ahd_outb(ahd, HS_MAILBOX, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ ahd_outb(ahd, IOWNID, ahd->our_id); ahd_outb(ahd, TOWNID, ahd->our_id); sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; if ((ahd->bugs & AHD_LONG_SETIMO_BUG) && (ahd->seltime != STIMESEL_MIN)) { /* * The selection timer duration is twice as long * as it should be. Halve it by adding "1" to * the user specified setting. */ sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; } else { sxfrctl1 |= ahd->seltime; } ahd_outb(ahd, SXFRCTL0, DFON); ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); /* * Now that termination is set, wait for up * to 500ms for our transceivers to settle. If * the adapter does not have a cable attached, * the transceivers may never settle, so don't * complain if we fail here. */ for (wait = 10000; (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) ahd_delay(100); /* Clear any false bus resets due to the transceivers settling */ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); ahd_outb(ahd, CLRINT, CLRSCSIINT); /* Initialize mode specific S/G state. 
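 *
 * The loop below visits each of the two data FIFO mode pages (DFF0
 * and DFF1), clearing the S/G engine state and sequencer interrupt
 * sources and enabling the per-FIFO interrupt conditions.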
*/ for (i = 0; i < 2; i++) { ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_outb(ahd, CLRSEQINTSRC, 0xFF); ahd_outb(ahd, SEQIMODE, ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); } ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); } else { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); } ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) /* * Do not issue a target abort when a split completion * error occurs. Let our PCIX interrupt handler deal * with it instead. H2A4 Razor #625 */ ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); /* * Tweak IOCELL settings. */ if ((ahd->flags & AHD_HP_BOARD) != 0) { for (i = 0; i < NUMDSPS; i++) { ahd_outb(ahd, DSPSELECT, i); ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), WRTBIASCTL_HP_DEFAULT); #endif } ahd_setup_iocell_workaround(ahd); /* * Enable LQI Manager interrupts. */ ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); /* * We choose to have the sequencer catch LQOPHCHGINPKT errors * manually for the command phase at the start of a packetized * selection case. ENLQOBUSFREE should be made redundant by * the BUSFREE interrupt, but it seems that some LQOBUSFREE * events fail to assert the BUSFREE interrupt so we must * also enable LQOBUSFREE interrupts. */ ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE); /* * Setup sequencer interrupt handlers. */ ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); /* * Setup SCB Offset registers. */ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, pkt_long_lun)); } else { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); } ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, shared_data.idata.cdb)); ahd_outb(ahd, QNEXTPTR, offsetof(struct hardware_scb, next_hscb_busaddr)); ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNLEN, sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); } else { ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); } ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); ahd_outb(ahd, MAXCMD, 0xFF); ahd_outb(ahd, SCBAUTOPTR, AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); /* We haven't been enabled for target mode yet. */ ahd_outb(ahd, MULTARGID, 0); ahd_outb(ahd, MULTARGID + 1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* Initialize the negotiation table. 
*/ if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { /* * Clear the spare bytes in the neg table to avoid * spurious parity errors. */ for (target = 0; target < AHD_NUM_TARGETS; target++) { ahd_outb(ahd, NEGOADDR, target); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) ahd_outb(ahd, ANNEXDAT, 0); } } for (target = 0; target < AHD_NUM_TARGETS; target++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, target, &tstate); ahd_compile_devinfo(&devinfo, ahd->our_id, target, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); } ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); ahd_outb(ahd, CLRINT, CLRSCSIINT); #ifdef NEEDS_MORE_TESTING /* * Always enable abort on incoming L_Qs if this feature is * supported. We use this to catch invalid SCB references. */ if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) ahd_outb(ahd, LQCTL1, ABORTPENDING); else #endif ahd_outb(ahd, LQCTL1, 0); /* All of our queues are empty */ ahd->qoutfifonext = 0; ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); for (i = 0; i < AHD_QOUT_SIZE; i++) ahd->qoutfifo[i].valid_tag = 0; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); ahd->qinfifonext = 0; for (i = 0; i < AHD_QIN_SIZE; i++) ahd->qinfifo[i] = SCB_LIST_NULL; if ((ahd->features & AHD_TARGETMODE) != 0) { /* All target command blocks start out invalid. */ for (i = 0; i < AHD_TMODE_CMDS; i++) ahd->targetcmds[i].cmd_valid = 0; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); ahd->tqinfifonext = 1; ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); } /* Initialize Scratch Ram. */ ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SEQ_FLAGS2, 0); /* We don't have any waiting selections */ ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL); ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF); for (i = 0; i < AHD_NUM_TARGETS; i++) ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); /* * Nobody is waiting to be DMAed into the QOUTFIFO. */ ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); /* * The Freeze Count is 0. */ ahd->qfreeze_cnt = 0; ahd_outw(ahd, QFREEZE_COUNT, 0); ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0); /* * Tell the sequencer where it can find our arrays in memory. */ busaddr = ahd->shared_data_map.physaddr; ahd_outl(ahd, SHARED_DATA_ADDR, busaddr); ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr); /* * Setup the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enable select in operations once * we've had a lun enabled. */ scsiseq_template = ENAUTOATNP; if ((ahd->flags & AHD_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); /* There are no busy SCBs yet. */ for (target = 0; target < AHD_NUM_TARGETS; target++) { int lun; for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); } /* * Initialize the group code to command length table. * Vendor Unique codes are set to 0 so we only capture * the first byte of the cdb. These can be overridden * when target mode is enabled. 
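 *
 * The group code is the top three bits of the CDB opcode, and each
 * table entry below is the number of CDB bytes that follow the opcode:
 * group 0 uses 6-byte CDBs (5), groups 1 and 2 use 10-byte CDBs (9),
 * group 4 uses 16-byte CDBs (15) and group 5 uses 12-byte CDBs (11);
 * group 3 and the vendor-unique groups 6 and 7 are left at zero.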
*/ ahd_outb(ahd, CMDSIZE_TABLE, 5); ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); /* Tell the sequencer of our initial queue positions */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); ahd->qinfifonext = 0; ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); ahd_set_hescb_qoff(ahd, 0); ahd_set_snscb_qoff(ahd, 0); ahd_set_sescb_qoff(ahd, 0); ahd_set_sdscb_qoff(ahd, 0); /* * Tell the sequencer which SCB will be the next one it receives. */ busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); /* * Default to coalescing disabled. */ ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0); ahd_outw(ahd, CMDS_PENDING, 0); ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer, ahd->int_coalescing_maxcmds, ahd->int_coalescing_mincmds); ahd_enable_coalescing(ahd, FALSE); ahd_loadseq(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if (ahd->features & AHD_AIC79XXB_SLOWCRC) { u_int negodat3 = ahd_inb(ahd, NEGCONOPTS); negodat3 |= ENSLOWCRC; ahd_outb(ahd, NEGCONOPTS, negodat3); negodat3 = ahd_inb(ahd, NEGCONOPTS); if (!(negodat3 & ENSLOWCRC)) printk("aic79xx: failed to set the SLOWCRC bit\n"); else printk("aic79xx: SLOWCRC bit set\n"); } } /* * Setup default device and controller settings. * This should only be called if our probe has * determined that no configuration data is available. */ int ahd_default_config(struct ahd_softc *ahd) { int targ; ahd->our_id = 7; /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { printk("%s: unable to allocate ahd_tmode_tstate. " "Failing attach\n", ahd_name(ahd)); return (ENOMEM); } for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; uint16_t target_mask; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, targ, &tstate); /* * We support SPC2 and SPI4. */ tinfo->user.protocol_version = 4; tinfo->user.transport_version = 4; target_mask = 0x01 << targ; ahd->user_discenable |= target_mask; tstate->discenable |= target_mask; ahd->user_tagenable |= target_mask; #ifdef AHD_FORCE_160 tinfo->user.period = AHD_SYNCRATE_DT; #else tinfo->user.period = AHD_SYNCRATE_160; #endif tinfo->user.offset = MAX_OFFSET; tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM | MSG_EXT_PPR_WR_FLOW | MSG_EXT_PPR_HOLD_MCS | MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; if ((ahd->features & AHD_RTI) != 0) tinfo->user.ppr_options |= MSG_EXT_PPR_RTI; tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * Start out Async/Narrow/Untagged and with * conservative protocol support. */ tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; ahd_compile_devinfo(&devinfo, ahd->our_id, targ, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); tstate->tagenable &= ~target_mask; ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); } return (0); } /* * Parse device configuration information. 
*/ int ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) { int targ; int max_targ; max_targ = sc->max_targets & CFMAXTARG; ahd->our_id = sc->brtime_id & CFSCSIID; /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { printk("%s: unable to allocate ahd_tmode_tstate. " "Failing attach\n", ahd_name(ahd)); return (ENOMEM); } for (targ = 0; targ < max_targ; targ++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_transinfo *user_tinfo; struct ahd_tmode_tstate *tstate; uint16_t target_mask; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, targ, &tstate); user_tinfo = &tinfo->user; /* * We support SPC2 and SPI4. */ tinfo->user.protocol_version = 4; tinfo->user.transport_version = 4; target_mask = 0x01 << targ; ahd->user_discenable &= ~target_mask; tstate->discenable &= ~target_mask; ahd->user_tagenable &= ~target_mask; if (sc->device_flags[targ] & CFDISC) { tstate->discenable |= target_mask; ahd->user_discenable |= target_mask; ahd->user_tagenable |= target_mask; } else { /* * Cannot be packetized without disconnection. */ sc->device_flags[targ] &= ~CFPACKETIZED; } user_tinfo->ppr_options = 0; user_tinfo->period = (sc->device_flags[targ] & CFXFER); if (user_tinfo->period < CFXFER_ASYNC) { if (user_tinfo->period <= AHD_PERIOD_10MHz) user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; user_tinfo->offset = MAX_OFFSET; } else { user_tinfo->offset = 0; user_tinfo->period = AHD_ASYNC_XFER_PERIOD; } #ifdef AHD_FORCE_160 if (user_tinfo->period <= AHD_SYNCRATE_160) user_tinfo->period = AHD_SYNCRATE_DT; #endif if ((sc->device_flags[targ] & CFPACKETIZED) != 0) { user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM | MSG_EXT_PPR_WR_FLOW | MSG_EXT_PPR_HOLD_MCS | MSG_EXT_PPR_IU_REQ; if ((ahd->features & AHD_RTI) != 0) user_tinfo->ppr_options |= MSG_EXT_PPR_RTI; } if ((sc->device_flags[targ] & CFQAS) != 0) user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; if ((sc->device_flags[targ] & CFWIDEB) != 0) user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; else user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, user_tinfo->period, user_tinfo->offset, user_tinfo->ppr_options); #endif /* * Start out Async/Narrow/Untagged and with * conservative protocol support. */ tstate->tagenable &= ~target_mask; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; ahd_compile_devinfo(&devinfo, ahd->our_id, targ, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); } ahd->flags &= ~AHD_SPCHK_ENB_A; if (sc->bios_control & CFSPARITY) ahd->flags |= AHD_SPCHK_ENB_A; ahd->flags &= ~AHD_RESET_BUS_A; if (sc->bios_control & CFRESETB) ahd->flags |= AHD_RESET_BUS_A; ahd->flags &= ~AHD_EXTENDED_TRANS_A; if (sc->bios_control & CFEXTEND) ahd->flags |= AHD_EXTENDED_TRANS_A; ahd->flags &= ~AHD_BIOS_ENABLED; if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) ahd->flags |= AHD_BIOS_ENABLED; ahd->flags &= ~AHD_STPWLEVEL_A; if ((sc->adapter_control & CFSTPWLEVEL) != 0) ahd->flags |= AHD_STPWLEVEL_A; return (0); } /* * Parse device configuration information. 
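 *
 * This VPD variant only verifies the VPD checksum and, if the
 * VPDBOOTHOST flag is set, marks the controller as the boot channel.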
*/ int ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd) { int error; error = ahd_verify_vpd_cksum(vpd); if (error == 0) return (EINVAL); if ((vpd->bios_flags & VPDBOOTHOST) != 0) ahd->flags |= AHD_BOOT_CHANNEL; return (0); } void ahd_intr_enable(struct ahd_softc *ahd, int enable) { u_int hcntrl; hcntrl = ahd_inb(ahd, HCNTRL); hcntrl &= ~INTEN; ahd->pause &= ~INTEN; ahd->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahd->pause |= INTEN; ahd->unpause |= INTEN; } ahd_outb(ahd, HCNTRL, hcntrl); } static void ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds) { if (timer > AHD_TIMER_MAX_US) timer = AHD_TIMER_MAX_US; ahd->int_coalescing_timer = timer; if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX) maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX; if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX) mincmds = AHD_INT_COALESCING_MINCMDS_MAX; ahd->int_coalescing_maxcmds = maxcmds; ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK); ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds); ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds); } static void ahd_enable_coalescing(struct ahd_softc *ahd, int enable) { ahd->hs_mailbox &= ~ENINT_COALESCE; if (enable) ahd->hs_mailbox |= ENINT_COALESCE; ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox); ahd_flush_device_writes(ahd); ahd_run_qoutfifo(ahd); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahd_pause_and_flushwork(struct ahd_softc *ahd) { u_int intstat; u_int maxloops; maxloops = 1000; ahd->flags |= AHD_ALL_INTERRUPTS; ahd_pause(ahd); /* * Freeze the outgoing selections. We do this only * until we are safely paused without further selections * pending. */ ahd->qfreeze_cnt--; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); do { ahd_unpause(ahd); /* * Give the sequencer some time to service * any active selections. */ ahd_delay(500); ahd_intr(ahd); ahd_pause(ahd); intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) { ahd_clear_critical_section(ahd); intstat = ahd_inb(ahd, INTSTAT); } } while (--maxloops && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printk("Infinite interrupt loop, INTSTAT = %x", ahd_inb(ahd, INTSTAT)); } ahd->qfreeze_cnt++; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_flush_qoutfifo(ahd); ahd->flags &= ~AHD_ALL_INTERRUPTS; } int __maybe_unused ahd_suspend(struct ahd_softc *ahd) { ahd_pause_and_flushwork(ahd); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ahd_unpause(ahd); return (EBUSY); } ahd_shutdown(ahd); return (0); } void __maybe_unused ahd_resume(struct ahd_softc *ahd) { ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, TRUE); ahd_restart(ahd); } /************************** Busy Target Table *********************************/ /* * Set SCBPTR to the SCB that contains the busy * table entry for TCL. Return the offset into * the SCB that contains the entry for TCL. * saved_scbid is dereferenced and set to the * scbid that should be restored once manipualtion * of the TCL entry is complete. */ static inline u_int ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) { /* * Index to the SCB that contains the busy entry. 
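 *
 * The lun and the upper two bits of the target select which SCB's
 * scratch area holds the table; the low two bits of the target then
 * select one of the four two-byte entries within that SCB.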
*/ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); *saved_scbid = ahd_get_scbptr(ahd); ahd_set_scbptr(ahd, TCL_LUN(tcl) | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); /* * And now calculate the SCB offset to the entry. * Each entry is 2 bytes wide, hence the * multiplication by 2. */ return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS); } /* * Return the untagged transaction id for a given target/channel lun. */ static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) { u_int scbid; u_int scb_offset; u_int saved_scbptr; scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); scbid = ahd_inw_scbram(ahd, scb_offset); ahd_set_scbptr(ahd, saved_scbptr); return (scbid); } static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) { u_int scb_offset; u_int saved_scbptr; scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); ahd_outw(ahd, scb_offset, scbid); ahd_set_scbptr(ahd, saved_scbptr); } /************************** SCB and SCB queue management **********************/ static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahd, scb); char chan = SCB_GET_CHANNEL(ahd, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #ifdef AHD_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHD_TARGET_MODE */ match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); #endif /* AHD_TARGET_MODE */ } return match; } static void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahd, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahd, scb); ahd_search_qinfifo(ahd, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahd_platform_freeze_devq(ahd, scb); } void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) { struct scb *prev_scb; ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); prev_scb = NULL; if (ahd_qinfifo_count(ahd) != 0) { u_int prev_tag; u_int prev_pos; prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); prev_tag = ahd->qinfifo[prev_pos]; prev_scb = ahd_lookup_scb(ahd, prev_tag); } ahd_qinfifo_requeue(ahd, prev_scb, scb); ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); ahd_restore_modes(ahd, saved_modes); } static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { uint32_t busaddr; busaddr = ahd_le32toh(scb->hscb->hscb_busaddr); ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); } else { prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; ahd_sync_scb(ahd, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr; ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahd_qinfifo_count(struct ahd_softc *ahd) { u_int qinpos; 
u_int wrap_qinpos; u_int wrap_qinfifonext; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); qinpos = ahd_get_snscb_qoff(ahd); wrap_qinpos = AHD_QIN_WRAP(qinpos); wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); if (wrap_qinfifonext >= wrap_qinpos) return (wrap_qinfifonext - wrap_qinpos); else return (wrap_qinfifonext + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos); } static void ahd_reset_cmds_pending(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int pending_cmds; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Don't count any commands as outstanding that the * sequencer has already marked for completion. */ ahd_flush_qoutfifo(ahd); pending_cmds = 0; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { pending_cmds++; } ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd)); ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_UPDATE_PEND_CMDS; } static void ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) { cam_status ostat; cam_status cstat; ostat = ahd_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahd_set_transaction_status(scb, status); cstat = ahd_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahd_freeze_scb(scb); ahd_done(ahd, scb); } int ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action) { struct scb *scb; struct scb *mk_msg_scb; struct scb *prev_scb; ahd_mode_state saved_modes; u_int qinstart; u_int qinpos; u_int qintail; u_int tid_next; u_int tid_prev; u_int scbid; u_int seq_flags2; u_int savedscbptr; uint32_t busaddr; int found; int targets; /* Must be in CCHAN mode */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Halt any pending SCB DMA. The sequencer will reinitiate * this dma if the qinfifo is not empty once we unpause. */ if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) == (CCARREN|CCSCBEN|CCSCBDIR)) { ahd_outb(ahd, CCSCBCTL, ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) ; } /* Determine sequencer's position in the qinfifo. */ qintail = AHD_QIN_WRAP(ahd->qinfifonext); qinstart = ahd_get_snscb_qoff(ahd); qinpos = AHD_QIN_WRAP(qinstart); found = 0; prev_scb = NULL; if (action == SEARCH_PRINT) { printk("qinstart = %d qinfifonext = %d\nQINFIFO:", qinstart, ahd->qinfifonext); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahd->qinfifonext = qinstart; busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); while (qinpos != qintail) { scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); if (scb == NULL) { printk("qinpos = %d, SCB index = %d\n", qinpos, ahd->qinfifo[qinpos]); panic("Loop 1\n"); } if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. 
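 * SEARCH_COMPLETE finishes the command with the caller's status and,
 * like SEARCH_REMOVE, leaves it out of the rebuilt queue;
 * SEARCH_PRINT and SEARCH_COUNT requeue the SCB so the qinfifo is
 * left unchanged.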
*/ found++; switch (action) { case SEARCH_COMPLETE: if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in qinfifo\n"); ahd_done_with_status(ahd, scb, status); fallthrough; case SEARCH_REMOVE: break; case SEARCH_PRINT: printk(" 0x%x", ahd->qinfifo[qinpos]); fallthrough; case SEARCH_COUNT: ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; break; } } else { ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; } qinpos = AHD_QIN_WRAP(qinpos+1); } ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); if (action == SEARCH_PRINT) printk("\nWAITING_TID_QUEUES:\n"); /* * Search waiting for selection lists. We traverse the * list of "their ids" waiting for selection and, if * appropriate, traverse the SCBs of each "their id" * looking for matches. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2); if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) { scbid = ahd_inw(ahd, MK_MESSAGE_SCB); mk_msg_scb = ahd_lookup_scb(ahd, scbid); } else mk_msg_scb = NULL; savedscbptr = ahd_get_scbptr(ahd); tid_next = ahd_inw(ahd, WAITING_TID_HEAD); tid_prev = SCB_LIST_NULL; targets = 0; for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { u_int tid_head; u_int tid_tail; targets++; if (targets > AHD_NUM_TARGETS) panic("TID LIST LOOP"); if (scbid >= ahd->scb_data.numscbs) { printk("%s: Waiting TID List inconsistency. " "SCB index == 0x%x, yet numscbs == 0x%x.", ahd_name(ahd), scbid, ahd->scb_data.numscbs); ahd_dump_card_state(ahd); panic("for safety"); } scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: SCB = 0x%x Not Active!\n", ahd_name(ahd), scbid); panic("Waiting TID List traversal\n"); } ahd_set_scbptr(ahd, scbid); tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { tid_prev = scbid; continue; } /* * We found a list of scbs that needs to be searched. */ if (action == SEARCH_PRINT) printk(" %d ( ", SCB_GET_TARGET(ahd, scb)); tid_head = scbid; found += ahd_search_scb_list(ahd, target, channel, lun, tag, role, status, action, &tid_head, &tid_tail, SCB_GET_TARGET(ahd, scb)); /* * Check any MK_MESSAGE SCB that is still waiting to * enter this target's waiting for selection queue. */ if (mk_msg_scb != NULL && ahd_match_scb(ahd, mk_msg_scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB pending MK_MSG\n"); ahd_done_with_status(ahd, mk_msg_scb, status); fallthrough; case SEARCH_REMOVE: { u_int tail_offset; printk("Removing MK_MSG scb\n"); /* * Reset our tail to the tail of the * main per-target list. */ tail_offset = WAITING_SCB_TAILS + (2 * SCB_GET_TARGET(ahd, mk_msg_scb)); ahd_outw(ahd, tail_offset, tid_tail); seq_flags2 &= ~PENDING_MK_MESSAGE; ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING)-1); mk_msg_scb = NULL; break; } case SEARCH_PRINT: printk(" 0x%x", SCB_GET_TAG(scb)); fallthrough; case SEARCH_COUNT: break; } } if (mk_msg_scb != NULL && SCBID_IS_NULL(tid_head) && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN)) { /* * When removing the last SCB for a target * queue with a pending MK_MESSAGE scb, we * must queue the MK_MESSAGE scb. 
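 * The MK_MESSAGE SCB becomes the new head of this otherwise empty
 * target list and PENDING_MK_MESSAGE is cleared, since the SCB is no
 * longer waiting outside of a per-target queue.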
*/ printk("Queueing mk_msg_scb\n"); tid_head = ahd_inw(ahd, MK_MESSAGE_SCB); seq_flags2 &= ~PENDING_MK_MESSAGE; ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); mk_msg_scb = NULL; } if (tid_head != scbid) ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); if (!SCBID_IS_NULL(tid_head)) tid_prev = tid_head; if (action == SEARCH_PRINT) printk(")\n"); } /* Restore saved state. */ ahd_set_scbptr(ahd, savedscbptr); ahd_restore_modes(ahd, saved_modes); return (found); } static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid) { struct scb *scb; u_int scbid; u_int next; u_int prev; int found; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); found = 0; prev = SCB_LIST_NULL; next = *list_head; *list_tail = SCB_LIST_NULL; for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { if (scbid >= ahd->scb_data.numscbs) { printk("%s:SCB List inconsistency. " "SCB == 0x%x, yet numscbs == 0x%x.", ahd_name(ahd), scbid, ahd->scb_data.numscbs); ahd_dump_card_state(ahd); panic("for safety"); } scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: SCB = %d Not Active!\n", ahd_name(ahd), scbid); panic("Waiting List traversal\n"); } ahd_set_scbptr(ahd, scbid); *list_tail = scbid; next = ahd_inw_scbram(ahd, SCB_NEXT); if (ahd_match_scb(ahd, scb, target, channel, lun, SCB_LIST_NULL, role) == 0) { prev = scbid; continue; } found++; switch (action) { case SEARCH_COMPLETE: if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahd_done_with_status(ahd, scb, status); fallthrough; case SEARCH_REMOVE: ahd_rem_wscb(ahd, scbid, prev, next, tid); *list_tail = prev; if (SCBID_IS_NULL(prev)) *list_head = next; break; case SEARCH_PRINT: printk("0x%x ", scbid); fallthrough; case SEARCH_COUNT: prev = scbid; break; } if (found > AHD_SCB_MAX) panic("SCB LIST LOOP"); } if (action == SEARCH_COMPLETE || action == SEARCH_REMOVE) ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found); return (found); } static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next) { AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (SCBID_IS_NULL(tid_cur)) { /* Bypass current TID list */ if (SCBID_IS_NULL(tid_prev)) { ahd_outw(ahd, WAITING_TID_HEAD, tid_next); } else { ahd_set_scbptr(ahd, tid_prev); ahd_outw(ahd, SCB_NEXT2, tid_next); } if (SCBID_IS_NULL(tid_next)) ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); } else { /* Stitch through tid_cur */ if (SCBID_IS_NULL(tid_prev)) { ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); } else { ahd_set_scbptr(ahd, tid_prev); ahd_outw(ahd, SCB_NEXT2, tid_cur); } ahd_set_scbptr(ahd, tid_cur); ahd_outw(ahd, SCB_NEXT2, tid_next); if (SCBID_IS_NULL(tid_next)) ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid) { u_int tail_offset; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (!SCBID_IS_NULL(prev)) { ahd_set_scbptr(ahd, prev); ahd_outw(ahd, SCB_NEXT, next); } /* * SCBs that have MK_MESSAGE set in them may * cause the tail pointer to be updated without * setting the next pointer of the previous tail. * Only clear the tail if the removed SCB was * the tail. 
*/ tail_offset = WAITING_SCB_TAILS + (2 * tid); if (SCBID_IS_NULL(next) && ahd_inw(ahd, tail_offset) == scbid) ahd_outw(ahd, tail_offset, prev); ahd_add_scb_to_free_list(ahd, scbid); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) { /* XXX Need some other mechanism to designate "free". */ /* * Invalidate the tag so that our abort * routines don't think it's active. ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); */ } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ static int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int i, j; u_int maxtarget; u_int minlun; u_int maxlun; int found; ahd_mode_state saved_modes; /* restore this when we're done */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { minlun = 0; maxlun = AHD_NUM_LUNS_NONPKT; } else if (lun >= AHD_NUM_LUNS_NONPKT) { minlun = maxlun = 0; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL_RAW(i, 'A', j); scbid = ahd_find_busy_tcl(ahd, tcl); scbp = ahd_lookup_scb(ahd, scbid); if (scbp == NULL || ahd_match_scb(ahd, scbp, target, channel, lun, tag, role) == 0) continue; ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); } } } /* * Don't abort commands that have already completed, * but haven't quite made it up to the host yet. */ ahd_flush_qoutfifo(ahd); /* * Go through the pending CCB list and look for * commands for this target that are still active. * These are other tagged commands that were * disconnected when the reset occurred. 
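 * Each matching SCB has its status forced to the caller's status if it
 * is still CAM_REQ_INPROG, is frozen unless it completed successfully,
 * and is then finished through ahd_done().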
*/ scbp_next = LIST_FIRST(&ahd->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { cam_status ostat; ostat = ahd_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) ahd_set_transaction_status(scbp, status); if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) ahd_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printk("Inactive SCB on pending list\n"); ahd_done(ahd, scbp); found++; } } ahd_restore_modes(ahd, saved_modes); ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); ahd->flags |= AHD_UPDATE_PEND_CMDS; return found; } static void ahd_reset_current_bus(struct ahd_softc *ahd) { uint8_t scsiseq; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO); ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); ahd_flush_device_writes(ahd); ahd_delay(AHD_BUSRESET_DELAY); /* Turn off the bus reset */ ahd_outb(ahd, SCSISEQ0, scsiseq); ahd_flush_device_writes(ahd); ahd_delay(AHD_BUSRESET_DELAY); if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { /* * 2A Razor #474 * Certain chip state is not cleared for * SCSI bus resets that we initiate, so * we must reset the chip. */ ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, /*enable*/TRUE); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); } ahd_clear_intstat(ahd); } int ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) { struct ahd_devinfo caminfo; u_int initiator; u_int target; u_int max_scsiid; int found; u_int fifo; u_int next_fifo; uint8_t scsiseq; /* * Check if the last bus reset is cleared */ if (ahd->flags & AHD_BUS_RESET_ACTIVE) { printk("%s: bus reset still active\n", ahd_name(ahd)); return 0; } ahd->flags |= AHD_BUS_RESET_ACTIVE; ahd->pending_device = NULL; ahd_compile_devinfo(&caminfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahd_pause(ahd); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); /* * Run our command complete fifos to ensure that we perform * completion processing on any commands that 'completed' * before the reset occurred. */ ahd_run_qoutfifo(ahd); #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { ahd_run_tqinfifo(ahd, /*paused*/TRUE); } #endif ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Disable selections so no automatic hardware * functions will modify chip state. */ ahd_outb(ahd, SCSISEQ0, 0); ahd_outb(ahd, SCSISEQ1, 0); /* * Safely shut down our DMA engines. Always start with * the FIFO that is not currently active (if any are * actively connected). */ next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; if (next_fifo > CURRFIFO_1) /* If disconneced, arbitrarily start with FIFO1. */ next_fifo = fifo = 0; do { next_fifo ^= CURRFIFO_1; ahd_set_modes(ahd, next_fifo, next_fifo); ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN)); while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) ahd_delay(10); /* * Set CURRFIFO to the now inactive channel. 
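 * Toggling next_fifo with CURRFIFO_1 walks both data FIFOs, starting
 * with the one that is not currently connected, and the loop ends once
 * the originally active FIFO has been drained as well.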
*/ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, next_fifo); } while (next_fifo != fifo); /* * Reset the bus if we are initiating this reset */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); if (initiate_reset) ahd_reset_current_bus(ahd); ahd_clear_intstat(ahd); /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); /* * Cleanup anything left in the FIFOs. */ ahd_clear_fifo(ahd, 0); ahd_clear_fifo(ahd, 1); /* * Clear SCSI interrupt status */ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); /* * Reenable selections */ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE); ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; #ifdef AHD_TARGET_MODE /* * Send an immediate notify ccb to all target more peripheral * drivers affected by this action. */ for (target = 0; target <= max_scsiid; target++) { struct ahd_tmode_tstate* tstate; u_int lun; tstate = ahd->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHD_NUM_LUNS; lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* * Revert to async/narrow transfers until we renegotiate. */ for (target = 0; target <= max_scsiid; target++) { if (ahd->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahd_devinfo devinfo; ahd_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, 'A', ROLE_UNKNOWN); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); } } /* Notify the XPT that a bus reset occurred */ ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET); ahd_restart(ahd); return (found); } /**************************** Statistics Processing ***************************/ static void ahd_stat_timer(struct timer_list *t) { struct ahd_softc *ahd = from_timer(ahd, t, stat_timer); u_long s; int enint_coal; ahd_lock(ahd, &s); enint_coal = ahd->hs_mailbox & ENINT_COALESCE; if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold) enint_coal |= ENINT_COALESCE; else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold) enint_coal &= ~ENINT_COALESCE; if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) { ahd_enable_coalescing(ahd, enint_coal); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) printk("%s: Interrupt coalescing " "now %sabled. Cmds %d\n", ahd_name(ahd), (enint_coal & ENINT_COALESCE) ? 
"en" : "dis", ahd->cmdcmplt_total); #endif } ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); ahd_unlock(ahd, &s); } /****************************** Status Processing *****************************/ static void ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *hscb; int paused; /* * The sequencer freezes its select-out queue * anytime a SCSI status error occurs. We must * handle the error and increment our qfreeze count * to allow the sequencer to continue. We don't * bother clearing critical sections here since all * operations are on data structures that the sequencer * is not touching once the queue is frozen. */ hscb = scb->hscb; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } /* Freeze the queue until the client sees the error. */ ahd_freeze_devq(ahd, scb); ahd_freeze_scb(scb); ahd->qfreeze_cnt++; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); if (paused == 0) ahd_unpause(ahd); /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and perform * a normal command completion. */ scb->flags &= ~SCB_SENSE; ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); ahd_done(ahd, scb); return; } ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); switch (hscb->shared_data.istatus.scsi_status) { case STATUS_PKT_SENSE: { struct scsi_status_iu_header *siu; ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); siu = (struct scsi_status_iu_header *)scb->sense_data; ahd_set_scsi_status(scb, siu->status); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SENSE) != 0) { ahd_print_path(ahd, scb); printk("SCB 0x%x Received PKT Status of 0x%x\n", SCB_GET_TAG(scb), siu->status); printk("\tflags = 0x%x, sense len = 0x%x, " "pktfail = 0x%x\n", siu->flags, scsi_4btoul(siu->sense_length), scsi_4btoul(siu->pkt_failures_length)); } #endif if ((siu->flags & SIU_RSPVALID) != 0) { ahd_print_path(ahd, scb); if (scsi_4btoul(siu->pkt_failures_length) < 4) { printk("Unable to parse pkt_failures\n"); } else { switch (SIU_PKTFAIL_CODE(siu)) { case SIU_PFC_NONE: printk("No packet failure found\n"); break; case SIU_PFC_CIU_FIELDS_INVALID: printk("Invalid Command IU Field\n"); break; case SIU_PFC_TMF_NOT_SUPPORTED: printk("TMF not supported\n"); break; case SIU_PFC_TMF_FAILED: printk("TMF failed\n"); break; case SIU_PFC_INVALID_TYPE_CODE: printk("Invalid L_Q Type code\n"); break; case SIU_PFC_ILLEGAL_REQUEST: printk("Illegal request\n"); break; default: break; } } if (siu->status == SAM_STAT_GOOD) ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR); } if ((siu->flags & SIU_SNSVALID) != 0) { scb->flags |= SCB_PKT_SENSE; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SENSE) != 0) printk("Sense data available\n"); #endif } ahd_done(ahd, scb); break; } case SAM_STAT_COMMAND_TERMINATED: case SAM_STAT_CHECK_CONDITION: { struct ahd_devinfo devinfo; struct ahd_dma_seg *sg; struct scsi_sense *sc; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { ahd_print_path(ahd, scb); printk("SCB %d: requests Check Status\n", SCB_GET_TAG(scb)); } #endif if (ahd_perform_autosense(scb) == 0) break; ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahd, scb), 
SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), ROLE_INITIATOR); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; /* * Save off the residual if there is one. */ ahd_update_residual(ahd, scb); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { ahd_print_path(ahd, scb); printk("Sending Sense\n"); } #endif scb->sg_count = 0; sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), ahd_get_sense_bufsize(ahd, scb), /*last*/TRUE); sc->opcode = REQUEST_SENSE; sc->byte2 = 0; if (tinfo->protocol_version <= SCSI_REV_2 && SCB_GET_LUN(scb) < 8) sc->byte2 = SCB_GET_LUN(scb) << 5; sc->unused[0] = 0; sc->unused[1] = 0; sc->length = ahd_get_sense_bufsize(ahd, scb); sc->control = 0; /* * We can't allow the target to disconnect. * This will be an untagged transaction and * having the target disconnect will make this * transaction indestinguishable from outstanding * tagged transactions. */ hscb->control = 0; /* * This request sense could be because the * the device lost power or in some other * way has lost our transfer negotiations. * Renegotiate if appropriate. Unit attention * errors will be reported before any data * phases occur. */ if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) { ahd_update_neg_request(ahd, &devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } if (tstate->auto_negotiate & devinfo.target_mask) { hscb->control |= MK_MESSAGE; scb->flags &= ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); scb->flags |= SCB_AUTO_NEGOTIATE; } hscb->cdb_len = sizeof(*sc); ahd_setup_data_scb(ahd, scb); scb->flags |= SCB_SENSE; ahd_queue_scb(ahd, scb); break; } case SAM_STAT_GOOD: printk("%s: Interrupted for status of 0???\n", ahd_name(ahd)); fallthrough; default: ahd_done(ahd, scb); break; } } static void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) { if (scb->hscb->shared_data.istatus.scsi_status != 0) { ahd_handle_scsi_status(ahd, scb); } else { ahd_calc_residual(ahd, scb); ahd_done(ahd, scb); } } /* * Calculate the residual for a just completed SCB. */ static void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *hscb; struct initiator_status *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_STATUS_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are. */ hscb = scb->hscb; sgptr = ahd_le32toh(hscb->sgptr); if ((sgptr & SG_STATUS_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_STATUS_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; /* * Residual fields are the same in both * target and initiator status packets, * so we can always use the initiator fields * regardless of the role for this SCB. 
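 * In the partial-residual case the hardware leaves residual_sgptr
 * pointing one S/G element past where the transfer stopped, so the
 * code below steps back one element and then adds the length of every
 * element that follows to residual_datacnt.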
*/ spkt = &hscb->shared_data.istatus; resid_sgptr = ahd_le32toh(spkt->residual_sgptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ resid = ahd_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { ahd_print_path(ahd, scb); printk("data overrun detected Tag == 0x%x.\n", SCB_GET_TAG(scb)); ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); ahd_freeze_scb(scb); return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); /* NOTREACHED */ } else { struct ahd_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. */ while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { sg++; resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) ahd_set_residual(scb, resid); else ahd_set_sense_residual(scb, resid); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) { ahd_print_path(ahd, scb); printk("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHD_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahd_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == TARGET_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printk("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
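 * Events are drained in FIFO order from the lun's circular event
 * buffer and paired with queued immediate-notify CCBs: a bus reset
 * completes the CCB with CAM_SCSI_BUS_RESET, anything else is returned
 * as CAM_MESSAGE_RECV with the event type and argument in
 * message_args[].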
*/ void ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immed_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahd_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immed_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->message_args[0] = event->event_type; inot->message_args[1] = event->event_arg; break; } inot->initiator_id = event->initiator_id; inot->sense_len = 0; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHD_DUMP_SEQ void ahd_dumpseq(struct ahd_softc* ahd) { int i; int max_prog; max_prog = 2048; ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahd_outw(ahd, PRGMCNT, 0); for (i = 0; i < max_prog; i++) { uint8_t ins_bytes[4]; ahd_insb(ahd, SEQRAM, ins_bytes, 4); printk("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static void ahd_loadseq(struct ahd_softc *ahd) { struct cs cs_table[NUM_CRITICAL_SECTIONS]; u_int begin_set[NUM_CRITICAL_SECTIONS]; u_int end_set[NUM_CRITICAL_SECTIONS]; const struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; int downloaded; u_int skip_addr; u_int sg_prefetch_cnt; u_int sg_prefetch_cnt_limit; u_int sg_prefetch_align; u_int sg_size; u_int cacheline_mask; uint8_t download_consts[DOWNLOAD_CONST_COUNT]; if (bootverbose) printk("%s: Downloading Sequencer Program...", ahd_name(ahd)); #if DOWNLOAD_CONST_COUNT != 8 #error "Download Const Mismatch" #endif /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* * Setup downloadable constant table. * * The computation for the S/G prefetch variables is * a bit complicated. We would like to always fetch * in terms of cachelined sized increments. However, * if the cacheline is not an even multiple of the * SG element size or is larger than our SG RAM, using * just the cache size might leave us with only a portion * of an SG element at the tail of a prefetch. If the * cacheline is larger than our S/G prefetch buffer less * the size of an SG element, we may round down to a cacheline * that doesn't contain any or all of the S/G of interest * within the bounds of our S/G ram. Provide variables to * the sequencer that will allow it to handle these edge * cases. */ /* Start by aligning to the nearest cacheline. */ sg_prefetch_align = ahd->pci_cachesize; if (sg_prefetch_align == 0) sg_prefetch_align = 8; /* Round down to the nearest power of 2. */ while (powerof2(sg_prefetch_align) == 0) sg_prefetch_align--; cacheline_mask = sg_prefetch_align - 1; /* * If the cacheline boundary is greater than half our prefetch RAM * we risk not being able to fetch even a single complete S/G * segment if we align to that boundary. */ if (sg_prefetch_align > CCSGADDR_MAX/2) sg_prefetch_align = CCSGADDR_MAX/2; /* Start by fetching a single cacheline. 
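 * As an illustration only (assuming a 64-byte cacheline and an 8-byte
 * S/G element, neither of which is guaranteed here): the computation
 * below yields sg_prefetch_cnt = 64, no extra cacheline since 64 is a
 * multiple of 8, and sg_prefetch_cnt_limit = -(64 - 8 + 1) = -57,
 * which the 8-bit download constant encodes as 0xC7.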
*/ sg_prefetch_cnt = sg_prefetch_align; /* * Increment the prefetch count by cachelines until * at least one S/G element will fit. */ sg_size = sizeof(struct ahd_dma_seg); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) sg_size = sizeof(struct ahd_dma64_seg); while (sg_prefetch_cnt < sg_size) sg_prefetch_cnt += sg_prefetch_align; /* * If the cacheline is not an even multiple of * the S/G size, we may only get a partial S/G when * we align. Add a cacheline if this is the case. */ if ((sg_prefetch_align % sg_size) != 0 && (sg_prefetch_cnt < CCSGADDR_MAX)) sg_prefetch_cnt += sg_prefetch_align; /* * Lastly, compute a value that the sequencer can use * to determine if the remainder of the CCSGRAM buffer * has a full S/G element in it. */ sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); download_consts[SG_SIZEOF] = sg_size; download_consts[PKT_OVERRUN_BUFOFFSET] = (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; download_consts[CACHELINE_MASK] = cacheline_mask; cur_patch = patches; downloaded = 0; skip_addr = 0; ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahd_outw(ahd, PRGMCNT, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } /* * Move through the CS table until we find a CS * that might apply to this instruction. */ for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahd_download_instr(ahd, i, download_consts); downloaded++; } ahd->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC); if (ahd->critical_sections == NULL) panic("ahd_loadseq: Could not malloc"); } ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printk(" %d instructions downloaded\n", downloaded); printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); } } static int ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, u_int start_instr, u_int *skip_addr) { const struct patch *cur_patch; const struct patch *last_patch; u_int num_patches; num_patches = ARRAY_SIZE(patches); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahd) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next * one and wait for our intruction pointer to * hit this point. 
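 * When patch_func() rejects a patch, skip_addr is set so the next
 * skip_instr instructions are not downloaded and cur_patch jumps ahead
 * by skip_patch entries; when it accepts, only cur_patch advances and
 * downloading continues normally.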
*/ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) { const struct patch *cur_patch; int address_offset; u_int skip_addr; u_int i; address_offset = 0; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahd_check_patch(ahd, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = min(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } return (address - address_offset); } static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { fmt3_ins = &instr.format3; fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); } fallthrough; case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; fallthrough; case AIC_OP_ROL: { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; /* The sequencer is a little endian cpu */ instr.integer = ahd_htole32(instr.integer); ahd_outsb(ahd, SEQRAM, instr.bytes, 4); break; } default: panic("Unknown opcode encountered in seq program"); break; } } static int ahd_probe_stack_size(struct ahd_softc *ahd) { int last_probe; last_probe = 0; while (1) { int i; /* * We avoid using 0 as a pattern to avoid * confusion if the stack implementation * "back-fills" with zeros when "poping' * entries. */ for (i = 1; i <= last_probe+1; i++) { ahd_outb(ahd, STACK, i & 0xFF); ahd_outb(ahd, STACK, (i >> 8) & 0xFF); } /* Verify */ for (i = last_probe+1; i > 0; i--) { u_int stack_entry; stack_entry = ahd_inb(ahd, STACK) |(ahd_inb(ahd, STACK) << 8); if (stack_entry != i) goto sized; } last_probe++; } sized: return (last_probe); } int ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; if (cur_column != NULL && *cur_column >= wrap_point) { printk("\n"); *cur_column = 0; } printed = printk("%s[0x%x]", name, value); if (table == NULL) { printed += printk(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printk("%s%s", printed_mask == 0 ? 
":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printk(") "); else printed += printk(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahd_dump_card_state(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int dffstat; int paused; u_int scb_index; u_int saved_scb_index; u_int cur_col; int i; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", ahd_name(ahd), ahd_inw(ahd, CURADDR), ahd_build_mode_state(ahd, ahd->saved_src_mode, ahd->saved_dst_mode)); if (paused) printk("Card was paused\n"); if (ahd_check_cmdcmpltqueues(ahd)) printk("Completions are pending\n"); /* * Mode independent registers. */ cur_col = 0; ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50); ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50); ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50); ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50); ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT), &cur_col, 50); ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50); ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID), &cur_col, 50); ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); printk("\n"); printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x " "CURRSCB 0x%x NEXTSCB 0x%x\n", ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), ahd_inw(ahd, NEXTSCB)); cur_col = 0; /* QINFIFO */ ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); saved_scb_index = ahd_get_scbptr(ahd); printk("Pending list:"); i = 0; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (i++ > 
AHD_SCB_MAX) break; cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), &cur_col, 60); ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), &cur_col, 60); } printk("\nTotal %d\n", i); printk("Kernel Free SCB list: "); i = 0; TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; list_scb = scb; do { printk("%d ", SCB_GET_TAG(list_scb)); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb && i++ < AHD_SCB_MAX); } LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (i++ > AHD_SCB_MAX) break; printk("%d ", SCB_GET_TAG(scb)); } printk("\n"); printk("Sequencer Complete DMA-inprog list: "); scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printk("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printk("\n"); printk("Sequencer Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printk("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printk("\n"); printk("Sequencer DMA-Up and Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printk("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printk("\n"); printk("Sequencer On QFreeze and Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printk("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printk("\n"); ahd_set_scbptr(ahd, saved_scb_index); dffstat = ahd_inb(ahd, DFFSTAT); for (i = 0; i < 2; i++) { #ifdef AHD_DEBUG struct scb *fifo_scb; #endif u_int fifo_scbptr; ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); fifo_scbptr = ahd_get_scbptr(ahd); printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", ahd_name(ahd), i, (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); cur_col = 0; ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), &cur_col, 50); ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); if (cur_col > 50) { printk("\n"); cur_col = 0; } cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ", ahd_inl(ahd, SHADDR+4), ahd_inl(ahd, SHADDR), (ahd_inb(ahd, SHCNT) | (ahd_inb(ahd, SHCNT + 1) << 8) | (ahd_inb(ahd, SHCNT + 2) << 16))); if (cur_col > 50) { printk("\n"); cur_col = 0; } cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ", ahd_inl(ahd, HADDR+4), ahd_inl(ahd, HADDR), (ahd_inb(ahd, HCNT) | (ahd_inb(ahd, HCNT + 1) << 8) | (ahd_inb(ahd, HCNT + 2) << 16))); ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SG) != 0) { fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); if (fifo_scb != NULL) ahd_dump_sglist(fifo_scb); } #endif } printk("\nLQIN: "); for (i = 0; i < 20; i++) printk("0x%x ", ahd_inb(ahd, LQIN + i)); printk("\n"); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), ahd_inb(ahd, OPTIONMODE)); printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), ahd_inb(ahd, MAXCMDCNT)); printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN)); ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); printk("\n"); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); cur_col = 0; ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); printk("\n"); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), ahd_inw(ahd, DINDEX)); printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", ahd_name(ahd), ahd_get_scbptr(ahd), ahd_inw_scbram(ahd, SCB_NEXT), ahd_inw_scbram(ahd, SCB_NEXT2)); printk("CDB %x %x %x %x %x %x\n", ahd_inb_scbram(ahd, SCB_CDB_STORE), ahd_inb_scbram(ahd, SCB_CDB_STORE+1), ahd_inb_scbram(ahd, SCB_CDB_STORE+2), ahd_inb_scbram(ahd, SCB_CDB_STORE+3), ahd_inb_scbram(ahd, SCB_CDB_STORE+4), ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); printk("STACK:"); for (i = 0; i < ahd->stack_size; i++) { ahd->saved_stack[i] = ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); printk(" 0x%x", ahd->saved_stack[i]); } for (i = ahd->stack_size-1; i >= 0; i--) { ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); } printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahd_restore_modes(ahd, saved_modes); if (paused == 0) ahd_unpause(ahd); } #if 0 void ahd_dump_scbs(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int saved_scb_index; int i; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scb_index = ahd_get_scbptr(ahd); for (i = 0; i < AHD_SCB_MAX; i++) { ahd_set_scbptr(ahd, i); printk("%3d", i); printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", ahd_inb_scbram(ahd, 
SCB_CONTROL), ahd_inb_scbram(ahd, SCB_SCSIID), ahd_inw_scbram(ahd, SCB_NEXT), ahd_inw_scbram(ahd, SCB_NEXT2), ahd_inl_scbram(ahd, SCB_SGPTR), ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR)); } printk("\n"); ahd_set_scbptr(ahd, saved_scb_index); ahd_restore_modes(ahd, saved_modes); } #endif /* 0 */ /**************************** Flexport Logic **********************************/ /* * Read count 16bit words from 16bit word address start_addr from the * SEEPROM attached to the controller, into buf, using the controller's * SEEPROM reading state machine. Optionally treat the data as a byte * stream in terms of byte order. */ int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count, int bytestream) { u_int cur_addr; u_int end_addr; int error; /* * If we never make it through the loop even once, * we were passed invalid arguments. */ error = EINVAL; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); error = ahd_wait_seeprom(ahd); if (error) break; if (bytestream != 0) { uint8_t *bytestream_ptr; bytestream_ptr = (uint8_t *)buf; *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); } else { /* * ahd_inw() already handles machine byte order. */ *buf = ahd_inw(ahd, SEEDAT); } buf++; } return (error); } /* * Write count 16bit words from buf, into SEEPROM attache to the * controller starting at 16bit word address start_addr, using the * controller's SEEPROM writing state machine. */ int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count) { u_int cur_addr; u_int end_addr; int error; int retval; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); error = ENOENT; /* Place the chip into write-enable mode */ ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); /* * Write the data. If we don't get through the loop at * least once, the arguments were invalid. */ retval = EINVAL; end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outw(ahd, SEEDAT, *buf++); ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); retval = ahd_wait_seeprom(ahd); if (retval) break; } /* * Disable writes. */ ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); return (retval); } /* * Wait ~100us for the serial eeprom to satisfy our request. */ static int ahd_wait_seeprom(struct ahd_softc *ahd) { int cnt; cnt = 5000; while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) ahd_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } /* * Validate the two checksums in the per_channel * vital product data struct. 
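 * Each stored checksum byte is expected to be the two's complement of
 * the byte sum of its region (resource_type up to vpd_checksum, and
 * default_target_flags up to checksum), so a region passes when
 * (sum + stored) & 0xFF == 0; a zero sum is also rejected.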
*/ static int ahd_verify_vpd_cksum(struct vpd_config *vpd) { int i; int maxaddr; uint32_t checksum; uint8_t *vpdarray; vpdarray = (uint8_t *)vpd; maxaddr = offsetof(struct vpd_config, vpd_checksum); checksum = 0; for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->vpd_checksum) return (0); checksum = 0; maxaddr = offsetof(struct vpd_config, checksum); for (i = offsetof(struct vpd_config, default_target_flags); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->checksum) return (0); return (1); } int ahd_verify_cksum(struct seeprom_config *sc) { int i; int maxaddr; uint32_t checksum; uint16_t *scarray; maxaddr = (sizeof(*sc)/2) - 1; checksum = 0; scarray = (uint16_t *)sc; for (i = 0; i < maxaddr; i++) checksum = checksum + scarray[i]; if (checksum == 0 || (checksum & 0xFFFF) != sc->checksum) { return (0); } else { return (1); } } int ahd_acquire_seeprom(struct ahd_softc *ahd) { /* * We should be able to determine the SEEPROM type * from the flexport logic, but unfortunately not * all implementations have this logic and there is * no programatic method for determining if the logic * is present. */ return (1); #if 0 uint8_t seetype; int error; error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); if (error != 0 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) return (0); return (1); #endif } void ahd_release_seeprom(struct ahd_softc *ahd) { /* Currently a no-op */ } /* * Wait at most 2 seconds for flexport arbitration to succeed. */ static int ahd_wait_flexport(struct ahd_softc *ahd) { int cnt; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); cnt = 1000000 * 2 / 5; while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) ahd_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } int ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_write_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); ahd_outb(ahd, BRDDAT, value); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_read_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); *value = ahd_inb(ahd, BRDDAT); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } /************************* Target Mode ****************************************/ #ifdef AHD_TARGET_MODE cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, struct ahd_tmode_tstate **tstate, struct ahd_tmode_lstate **lstate, int notfound_failure) { if ((ahd->features & AHD_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. 
*/ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahd->black_hole; } else { u_int max_id; max_id = (ahd->features & AHD_WIDE) ? 16 : 8; if (ccb->ccb_h.target_id >= max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) { #if NOT_YET struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_int target; u_int lun; u_int target_mask; u_long s; char channel; status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if ((ahd->features & AHD_MULTIROLE) != 0) { u_int our_id; our_id = ahd->our_id; if (ccb->ccb_h.target_id != our_id) { if ((ahd->features & AHD_MULTI_TID) != 0 && (ahd->flags & AHD_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahd->flags & AHD_INITIATORROLE) != 0 || ahd->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahd->flags & AHD_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; printk("Configuring Target Mode\n"); ahd_lock(ahd, &s); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahd_unlock(ahd, &s); return; } ahd->flags |= AHD_TARGETROLE; if ((ahd->features & AHD_MULTIROLE) == 0) ahd->flags &= ~AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); ahd_unlock(ahd, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahd, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq1; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printk("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. */ ccb->ccb_h.status = CAM_REQ_INVALID; printk("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. 
*/ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahd_alloc_tstate(ahd, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { kfree(lstate); xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahd_lock(ahd, &s); ahd_pause(ahd); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahd->enabled_luns++; if ((ahd->features & AHD_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahd_inw(ahd, TARGID); targid_mask |= target_mask; ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahd, sim); our_id = SIM_SCSI_ID(ahd, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahd_inb(ahd, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 'B' : 'A'; if ((ahd->features & AHD_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; ahd->our_id = target; if (swap) ahd_outb(ahd, SBLKCTL, sblkctl ^ SELBUSB); ahd_outb(ahd, SCSIID, target); if (swap) ahd_outb(ahd, SBLKCTL, sblkctl); } } } else ahd->black_hole = lstate; /* Allow select-in operations */ if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); } ahd_unpause(ahd); ahd_unlock(ahd, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printk("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahd_lock(ahd, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printk("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahd_unlock(ahd, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printk("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printk("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahd_unlock(ahd, &s); return; } xpt_print_path(ccb->ccb_h.path); printk("Target mode disabled\n"); xpt_free_path(lstate->path); kfree(lstate); ahd_pause(ahd); /* Can we clean up the target too? 
*/ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahd->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahd_free_tstate(ahd, target, channel, /*force*/FALSE); if (ahd->features & AHD_MULTI_TID) { u_int targid_mask; targid_mask = ahd_inw(ahd, TARGID); targid_mask &= ~target_mask; ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } } } else { ahd->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahd->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq1; scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); if ((ahd->features & AHD_MULTIROLE) == 0) { printk("Configuring Initiator Mode\n"); ahd->flags &= ~AHD_TARGETROLE; ahd->flags |= AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahd_unpause(ahd); ahd_unlock(ahd, &s); } #endif } static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) { #if NOT_YET u_int scsiid_mask; u_int scsiid; if ((ahd->features & AHD_MULTI_TID) == 0) panic("ahd_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. */ if ((ahd->features & AHD_ULTRA2) != 0) scsiid = ahd_inb(ahd, SCSIID_ULTRA2); else scsiid = ahd_inb(ahd, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahd->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahd->features & AHD_ULTRA2) != 0) ahd_outb(ahd, SCSIID_ULTRA2, scsiid); else ahd_outb(ahd, SCSIID, scsiid); #endif } static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) { struct target_cmd *cmd; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahd_handle_target_cmd(ahd, cmd) != 0) break; cmd->cmd_valid = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahd->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { u_int hs_mailbox; hs_mailbox = ahd_inb(ahd, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; ahd_outb(ahd, HS_MAILBOX, hs_mailbox); } } } static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahd, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahd->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. 
*/ if (lstate == NULL) lstate = ahd->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahd->flags |= AHD_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ return (1); } else ahd->flags &= ~AHD_TQINFIFO_BLOCKED; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printk("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahd->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahd->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printk("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printk("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahd->pending_device); #endif ahd->pending_device = lstate; ahd_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
linux-master
drivers/scsi/aic7xxx/aic79xx_core.c
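/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of the
 * checksum scheme that ahd_verify_cksum() in aic79xx_core.c above expects for
 * an image returned by ahd_read_seeprom(): the low 16 bits of the sum of every
 * 16-bit word except the final word must equal that final checksum word, and
 * an all-zero sum is rejected.  CONFIG_WORDS and the sample image are
 * hypothetical stand-ins for sizeof(struct seeprom_config)/2 and real
 * SEEPROM contents.
 */
#include <stdint.h>
#include <stdio.h>

#define CONFIG_WORDS 32	/* hypothetical: sizeof(struct seeprom_config)/2 */

/* Checksum word a valid image should carry in its last slot. */
static uint16_t seeprom_cksum(const uint16_t *image, unsigned int nwords)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < nwords - 1; i++)
		sum += image[i];
	return (uint16_t)(sum & 0xFFFF);
}

/* Acceptance test mirroring ahd_verify_cksum(): zero checksums are invalid. */
static int seeprom_image_ok(const uint16_t *image, unsigned int nwords)
{
	uint16_t sum = seeprom_cksum(image, nwords);

	return sum != 0 && sum == image[nwords - 1];
}

int main(void)
{
	uint16_t image[CONFIG_WORDS] = { 0x1234, 0x0007, 0x00ff };

	image[CONFIG_WORDS - 1] = seeprom_cksum(image, CONFIG_WORDS);
	printf("checksum word 0x%04x, image is %s\n", image[CONFIG_WORDS - 1],
	       seeprom_image_ok(image, CONFIG_WORDS) ? "valid" : "invalid");
	return 0;
}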
/* * Product specific probe and attach routines for: * aic7901 and aic7902 SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $ */ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aic79xx_pci.h" static inline uint64_t ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) { uint64_t id; id = subvendor | (subdevice << 16) | ((uint64_t)vendor << 32) | ((uint64_t)device << 48); return (id); } #define ID_AIC7902_PCI_REV_A4 0x3 #define ID_AIC7902_PCI_REV_B0 0x10 #define SUBID_HP 0x0E11 #define DEVID_9005_HOSTRAID(id) ((id) & 0x80) #define DEVID_9005_TYPE(id) ((id) & 0xF) #define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ #define DEVID_9005_TYPE_HBA_2EXT 0x1 /* 2 External Ports */ #define DEVID_9005_TYPE_IROC 0x8 /* Raid(0,1,10) Card */ #define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ #define DEVID_9005_MFUNC(id) ((id) & 0x10) #define DEVID_9005_PACKETIZED(id) ((id) & 0x8000) #define SUBID_9005_TYPE(id) ((id) & 0xF) #define SUBID_9005_TYPE_HBA 0x0 /* Standard Card */ #define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ #define SUBID_9005_AUTOTERM(id) (((id) & 0x10) == 0) #define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20) #define SUBID_9005_SEEPTYPE(id) (((id) & 0x0C0) >> 6) #define SUBID_9005_SEEPTYPE_NONE 0x0 #define SUBID_9005_SEEPTYPE_4K 0x1 static ahd_device_setup_t ahd_aic7901_setup; static ahd_device_setup_t ahd_aic7901A_setup; static ahd_device_setup_t ahd_aic7902_setup; static ahd_device_setup_t ahd_aic790X_setup; static const struct ahd_pci_identity ahd_pci_ident_table[] = { /* aic7901 based controllers */ { ID_AHA_29320A, ID_ALL_MASK, "Adaptec 29320A Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AHA_29320ALP, ID_ALL_MASK, "Adaptec 29320ALP PCIx Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AHA_29320LPE, ID_ALL_MASK, "Adaptec 29320LPE PCIe Ultra320 SCSI adapter", ahd_aic7901_setup }, /* aic7901A based controllers */ { ID_AHA_29320LP, ID_ALL_MASK, "Adaptec 29320LP Ultra320 SCSI adapter", ahd_aic7901A_setup }, /* aic7902 based controllers */ { ID_AHA_29320, ID_ALL_MASK, "Adaptec 29320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_29320B, ID_ALL_MASK, "Adaptec 29320B Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320, ID_ALL_MASK, "Adaptec 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320_B, ID_ALL_MASK, "Adaptec 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320_B_DELL, ID_ALL_MASK, "Adaptec (Dell OEM) 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320A, ID_ALL_MASK, "Adaptec 39320A Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D, ID_ALL_MASK, "Adaptec 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_HP, ID_ALL_MASK, "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_B, ID_ALL_MASK, "Adaptec 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_B_HP, ID_ALL_MASK, "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7901 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec AIC7901 Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AIC7901A & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec AIC7901A Ultra320 SCSI adapter", ahd_aic7901A_setup }, { ID_AIC7902 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec AIC7902 Ultra320 SCSI adapter", ahd_aic7902_setup } }; static const u_int ahd_num_pci_devs = ARRAY_SIZE(ahd_pci_ident_table); #define DEVCONFIG 0x40 #define PCIXINITPAT 0x0000E000ul #define PCIXINIT_PCI33_66 0x0000E000ul #define PCIXINIT_PCIX50_66 0x0000C000ul #define 
PCIXINIT_PCIX66_100 0x0000A000ul #define PCIXINIT_PCIX100_133 0x00008000ul #define PCI_BUS_MODES_INDEX(devconfig) \ (((devconfig) & PCIXINITPAT) >> 13) static const char *pci_bus_modes[] = { "PCI bus mode unknown", "PCI bus mode unknown", "PCI bus mode unknown", "PCI bus mode unknown", "PCI-X 101-133MHz", "PCI-X 67-100MHz", "PCI-X 50-66MHz", "PCI 33 or 66MHz" }; #define TESTMODE 0x00000800ul #define IRDY_RST 0x00000200ul #define FRAME_RST 0x00000100ul #define PCI64BIT 0x00000080ul #define MRDCEN 0x00000040ul #define ENDIANSEL 0x00000020ul #define MIXQWENDIANEN 0x00000008ul #define DACEN 0x00000004ul #define STPWLEVEL 0x00000002ul #define QWENDIANSEL 0x00000001ul #define DEVCONFIG1 0x44 #define PREQDIS 0x01 #define CSIZE_LATTIME 0x0c #define CACHESIZE 0x000000fful #define LATTIME 0x0000ff00ul static int ahd_check_extport(struct ahd_softc *ahd); static void ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control); static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); static void ahd_pci_intr(struct ahd_softc *ahd); const struct ahd_pci_identity * ahd_find_pci_device(ahd_dev_softc_t pci) { uint64_t full_id; uint16_t device; uint16_t vendor; uint16_t subdevice; uint16_t subvendor; const struct ahd_pci_identity *entry; u_int i; vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); full_id = ahd_compose_id(device, vendor, subdevice, subvendor); /* * Controllers, mask out the IROC/HostRAID bit */ full_id &= ID_ALL_IROC_MASK; for (i = 0; i < ahd_num_pci_devs; i++) { entry = &ahd_pci_ident_table[i]; if (entry->full_id == (full_id & entry->id_mask)) { /* Honor exclusion entries. */ if (entry->name == NULL) return (NULL); return (entry); } } return (NULL); } int ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry) { u_int command; uint32_t devconfig; uint16_t subvendor; int error; ahd->description = entry->name; /* * Record if this is an HP board. */ subvendor = ahd_pci_read_config(ahd->dev_softc, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); if (subvendor == SUBID_HP) ahd->flags |= AHD_HP_BOARD; error = entry->setup(ahd); if (error != 0) return (error); devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) { ahd->chip |= AHD_PCI; /* Disable PCIX workarounds when running in PCI mode. */ ahd->bugs &= ~AHD_PCIX_BUG_MASK; } else { ahd->chip |= AHD_PCIX; } ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)]; ahd_power_state_change(ahd, AHD_POWER_STATE_D0); error = ahd_pci_map_registers(ahd); if (error != 0) return (error); /* * If we need to support high memory, enable dual * address cycles. This bit must be set to enable * high address bit generation even if we are on a * 64bit bus (PCI64BIT set in devconfig). 
*/ if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { if (bootverbose) printk("%s: Enabling 39Bit Addressing\n", ahd_name(ahd)); devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); devconfig |= DACEN; ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* Ensure busmastering is enabled */ command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); command |= PCIM_CMD_BUSMASTEREN; ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2); error = ahd_softc_init(ahd); if (error != 0) return (error); ahd->bus_intr = ahd_pci_intr; error = ahd_reset(ahd, /*reinit*/FALSE); if (error != 0) return (ENXIO); ahd->pci_cachesize = ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1) & CACHESIZE; ahd->pci_cachesize *= 4; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* See if we have a SEEPROM and perform auto-term */ error = ahd_check_extport(ahd); if (error != 0) return (error); /* Core initialization */ error = ahd_init(ahd); if (error != 0) return (error); ahd->init_level++; /* * Allow interrupts now that we are completely set up. */ return ahd_pci_map_int(ahd); } void __maybe_unused ahd_pci_suspend(struct ahd_softc *ahd) { /* * Save chip register configuration data for chip resets * that occur during runtime and resume events. */ ahd->suspend_state.pci_state.devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); ahd->suspend_state.pci_state.command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); ahd->suspend_state.pci_state.csize_lattime = ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1); } void __maybe_unused ahd_pci_resume(struct ahd_softc *ahd) { ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, ahd->suspend_state.pci_state.devconfig, /*bytes*/4); ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, ahd->suspend_state.pci_state.command, /*bytes*/1); ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME, ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1); } /* * Perform some simple tests that should catch situations where * our registers are invalidly mapped. */ int ahd_pci_test_register_access(struct ahd_softc *ahd) { uint32_t cmd; u_int targpcistat; u_int pci_status1; int error; uint8_t hcntrl; error = EIO; /* * Enable PCI error interrupt status, but suppress NMIs * generated by SERR raised due to target aborts. */ cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); /* * First a simple test to see if any * registers can be read. Reading * HCNTRL has no side effects and has * at least one bit that is guaranteed to * be zero so it is a good register to * use for this test. */ hcntrl = ahd_inb(ahd, HCNTRL); if (hcntrl == 0xFF) goto fail; /* * Next create a situation where write combining * or read prefetching could be initiated by the * CPU or host bridge. Our device does not support * either, so look for data corruption and/or flagged * PCI errors. First pause without causing another * chip reset. */ hcntrl &= ~CHIPRST; ahd_outb(ahd, HCNTRL, hcntrl|PAUSE); while (ahd_is_paused(ahd) == 0) ; /* Clear any PCI errors that occurred before our driver attached.
*/ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); ahd_outb(ahd, TARGPCISTAT, targpcistat); pci_status1 = ahd_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRINT, CLRPCIINT); ahd_outb(ahd, SEQCTL0, PERRORDIS); ahd_outl(ahd, SRAM_BASE, 0x5aa555aa); if (ahd_inl(ahd, SRAM_BASE) != 0x5aa555aa) goto fail; if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); if ((targpcistat & STA) != 0) goto fail; } error = 0; fail: if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); /* Silently clear any latched errors. */ ahd_outb(ahd, TARGPCISTAT, targpcistat); pci_status1 = ahd_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_outb(ahd, CLRINT, CLRPCIINT); } ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS); ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); return (error); } /* * Check the external port logic for a serial eeprom * and termination/cable detection contrls. */ static int ahd_check_extport(struct ahd_softc *ahd) { struct vpd_config vpd; struct seeprom_config *sc; u_int adapter_control; int have_seeprom; int error; sc = ahd->seep_config; have_seeprom = ahd_acquire_seeprom(ahd); if (have_seeprom) { u_int start_addr; /* * Fetch VPD for this function and parse it. */ if (bootverbose) printk("%s: Reading VPD from SEEPROM...", ahd_name(ahd)); /* Address is always in units of 16bit words */ start_addr = ((2 * sizeof(*sc)) + (sizeof(vpd) * (ahd->channel - 'A'))) / 2; error = ahd_read_seeprom(ahd, (uint16_t *)&vpd, start_addr, sizeof(vpd)/2, /*bytestream*/TRUE); if (error == 0) error = ahd_parse_vpddata(ahd, &vpd); if (bootverbose) printk("%s: VPD parsing %s\n", ahd_name(ahd), error == 0 ? "successful" : "failed"); if (bootverbose) printk("%s: Reading SEEPROM...", ahd_name(ahd)); /* Address is always in units of 16bit words */ start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A'); error = ahd_read_seeprom(ahd, (uint16_t *)sc, start_addr, sizeof(*sc)/2, /*bytestream*/FALSE); if (error != 0) { printk("Unable to read SEEPROM\n"); have_seeprom = 0; } else { have_seeprom = ahd_verify_cksum(sc); if (bootverbose) { if (have_seeprom == 0) printk ("checksum error\n"); else printk ("done.\n"); } } ahd_release_seeprom(ahd); } if (!have_seeprom) { u_int nvram_scb; /* * Pull scratch ram settings and treat them as * if they are the contents of an seeprom if * the 'ADPT', 'BIOS', or 'ASPI' signature is found * in SCB 0xFF. We manually compose the data as 16bit * values to avoid endian issues. 
*/ ahd_set_scbptr(ahd, 0xFF); nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET); if (nvram_scb != 0xFF && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T') || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S') || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) { uint16_t *sc_data; int i; ahd_set_scbptr(ahd, nvram_scb); sc_data = (uint16_t *)sc; for (i = 0; i < 64; i += 2) *sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i); have_seeprom = ahd_verify_cksum(sc); if (have_seeprom) ahd->flags |= AHD_SCB_CONFIG_USED; } } #ifdef AHD_DEBUG if (have_seeprom != 0 && (ahd_debug & AHD_DUMP_SEEPROM) != 0) { uint16_t *sc_data; int i; printk("%s: Seeprom Contents:", ahd_name(ahd)); sc_data = (uint16_t *)sc; for (i = 0; i < (sizeof(*sc)); i += 2) printk("\n\t0x%.4x", sc_data[i]); printk("\n"); } #endif if (!have_seeprom) { if (bootverbose) printk("%s: No SEEPROM available.\n", ahd_name(ahd)); ahd->flags |= AHD_USEDEFAULTS; error = ahd_default_config(ahd); adapter_control = CFAUTOTERM|CFSEAUTOTERM; kfree(ahd->seep_config); ahd->seep_config = NULL; } else { error = ahd_parse_cfgdata(ahd, sc); adapter_control = sc->adapter_control; } if (error != 0) return (error); ahd_configure_termination(ahd, adapter_control); return (0); } static void ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control) { int error; u_int sxfrctl1; uint8_t termctl; uint32_t devconfig; devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); devconfig &= ~STPWLEVEL; if ((ahd->flags & AHD_STPWLEVEL_A) != 0) devconfig |= STPWLEVEL; if (bootverbose) printk("%s: STPWLEVEL is %s\n", ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off"); ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); /* Make sure current sensing is off. */ if ((ahd->flags & AHD_CURRENT_SENSING) != 0) { (void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); } /* * Read to sense. Write to set. */ error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl); if ((adapter_control & CFAUTOTERM) == 0) { if (bootverbose) printk("%s: Manual Primary Termination\n", ahd_name(ahd)); termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH); if ((adapter_control & CFSTERM) != 0) termctl |= FLX_TERMCTL_ENPRILOW; if ((adapter_control & CFWSTERM) != 0) termctl |= FLX_TERMCTL_ENPRIHIGH; } else if (error != 0) { printk("%s: Primary Auto-Term Sensing failed! " "Using Defaults.\n", ahd_name(ahd)); termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH; } if ((adapter_control & CFSEAUTOTERM) == 0) { if (bootverbose) printk("%s: Manual Secondary Termination\n", ahd_name(ahd)); termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH); if ((adapter_control & CFSELOWTERM) != 0) termctl |= FLX_TERMCTL_ENSECLOW; if ((adapter_control & CFSEHIGHTERM) != 0) termctl |= FLX_TERMCTL_ENSECHIGH; } else if (error != 0) { printk("%s: Secondary Auto-Term Sensing failed! " "Using Defaults.\n", ahd_name(ahd)); termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH; } /* * Now set the termination based on what we found. 
*/ sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN; ahd->flags &= ~AHD_TERM_ENB_A; if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) { ahd->flags |= AHD_TERM_ENB_A; sxfrctl1 |= STPWEN; } /* Must set the latch once in order to be effective. */ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); ahd_outb(ahd, SXFRCTL1, sxfrctl1); error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl); if (error != 0) { printk("%s: Unable to set termination settings!\n", ahd_name(ahd)); } else if (bootverbose) { printk("%s: Primary High byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis"); printk("%s: Primary Low byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis"); printk("%s: Secondary High byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis"); printk("%s: Secondary Low byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENSECLOW) ? "En" : "Dis"); } return; } #define DPE 0x80 #define SSE 0x40 #define RMA 0x20 #define RTA 0x10 #define STA 0x08 #define DPR 0x01 static const char *split_status_source[] = { "DFF0", "DFF1", "OVLY", "CMC", }; static const char *pci_status_source[] = { "DFF0", "DFF1", "SG", "CMC", "OVLY", "NONE", "MSI", "TARG" }; static const char *split_status_strings[] = { "%s: Received split response in %s.\n", "%s: Received split completion error message in %s\n", "%s: Receive overrun in %s\n", "%s: Count not complete in %s\n", "%s: Split completion data bucket in %s\n", "%s: Split completion address error in %s\n", "%s: Split completion byte count error in %s\n", "%s: Signaled Target-abort to early terminate a split in %s\n" }; static const char *pci_status_strings[] = { "%s: Data Parity Error has been reported via PERR# in %s\n", "%s: Target initial wait state error in %s\n", "%s: Split completion read data parity error in %s\n", "%s: Split completion address attribute parity error in %s\n", "%s: Received a Target Abort in %s\n", "%s: Received a Master Abort in %s\n", "%s: Signal System Error Detected in %s\n", "%s: Address or Write Phase Parity Error Detected in %s.\n" }; static void ahd_pci_intr(struct ahd_softc *ahd) { uint8_t pci_status[8]; ahd_mode_state saved_modes; u_int pci_status1; u_int intstat; u_int i; u_int reg; intstat = ahd_inb(ahd, INTSTAT); if ((intstat & SPLTINT) != 0) ahd_pci_split_intr(ahd, intstat); if ((intstat & PCIINT) == 0) return; printk("%s: PCI error Interrupt\n", ahd_name(ahd)); saved_modes = ahd_save_modes(ahd); ahd_dump_card_state(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) { if (i == 5) continue; pci_status[i] = ahd_inb(ahd, reg); /* Clear latched errors. So our interrupt deasserts. 
*/ ahd_outb(ahd, reg, pci_status[i]); } for (i = 0; i < 8; i++) { u_int bit; if (i == 5) continue; for (bit = 0; bit < 8; bit++) { if ((pci_status[i] & (0x1 << bit)) != 0) { const char *s; s = pci_status_strings[bit]; if (i == 7/*TARG*/ && bit == 3) s = "%s: Signaled Target Abort\n"; printk(s, ahd_name(ahd), pci_status_source[i]); } } } pci_status1 = ahd_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_restore_modes(ahd, saved_modes); ahd_outb(ahd, CLRINT, CLRPCIINT); ahd_unpause(ahd); } static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) { uint8_t split_status[4]; uint8_t split_status1[4]; uint8_t sg_split_status[2]; uint8_t sg_split_status1[2]; ahd_mode_state saved_modes; u_int i; uint16_t pcix_status; /* * Check for splits in all modes. Modes 0 and 1 * additionally have SG engine splits to look at. */ pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS, /*bytes*/2); printk("%s: PCI Split Interrupt - PCI-X status = 0x%x\n", ahd_name(ahd), pcix_status); saved_modes = ahd_save_modes(ahd); for (i = 0; i < 4; i++) { ahd_set_modes(ahd, i, i); split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0); split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1); /* Clear latched errors. So our interrupt deasserts. */ ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]); ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]); if (i > 1) continue; sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0); sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1); /* Clear latched errors. So our interrupt deasserts. */ ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]); ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]); } for (i = 0; i < 4; i++) { u_int bit; for (bit = 0; bit < 8; bit++) { if ((split_status[i] & (0x1 << bit)) != 0) printk(split_status_strings[bit], ahd_name(ahd), split_status_source[i]); if (i > 1) continue; if ((sg_split_status[i] & (0x1 << bit)) != 0) printk(split_status_strings[bit], ahd_name(ahd), "SG"); } } /* * Clear PCI-X status bits. */ ahd_pci_write_config(ahd->dev_softc, PCIXR_STATUS, pcix_status, /*bytes*/2); ahd_outb(ahd, CLRINT, CLRSPLTINT); ahd_restore_modes(ahd, saved_modes); } static int ahd_aic7901_setup(struct ahd_softc *ahd) { ahd->chip = AHD_AIC7901; ahd->features = AHD_AIC7901_FE; return (ahd_aic790X_setup(ahd)); } static int ahd_aic7901A_setup(struct ahd_softc *ahd) { ahd->chip = AHD_AIC7901A; ahd->features = AHD_AIC7901A_FE; return (ahd_aic790X_setup(ahd)); } static int ahd_aic7902_setup(struct ahd_softc *ahd) { ahd->chip = AHD_AIC7902; ahd->features = AHD_AIC7902_FE; return (ahd_aic790X_setup(ahd)); } static int ahd_aic790X_setup(struct ahd_softc *ahd) { ahd_dev_softc_t pci; u_int rev; pci = ahd->dev_softc; rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev < ID_AIC7902_PCI_REV_A4) { printk("%s: Unable to attach to unsupported chip revision %d\n", ahd_name(ahd), rev); ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2); return (ENXIO); } ahd->channel = ahd_get_pci_function(pci) + 'A'; if (rev < ID_AIC7902_PCI_REV_B0) { /* * Enable A series workarounds. 
*/ ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG | AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG | AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG | AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG | AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG | AHD_PCIX_CHIPRST_BUG|AHD_PCIX_SCBRAM_RD_BUG | AHD_PKTIZED_STATUS_BUG|AHD_PKT_LUN_BUG | AHD_MDFF_WSCBPTR_BUG|AHD_REG_SLOW_SETTLE_BUG | AHD_SET_MODE_BUG|AHD_BUSFREEREV_BUG | AHD_NONPACKFIFO_BUG|AHD_PACED_NEGTABLE_BUG | AHD_FAINT_LED_BUG; /* * IO Cell parameter setup. */ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); if ((ahd->flags & AHD_HP_BOARD) == 0) AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA); } else { /* This is revision B and newer. */ extern uint32_t aic79xx_slowcrc; u_int devconfig1; ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS | AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY | AHD_BUSFREEREV_BUG; ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; /* If the user requested that the SLOWCRC bit to be set. */ if (aic79xx_slowcrc) ahd->features |= AHD_AIC79XXB_SLOWCRC; /* * Some issues have been resolved in the 7901B. */ if ((ahd->features & AHD_MULTI_FUNC) != 0) ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG; /* * IO Cell parameter setup. */ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB); AHD_SET_AMPLITUDE(ahd, AHD_AMPLITUDE_DEF); /* * Set the PREQDIS bit for H2B which disables some workaround * that doesn't work on regular PCI busses. * XXX - Find out exactly what this does from the hardware * folks! */ devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); ahd_pci_write_config(pci, DEVCONFIG1, devconfig1|PREQDIS, /*bytes*/1); devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); } return (0); }
linux-master
drivers/scsi/aic7xxx/aic79xx_pci.c
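/*
 * Editor's note: a standalone sketch of the 64-bit ID packing done by
 * ahd_compose_id() in aic79xx_pci.c above and of the masked comparison that
 * ahd_find_pci_device() performs against ahd_pci_ident_table[].  Only the bit
 * layout (device:48, vendor:32, subdevice:16, subvendor:0) and the
 * "full_id == (probed & id_mask)" test come from the source; the numeric IDs,
 * the mask, and the entry name below are made-up illustrations.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t compose_id(uint16_t device, uint16_t vendor,
			   uint16_t subdevice, uint16_t subvendor)
{
	return (uint64_t)subvendor |
	       ((uint64_t)subdevice << 16) |
	       ((uint64_t)vendor << 32) |
	       ((uint64_t)device << 48);
}

struct ident {
	uint64_t full_id;	/* expected value after masking */
	uint64_t id_mask;	/* which bits of the probed ID matter */
	const char *name;
};

int main(void)
{
	/* Hypothetical entry: match device/vendor, wildcard the subsystem ID. */
	struct ident entry = {
		.full_id = compose_id(0x1234, 0xabcd, 0x0000, 0x0000),
		.id_mask = 0xFFFFFFFF00000000ULL,
		.name	 = "example Ultra320 adapter",
	};
	uint64_t probed = compose_id(0x1234, 0xabcd, 0x0042, 0x0017);

	if (entry.full_id == (probed & entry.id_mask))
		printf("probe matched: %s\n", entry.name);
	return 0;
}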
/* * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * String handling code courtesy of Gerard Roudier's <[email protected]> * sym driver. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_proc.c#19 $ */ #include "aic79xx_osm.h" #include "aic79xx_inline.h" static void ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m, u_int our_id, char channel, u_int target_id); static void ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev); /* * Table of syncrates that don't follow the "divisible by 4" * rule. This table will be expanded in future SCSI specs. */ static const struct { u_int period_factor; u_int period; /* in 100ths of ns */ } scsi_syncrates[] = { { 0x08, 625 }, /* FAST-160 */ { 0x09, 1250 }, /* FAST-80 */ { 0x0a, 2500 }, /* FAST-40 40MHz */ { 0x0b, 3030 }, /* FAST-40 33MHz */ { 0x0c, 5000 } /* FAST-20 */ }; /* * Return the frequency in kHz corresponding to the given * sync period factor. */ static u_int ahd_calc_syncsrate(u_int period_factor) { int i; /* See if the period is in the "exception" table */ for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) { if (period_factor == scsi_syncrates[i].period_factor) { /* Period in kHz */ return (100000000 / scsi_syncrates[i].period); } } /* * Wasn't in the table, so use the standard * 4 times conversion. 
*/ return (10000000 / (period_factor * 4 * 10)); } static void ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) { u_int speed; u_int freq; u_int mb; if (tinfo->period == AHD_PERIOD_UNKNOWN) { seq_puts(m, "Renegotiation Pending\n"); return; } speed = 3300; freq = 0; if (tinfo->offset != 0) { freq = ahd_calc_syncsrate(tinfo->period); speed = freq; } speed *= (0x01 << tinfo->width); mb = speed / 1000; if (mb > 0) seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000); else seq_printf(m, "%dKB/s transfers", speed); if (freq != 0) { int printed_options; printed_options = 0; seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { seq_puts(m, " RDSTRM"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { seq_puts(m, printed_options ? "|DT" : " DT"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { seq_puts(m, printed_options ? "|IU" : " IU"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { seq_puts(m, printed_options ? "|RTI" : " RTI"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { seq_puts(m, printed_options ? "|QAS" : " QAS"); printed_options++; } } if (tinfo->width > 0) { if (freq != 0) { seq_puts(m, ", "); } else { seq_puts(m, " ("); } seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); } else if (freq != 0) { seq_putc(m, ')'); } seq_putc(m, '\n'); } static void ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m, u_int our_id, char channel, u_int target_id) { struct scsi_target *starget; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int lun; tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id, &tstate); seq_printf(m, "Target %d Negotiation Settings\n", target_id); seq_puts(m, "\tUser: "); ahd_format_transinfo(m, &tinfo->user); starget = ahd->platform_data->starget[target_id]; if (starget == NULL) return; seq_puts(m, "\tGoal: "); ahd_format_transinfo(m, &tinfo->goal); seq_puts(m, "\tCurr: "); ahd_format_transinfo(m, &tinfo->curr); for (lun = 0; lun < AHD_NUM_LUNS; lun++) { struct scsi_device *dev; dev = scsi_device_lookup_by_target(starget, lun); if (dev == NULL) continue; ahd_dump_device_state(m, dev); } } static void ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev) { struct ahd_linux_device *dev = scsi_transport_device_data(sdev); seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", sdev->sdev_target->channel + 'A', sdev->sdev_target->id, (u8)sdev->lun); seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); seq_printf(m, "\t\tCommands Active %d\n", dev->active); seq_printf(m, "\t\tCommand Openings %d\n", dev->openings); seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags); seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); } int ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length) { struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; ahd_mode_state saved_modes; int have_seeprom; u_long s; int paused; int written; /* Default to failure. 
*/ written = -EINVAL; ahd_lock(ahd, &s); paused = ahd_is_paused(ahd); if (!paused) ahd_pause(ahd); saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if (length != sizeof(struct seeprom_config)) { printk("ahd_proc_write_seeprom: incorrect buffer size\n"); goto done; } have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer); if (have_seeprom == 0) { printk("ahd_proc_write_seeprom: cksum verification failed\n"); goto done; } have_seeprom = ahd_acquire_seeprom(ahd); if (!have_seeprom) { printk("ahd_proc_write_seeprom: No Serial EEPROM\n"); goto done; } else { u_int start_addr; if (ahd->seep_config == NULL) { ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC); if (ahd->seep_config == NULL) { printk("aic79xx: Unable to allocate serial " "eeprom buffer. Write failing\n"); goto done; } } printk("aic79xx: Writing Serial EEPROM\n"); start_addr = 32 * (ahd->channel - 'A'); ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr, sizeof(struct seeprom_config)/2); ahd_read_seeprom(ahd, (uint16_t *)ahd->seep_config, start_addr, sizeof(struct seeprom_config)/2, /*ByteStream*/FALSE); ahd_release_seeprom(ahd); written = length; } done: ahd_restore_modes(ahd, saved_modes); if (!paused) ahd_unpause(ahd); ahd_unlock(ahd, &s); return (written); } /* * Return information to handle /proc support for the driver. */ int ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; char ahd_info[256]; u_int max_targ; u_int i; seq_printf(m, "Adaptec AIC79xx driver version: %s\n", AIC79XX_DRIVER_VERSION); seq_printf(m, "%s\n", ahd->description); ahd_controller_info(ahd, ahd_info); seq_printf(m, "%s\n", ahd_info); seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n", ahd->scb_data.numscbs, AHD_NSEG); max_targ = 16; if (ahd->seep_config == NULL) seq_puts(m, "No Serial EEPROM\n"); else { seq_puts(m, "Serial EEPROM:\n"); for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { if (((i % 8) == 0) && (i != 0)) { seq_putc(m, '\n'); } seq_printf(m, "0x%.4x ", ((uint16_t*)ahd->seep_config)[i]); } seq_putc(m, '\n'); } seq_putc(m, '\n'); if ((ahd->features & AHD_WIDE) == 0) max_targ = 8; for (i = 0; i < max_targ; i++) { ahd_dump_target_state(ahd, m, ahd->our_id, 'A', /*target_id*/i); } return 0; }
linux-master
drivers/scsi/aic7xxx/aic79xx_proc.c
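/*
 * Editor's note: a standalone sketch of the transfer-period arithmetic used by
 * ahd_calc_syncsrate() and ahd_format_transinfo() in aic79xx_proc.c above.
 * The exception table is copied from that file; main() and the sample period
 * factors are illustrative only.  E.g. factor 0x08 maps to 625 hundredths of a
 * nanosecond, i.e. 160000 kHz, which on a 16-bit wide bus reports as 320 MB/s.
 */
#include <stdio.h>

static const struct {
	unsigned int period_factor;
	unsigned int period;	/* in 100ths of ns */
} scsi_syncrates[] = {
	{ 0x08,  625 },		/* FAST-160 */
	{ 0x09, 1250 },		/* FAST-80 */
	{ 0x0a, 2500 },		/* FAST-40 40MHz */
	{ 0x0b, 3030 },		/* FAST-40 33MHz */
	{ 0x0c, 5000 }		/* FAST-20 */
};

/* Sync rate in kHz for a period factor, using the same rules as the driver. */
static unsigned int calc_syncsrate(unsigned int period_factor)
{
	unsigned int i;

	for (i = 0; i < sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]); i++)
		if (period_factor == scsi_syncrates[i].period_factor)
			return 100000000 / scsi_syncrates[i].period;
	/* Factors outside the table follow the standard factor * 4ns rule. */
	return 10000000 / (period_factor * 4 * 10);
}

int main(void)
{
	unsigned int freq = calc_syncsrate(0x08);	/* 160000 kHz */
	unsigned int wide = freq * 2;			/* 16-bit bus */

	printf("FAST-160: %u kHz, %u.%03u MB/s wide\n",
	       freq, wide / 1000, wide % 1000);
	printf("factor 0x32: %u kHz\n", calc_syncsrate(0x32)); /* 5000 kHz */
	return 0;
}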
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aicasm/aicasm_insformat.h" /***************************** Lookup Tables **********************************/ static const char *const ahc_chip_names[] = { "NONE", "aic7770", "aic7850", "aic7855", "aic7859", "aic7860", "aic7870", "aic7880", "aic7895", "aic7895C", "aic7890/91", "aic7896/97", "aic7892", "aic7899" }; /* * Hardware error codes. */ struct ahc_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahc_hard_error_entry ahc_hard_errors[] = { { ILLHADDR, "Illegal Host Access" }, { ILLSADDR, "Illegal Sequencer Address referenced" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { PCIERRSTAT, "PCI Error detected" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); static const struct ahc_phase_table_entry ahc_phase_table[] = { { P_DATAOUT, NOP, "in Data-out phase" }, { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" }, { P_DATAOUT_DT, NOP, "in DT Data-out phase" }, { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" }, { P_COMMAND, NOP, "in Command phase" }, { P_MESGOUT, NOP, "in Message-out phase" }, { P_STATUS, INITIATOR_ERROR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, NOP, "while idle" }, { 0, NOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. 
*/ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1; /* * Valid SCSIRATE values. (p. 3-17) * Provides a mapping of tranfer periods in ns to the proper value to * stick in the scsixfer reg. */ static const struct ahc_syncrate ahc_syncrates[] = { /* ultra2 fast/ultra period rate */ { 0x42, 0x000, 9, "80.0" }, { 0x03, 0x000, 10, "40.0" }, { 0x04, 0x000, 11, "33.0" }, { 0x05, 0x100, 12, "20.0" }, { 0x06, 0x110, 15, "16.0" }, { 0x07, 0x120, 18, "13.4" }, { 0x08, 0x000, 25, "10.0" }, { 0x19, 0x010, 31, "8.0" }, { 0x1a, 0x020, 37, "6.67" }, { 0x1b, 0x030, 43, "5.7" }, { 0x1c, 0x040, 50, "5.0" }, { 0x00, 0x050, 56, "4.4" }, { 0x00, 0x060, 62, "4.0" }, { 0x00, 0x070, 68, "3.6" }, { 0x00, 0x000, 0, NULL } }; /* Our Sequencer Program */ #include "aic7xxx_seq.h" /**************************** Function Declarations ***************************/ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static struct ahc_tmode_tstate* ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel); #ifdef AHC_TARGET_MODE static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force); #endif static const struct ahc_syncrate* ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahc_update_pending_scbs(struct ahc_softc *ahc); static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_assert_atn(struct ahc_softc *ahc); static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset); static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width); static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahc_clear_msg_state(struct ahc_softc *ahc); static void ahc_handle_proto_violation(struct ahc_softc *ahc); static void ahc_handle_message_phase(struct ahc_softc *ahc); typedef enum { AHCMSG_1B, AHCMSG_2B, AHCMSG_EXT } ahc_msgtype; static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full); static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level); #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); #endif static bus_dmamap_callback_t ahc_dmamap_cb; static void ahc_build_free_scb_list(struct ahc_softc *ahc); static int ahc_init_scbdata(struct ahc_softc *ahc); static void ahc_fini_scbdata(struct ahc_softc *ahc); static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb); static int ahc_qinfifo_count(struct ahc_softc *ahc); static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr); 
static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); static void ahc_reset_current_bus(struct ahc_softc *ahc); #ifdef AHC_DUMP_SEQ static void ahc_dumpseq(struct ahc_softc *ahc); #endif static int ahc_loadseq(struct ahc_softc *ahc); static int ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts); #ifdef AHC_TARGET_MODE static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask); static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd); #endif static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int busyid); /************************** SCB and SCB queue management **********************/ static void ahc_run_untagged_queues(struct ahc_softc *ahc); static void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue); /****************************** Initialization ********************************/ static void ahc_alloc_scbs(struct ahc_softc *ahc); static void ahc_shutdown(void *arg); /*************************** Interrupt Services *******************************/ static void ahc_clear_intstat(struct ahc_softc *ahc); static void ahc_run_qoutfifo(struct ahc_softc *ahc); #ifdef AHC_TARGET_MODE static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); #endif static void ahc_handle_brkadrint(struct ahc_softc *ahc); static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); static void ahc_clear_critical_section(struct ahc_softc *ahc); /***************************** Error Recovery *********************************/ static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); static int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb); /*********************** Untagged Transaction Routines ************************/ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); static inline void ahc_release_untagged_queues(struct ahc_softc *ahc); /* * Block our completion routine from starting the next untagged * transaction for this target or target lun. */ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) ahc->untagged_queue_lock++; } /* * Allow the next untagged transaction for this target or target lun * to be executed. We use a counting semaphore to allow the lock * to be acquired recursively. Once the count drops to zero, the * transaction queues will be run. */ static inline void ahc_release_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) { ahc->untagged_queue_lock--; if (ahc->untagged_queue_lock == 0) ahc_run_untagged_queues(ahc); } } /************************* Sequencer Execution Control ************************/ /* * Work around any chip bugs related to halting sequencer execution. * On Ultra2 controllers, we must clear the CIOBUS stretch signal by * reading a register that will set this signal and deassert it. 
* Without this workaround, if the chip is paused, by an interrupt or * manual pause while accessing scb ram, accesses to certain registers * will hang the system (infinite pci retries). */ static void ahc_pause_bug_fix(struct ahc_softc *ahc) { if ((ahc->features & AHC_ULTRA2) != 0) (void)ahc_inb(ahc, CCSCBCTL); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ int ahc_is_paused(struct ahc_softc *ahc) { return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ void ahc_pause(struct ahc_softc *ahc) { ahc_outb(ahc, HCNTRL, ahc->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahc_is_paused(ahc) == 0) ; ahc_pause_bug_fix(ahc); } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ void ahc_unpause(struct ahc_softc *ahc) { if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) ahc_outb(ahc, HCNTRL, ahc->unpause); } /************************** Memory mapping routines ***************************/ static struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) { int sg_index; sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); /* sg_list_phys points to entry 1, not 0 */ sg_index++; return (&scb->sg_list[sg_index]); } static uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) { int sg_index; /* sg_list_phys points to entry 1, not 0 */ sg_index = sg - &scb->sg_list[1]; return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); } static uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) { return (ahc->scb_data->hscb_busaddr + (sizeof(struct hardware_scb) * index)); } static void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) { ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, ahc->scb_data->hscb_dmamap, /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), /*len*/sizeof(*scb->hscb), op); } void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) { if (scb->sg_count == 0) return; ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) * sizeof(struct ahc_dma_seg), /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); } #ifdef AHC_TARGET_MODE static uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) { return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); } #endif /*********************** Miscellaneous Support Functions ***********************/ /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) { uint32_t sgptr; sgptr = ahc_le32toh(scb->hscb->sgptr); if ((sgptr & SG_RESID_VALID) != 0) ahc_calc_residual(ahc, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. 
*/ struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahc->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } uint16_t ahc_inw(struct ahc_softc *ahc, u_int port) { uint16_t r = ahc_inb(ahc, port+1) << 8; return r | ahc_inb(ahc, port); } void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); } uint32_t ahc_inl(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24)); } void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) { ahc_outb(ahc, port, (value) & 0xFF); ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); } uint64_t ahc_inq(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (((uint64_t)ahc_inb(ahc, port+3)) << 24) | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | (((uint64_t)ahc_inb(ahc, port+6)) << 48) | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); } void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); ahc_outb(ahc, port+2, (value >> 16) & 0xFF); ahc_outb(ahc, port+3, (value >> 24) & 0xFF); ahc_outb(ahc, port+4, (value >> 32) & 0xFF); ahc_outb(ahc, port+5, (value >> 40) & 0xFF); ahc_outb(ahc, port+6, (value >> 48) & 0xFF); ahc_outb(ahc, port+7, (value >> 56) & 0xFF); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ struct scb * ahc_get_scb(struct ahc_softc *ahc) { struct scb *scb; if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { ahc_alloc_scbs(ahc); scb = SLIST_FIRST(&ahc->scb_data->free_scbs); if (scb == NULL) return (NULL); } SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); return (scb); } /* * Return an SCB resource to the free list. */ void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; hscb = scb->hscb; /* Clean up for the next user */ ahc->scb_data->scbindex[hscb->tag] = NULL; scb->flags = SCB_FREE; hscb->control = 0; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); /* Notify the OSM that a resource is now available. */ ahc_platform_scb_free(ahc, scb); } struct scb * ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) { struct scb* scb; scb = ahc->scb_data->scbindex[tag]; if (scb != NULL) ahc_sync_scb(ahc, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *q_hscb; u_int saved_tag; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB to download, and we * can't disappoint it. To achieve this, the next * SCB to download is saved off in ahc->next_queued_scb. 
* When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahc->next_queued_scb->hscb; saved_tag = q_hscb->tag; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); if ((scb->flags & SCB_CDB32_PTR) != 0) { q_hscb->shared_data.cdb_ptr = ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + offsetof(struct hardware_scb, cdb32)); } q_hscb->tag = saved_tag; q_hscb->next = scb->hscb->tag; /* Now swap HSCB pointers. */ ahc->next_queued_scb->hscb = scb->hscb; scb->hscb = q_hscb; /* Now define the mapping from tag to SCB in the scbindex */ ahc->scb_data->scbindex[scb->hscb->tag] = scb; } /* * Tell the sequencer about a new transaction to execute. */ void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) { ahc_swap_with_next_hscb(ahc, scb); if (scb->hscb->tag == SCB_LIST_NULL || scb->hscb->next == SCB_LIST_NULL) panic("Attempt to queue invalid SCB tag %x:%x\n", scb->hscb->tag, scb->hscb->next); /* * Setup data "oddness". */ scb->hscb->lun &= LID; if (ahc_get_transfer_length(scb) & 0x1) scb->hscb->lun |= SCB_XFERLEN_ODD; /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; /* * Make sure our data is consistent from the * perspective of the adapter. */ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* Tell the adapter about the newly queued SCB */ if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_pause(ahc); ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_unpause(ahc); } } struct scsi_sense_data * ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (&ahc->scb_data->sense[offset]); } static uint32_t ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (ahc->scb_data->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } /************************** Interrupt Processing ******************************/ static void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/0, /*len*/256, op); } static void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op) { #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, 0), sizeof(struct target_cmd) * AHC_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. 
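 *
 * The return value is a bitmask of AHC_RUN_QOUTFIFO and/or
 * AHC_RUN_TQINFIFO (defined below).  ahc_intr() only tests it for
 * non-zero, using it to infer a pending CMDCMPLT without reading
 * INTSTAT, roughly:
 *
 *	if (ahc_check_cmdcmpltqueues(ahc) != 0)
 *		intstat = CMDCMPLT;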
*/ #define AHC_RUN_QOUTFIFO 0x1 #define AHC_RUN_TQINFIFO 0x2 static u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) { u_int retval; retval = 0; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/ahc->qoutfifonext, /*len*/1, BUS_DMASYNC_POSTREAD); if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) retval |= AHC_RUN_QOUTFIFO; #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) retval |= AHC_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ int ahc_intr(struct ahc_softc *ahc) { u_int intstat; if ((ahc->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. */ if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0 && (ahc_check_cmdcmpltqueues(ahc) != 0)) intstat = CMDCMPLT; else { intstat = ahc_inb(ahc, INTSTAT); } if ((intstat & INT_PEND) == 0) { #if AHC_PCI_CONFIG > 0 if (ahc->unsolicited_ints > 500) { ahc->unsolicited_ints = 0; if ((ahc->chip & AHC_PCI) != 0 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) ahc->bus_intr(ahc); } #endif ahc->unsolicited_ints++; return (0); } ahc->unsolicited_ints = 0; if (intstat & CMDCMPLT) { ahc_outb(ahc, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. */ ahc_flush_device_writes(ahc); ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) ahc_run_tqinfifo(ahc, /*paused*/FALSE); #endif } /* * Handle statuses that may invalidate our cached * copy of INTSTAT separately. */ if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) { /* Hot eject. Do nothing */ } else if (intstat & BRKADRINT) { ahc_handle_brkadrint(ahc); } else if ((intstat & (SEQINT|SCSIINT)) != 0) { ahc_pause_bug_fix(ahc); if ((intstat & SEQINT) != 0) ahc_handle_seqint(ahc, intstat); if ((intstat & SCSIINT) != 0) ahc_handle_scsiint(ahc, intstat); } return (1); } /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ static void ahc_restart(struct ahc_softc *ahc) { uint8_t sblkctl; ahc_pause(ahc); /* No more pending messages. */ ahc_clear_msg_state(ahc); ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ ahc_outb(ahc, MSG_OUT, NOP); /* No message to send */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); ahc_outb(ahc, LASTPHASE, P_BUSFREE); ahc_outb(ahc, SAVED_SCSIID, 0xFF); ahc_outb(ahc, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. 
*/ ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); /* Always allow reselection */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Ensure that no DMA operations are in progress */ ahc_outb(ahc, CCSCBCNT, 0); ahc_outb(ahc, CCSGCTL, 0); ahc_outb(ahc, CCSCBCTL, 0); } /* * If we were in the process of DMA'ing SCB data into * an SCB, replace that SCB on the free list. This prevents * an SCB leak. */ if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { ahc_add_curscb_to_free_list(ahc); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); } /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. */ ahc_outb(ahc, CLRINT, CLRSEQINT); ahc_outb(ahc, MWI_RESIDUAL, 0); ahc_outb(ahc, SEQCTL, ahc->seqctl); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); /* * Take the LED out of diagnostic mode on PM resume, too */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); ahc_unpause(ahc); } /************************* Input/Output Queues ********************************/ static void ahc_run_qoutfifo(struct ahc_softc *ahc) { struct scb *scb; u_int scb_index; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { scb_index = ahc->qoutfifo[ahc->qoutfifonext]; if ((ahc->qoutfifonext & 0x03) == 0x03) { u_int modnext; /* * Clear 32bits of QOUTFIFO at a time * so that we don't clobber an incoming * byte DMA to the array on architectures * that only support 32bit load and store * operations. */ modnext = ahc->qoutfifonext & ~0x3; *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/modnext, /*len*/4, BUS_DMASYNC_PREREAD); } ahc->qoutfifonext++; scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printk("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahc_name(ahc), scb_index, (ahc->qoutfifonext - 1) & 0xFF); continue; } /* * Save off the residual * if there is one. 
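 *
 * (ahc_update_residual(), defined earlier, only computes a
 * residual when the sequencer has set SG_RESID_VALID in the
 * hscb's sgptr.)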
*/ ahc_update_residual(ahc, scb); ahc_done(ahc, scb); } } static void ahc_run_untagged_queues(struct ahc_softc *ahc) { int i; for (i = 0; i < 16; i++) ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); } static void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) { struct scb *scb; if (ahc->untagged_queue_lock != 0) return; if ((scb = TAILQ_FIRST(queue)) != NULL && (scb->flags & SCB_ACTIVE) == 0) { scb->flags |= SCB_ACTIVE; ahc_queue_scb(ahc, scb); } } /************************* Interrupt Handling *********************************/ static void ahc_handle_brkadrint(struct ahc_softc *ahc) { /* * We upset the sequencer :-( * Lookup the error message */ int i; int error; error = ahc_inb(ahc, ERROR); for (i = 0; error != 1 && i < num_errors; i++) error >>= 1; printk("%s: brkadrint, %s at seqaddr = 0x%x\n", ahc_name(ahc), ahc_hard_errors[i].errmesg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); ahc_dump_card_state(ahc); /* Tell everyone that this HBA is no longer available */ ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Disable all interrupt sources by resetting the controller */ ahc_shutdown(ahc); } static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) { struct scb *scb; struct ahc_devinfo devinfo; ahc_fetch_devinfo(ahc, &devinfo); /* * Clear the upper byte that holds SEQINT status * codes and clear the SEQINT bit. We will unpause * the sequencer, if appropriate, after servicing * the request. */ ahc_outb(ahc, CLRINT, CLRSEQINT); switch (intstat & SEQINT_MASK) { case BAD_STATUS: { u_int scb_index; struct hardware_scb *hscb; /* * Set the default return value to 0 (don't * send sense). The sense code will change * this if needed. */ ahc_outb(ahc, RETURN_1, 0); /* * The sequencer will notify us when a command * has an error that would be of interest to * the kernel. This allows us to leave the sequencer * running in the common case of command completes * without error. The sequencer will already have * dma'd the SCB back up to us, so we can reference * the in kernel copy directly. */ scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); printk("ahc_intr - referenced scb " "not valid during seqint 0x%x scb(%d)\n", intstat, scb_index); ahc_dump_card_state(ahc); panic("for safety"); goto unpause; } hscb = scb->hscb; /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and have * the sequencer do a normal command * complete. */ scb->flags &= ~SCB_SENSE; ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); break; } ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); /* Freeze the queue until the client sees the error. 
*/ ahc_freeze_devq(ahc, scb); ahc_freeze_scb(scb); ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); switch (hscb->shared_data.status.scsi_status) { case SAM_STAT_GOOD: printk("%s: Interrupted for status of 0???\n", ahc_name(ahc)); break; case SAM_STAT_COMMAND_TERMINATED: case SAM_STAT_CHECK_CONDITION: { struct ahc_dma_seg *sg; struct scsi_sense *sc; struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; struct ahc_transinfo *tinfo; #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printk("SCB %d: requests Check Status\n", scb->hscb->tag); } #endif if (ahc_perform_autosense(scb) == 0) break; targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)(&hscb->shared_data.cdb); /* * Save off the residual if there is one. */ ahc_update_residual(ahc, scb); #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printk("Sending Sense\n"); } #endif sg->addr = ahc_get_sense_bufaddr(ahc, scb); sg->len = ahc_get_sense_bufsize(ahc, scb); sg->len |= AHC_DMA_LAST_SEG; /* Fixup byte order */ sg->addr = ahc_htole32(sg->addr); sg->len = ahc_htole32(sg->len); sc->opcode = REQUEST_SENSE; sc->byte2 = 0; if (tinfo->protocol_version <= SCSI_REV_2 && SCB_GET_LUN(scb) < 8) sc->byte2 = SCB_GET_LUN(scb) << 5; sc->unused[0] = 0; sc->unused[1] = 0; sc->length = sg->len; sc->control = 0; /* * We can't allow the target to disconnect. * This will be an untagged transaction and * having the target disconnect will make this * transaction indestinguishable from outstanding * tagged transactions. */ hscb->control = 0; /* * This request sense could be because the * the device lost power or in some other * way has lost our transfer negotiations. * Renegotiate if appropriate. Unit attention * errors will be reported before any data * phases occur. */ if (ahc_get_residual(scb) == ahc_get_transfer_length(scb)) { ahc_update_neg_request(ahc, &devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } if (tstate->auto_negotiate & devinfo.target_mask) { hscb->control |= MK_MESSAGE; scb->flags &= ~SCB_NEGOTIATE; scb->flags |= SCB_AUTO_NEGOTIATE; } hscb->cdb_len = sizeof(*sc); hscb->dataptr = sg->addr; hscb->datacnt = sg->len; hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; hscb->sgptr = ahc_htole32(hscb->sgptr); scb->sg_count = 1; scb->flags |= SCB_SENSE; ahc_qinfifo_requeue_tail(ahc, scb); ahc_outb(ahc, RETURN_1, SEND_SENSE); /* * Ensure we have enough time to actually * retrieve the sense. 
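 *
 * (The ahc_scb_timer_reset() call below gives the automatic
 * REQUEST SENSE built above a fresh timeout of its own, so the
 * remainder of the original command's timer cannot cut the
 * sense fetch short.)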
*/ ahc_scb_timer_reset(scb, 5 * 1000000); break; } default: break; } break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); printk("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahc_name(ahc), devinfo.channel, devinfo.target); printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX)); printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); ahc_dump_card_state(ahc); ahc->msgout_buf[0] = TARGET_RESET; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_assert_atn(ahc); break; } case SEND_REJECT: { u_int rejbyte = ahc_inb(ahc, ACCUM); printk("%s:%c:%d: Warning - unknown message received from " "target (0x%x). Rejecting\n", ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); break; } case PROTO_VIOLATION: { ahc_handle_proto_violation(ahc); break; } case IGN_WIDE_RES: ahc_handle_ign_wide_residue(ahc, &devinfo); break; case PDATA_REINIT: ahc_reinitialize_dataptrs(ahc); break; case BAD_PHASE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printk("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printk("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); ahc_restart(ahc); return; } case HOST_MSG_LOOP: { /* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is * transferred so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. */ if (ahc->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printk("ahc_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message. 
*/ ahc_clear_intstat(ahc); ahc_restart(ahc); return; } scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (bus_phase == P_MESGOUT) { if (scb == NULL) panic("HOST_MSG_LOOP with " "invalid SCB %x\n", scb_index); ahc_setup_initiator_msgout(ahc, &devinfo, scb); } else { ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; } } #ifdef AHC_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc->msgin_index = 0; } else ahc_setup_target_msgin(ahc, &devinfo, scb); } #endif } ahc_handle_message_phase(ahc); break; } case PERR_DETECTED: { /* * If we've cleared the parity error interrupt * but the sequencer still believes that SCSIPERR * is true, it must be that the parity error is * for the currently presented byte on the bus, * and we are not in a phase (data-in) where we will * eventually ack this byte. Ack the byte and * throw it away in the hope that the target will * take us to message out to deliver the appropriate * error message. */ if ((intstat & SCSIINT) == 0 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { if ((ahc->features & AHC_DT) == 0) { u_int curphase; /* * The hardware will only let you ack bytes * if the expected phase in SCSISIGO matches * the current phase. Make sure this is * currently the case. */ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; ahc_outb(ahc, LASTPHASE, curphase); ahc_outb(ahc, SCSISIGO, curphase); } if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { int wait; /* * In a data phase. Faster to bitbucket * the data than to individually ack each * byte. This is also the only strategy * that will work with AUTOACK enabled. */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) | BITBUCKET); wait = 5000; while (--wait != 0) { if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) != 0) break; ahc_delay(100); } ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); if (wait == 0) { struct scb *scb; u_int scb_index; ahc_print_devinfo(ahc, &devinfo); printk("Unable to clear parity error. " "Resetting bus.\n"); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL) ahc_set_transaction_status(scb, CAM_UNCOR_PARITY); ahc_reset_channel(ahc, devinfo.channel, /*init reset*/TRUE); } } else { ahc_inb(ahc, SCSIDATL); } } break; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. */ u_int scbindex = ahc_inb(ahc, SCB_TAG); u_int lastphase = ahc_inb(ahc, LASTPHASE); u_int i; scb = ahc_lookup_scb(ahc, scbindex); for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } ahc_print_path(ahc, scb); printk("data overrun detected %s." " Tag == 0x%x.\n", ahc_phase_table[i].phasemsg, scb->hscb->tag); ahc_print_path(ahc, scb); printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", ahc_get_transfer_length(scb), scb->sg_count); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printk("sg[%d] - Addr 0x%x%x : Length %d\n", i, (ahc_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), ahc_le32toh(scb->sg_list[i].addr), ahc_le32toh(scb->sg_list[i].len) & AHC_SG_LEN_MASK); } } /* * Set this and it will take effect when the * target does a command complete. 
*/ ahc_freeze_devq(ahc, scb); if ((scb->flags & SCB_SENSE) == 0) { ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); } else { scb->flags &= ~SCB_SENSE; ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); } ahc_freeze_scb(scb); if ((ahc->features & AHC_ULTRA2) != 0) { /* * Clear the channel in case we return * to data phase later. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; /* Ensure HHADDR is 0 for future DMA operations. */ dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, 0); ahc_outb(ahc, DSCOMMAND1, dscommand1); } break; } case MKMSG_FAILED: { u_int scbindex; printk("%s:%c:%d:%d: Attempt to issue message failed\n", ahc_name(ahc), devinfo.channel, devinfo.target, devinfo.lun); scbindex = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbindex); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); break; } case NO_FREE_SCB: { printk("%s: No free or disconnected SCBs\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("for safety"); break; } case SCB_MISMATCH: { u_int scbptr; scbptr = ahc_inb(ahc, SCBPTR); printk("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", scbptr, ahc_inb(ahc, ARG_1), ahc->scb_data->hscbs[scbptr].tag); ahc_dump_card_state(ahc); panic("for safety"); break; } case OUT_OF_RANGE: { printk("%s: BTT calculation out of range\n", ahc_name(ahc)); printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n, A == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX), ahc_inb(ahc, ACCUM)); printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); ahc_dump_card_state(ahc); panic("for safety"); break; } default: printk("ahc_intr: seqint, " "intstat == 0x%x, scsisigi = 0x%x\n", intstat, ahc_inb(ahc, SCSISIGI)); break; } unpause: /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. 
*/ ahc_unpause(ahc); } static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) { u_int scb_index; u_int status0; u_int status; struct scb *scb; char cur_channel; char intr_channel; if ((ahc->features & AHC_TWIN) != 0 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) cur_channel = 'B'; else cur_channel = 'A'; intr_channel = cur_channel; if ((ahc->features & AHC_ULTRA2) != 0) status0 = ahc_inb(ahc, SSTAT0) & IOERR; else status0 = 0; status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); if (status == 0 && status0 == 0) { if ((ahc->features & AHC_TWIN) != 0) { /* Try the other channel */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); intr_channel = (cur_channel == 'A') ? 'B' : 'A'; } if (status == 0) { printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); return; } } /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; if ((ahc->features & AHC_ULTRA2) != 0 && (status0 & IOERR) != 0) { int now_lvd; now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; printk("%s: Transceiver State Has Changed to %s mode\n", ahc_name(ahc), now_lvd ? "LVD" : "SE"); ahc_outb(ahc, CLRSINT0, CLRIOERR); /* * When transitioning to SE mode, the reset line * glitches, triggering an arbitration bug in some * Ultra2 controllers. This bug is cleared when we * assert the reset line. Since a reset glitch has * already occurred with this transition and a * transceiver state change is handled just like * a bus reset anyway, asserting the reset line * ourselves is safe. */ ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/now_lvd == 0); } else if ((status & SCSIRSTI) != 0) { printk("%s: Someone reset channel %c\n", ahc_name(ahc), intr_channel); if (intr_channel != cur_channel) ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); } else if ((status & SCSIPERR) != 0) { /* * Determine the bus phase and queue an appropriate message. * SCSIPERR is latched true as soon as a parity error * occurs. If the sequencer acked the transfer that * caused the parity error and the currently presented * transfer on the bus has correct parity, SCSIPERR will * be cleared by CLRSCSIPERR. Use this to determine if * we should look at the last phase the sequencer recorded, * or the current phase presented on the bus. */ struct ahc_devinfo devinfo; u_int mesg_out; u_int curphase; u_int errorphase; u_int lastphase; u_int scsirate; u_int i; u_int sstat2; int silent; lastphase = ahc_inb(ahc, LASTPHASE); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; sstat2 = ahc_inb(ahc, SSTAT2); ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); /* * For all phases save DATA, the sequencer won't * automatically ack a byte that has a parity error * in it. So the only way that the current phase * could be 'data-in' is if the parity error is for * an already acked byte in the data phase. During * synchronous data-in transfers, we may actually * ack bytes before latching the current phase in * LASTPHASE, leading to the discrepancy between * curphase and lastphase. 
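 *
 * Hence the test below: if SCSIPERR is still latched, or the bus
 * is presenting a data-in phase, report the error against
 * curphase; otherwise fall back to lastphase.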
*/ if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 || curphase == P_DATAIN || curphase == P_DATAIN_DT) errorphase = curphase; else errorphase = lastphase; for (i = 0; i < num_phases; i++) { if (errorphase == ahc_phase_table[i].phase) break; } mesg_out = ahc_phase_table[i].mesg_out; silent = FALSE; if (scb != NULL) { if (SCB_IS_SILENT(scb)) silent = TRUE; else ahc_print_path(ahc, scb); scb->flags |= SCB_TRANSMISSION_ERROR; } else printk("%s:%c:%d: ", ahc_name(ahc), intr_channel, SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); scsirate = ahc_inb(ahc, SCSIRATE); if (silent == FALSE) { printk("parity error detected %s. " "SEQADDR(0x%x) SCSIRATE(0x%x)\n", ahc_phase_table[i].phasemsg, ahc_inw(ahc, SEQADDR0), scsirate); if ((ahc->features & AHC_DT) != 0) { if ((sstat2 & CRCVALERR) != 0) printk("\tCRC Value Mismatch\n"); if ((sstat2 & CRCENDERR) != 0) printk("\tNo terminal CRC packet " "received\n"); if ((sstat2 & CRCREQERR) != 0) printk("\tIllegal CRC packet " "request\n"); if ((sstat2 & DUAL_EDGE_ERR) != 0) printk("\tUnexpected %sDT Data Phase\n", (scsirate & SINGLE_EDGE) ? "" : "non-"); } } if ((ahc->features & AHC_DT) != 0 && (sstat2 & DUAL_EDGE_ERR) != 0) { /* * This error applies regardless of * data direction, so ignore the value * in the phase table. */ mesg_out = INITIATOR_ERROR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than MSG_NOP. */ if (mesg_out != NOP) { if (ahc->msg_type != MSG_TYPE_NONE) ahc->send_msg_perror = TRUE; else ahc_outb(ahc, MSG_OUT, mesg_out); } /* * Force a renegotiation with this target just in * case we are out of sync for some external reason * unknown (or unreported) by the target. */ ahc_fetch_devinfo(ahc, &devinfo); ahc_force_renegotiation(ahc, &devinfo); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); } else if ((status & SELTO) != 0) { u_int scbptr; /* Stop the selection */ ahc_outb(ahc, SCSISEQ, 0); /* No more pending messages */ ahc_clear_msg_state(ahc); /* Clear interrupt state */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy * LED does. SELINGO is only cleared by a successful * selection, so we must manually clear it to insure * the LED turns off just incase no future successful * selections occur (e.g. no devices on the bus). */ ahc_outb(ahc, CLRSINT0, CLRSELINGO); scbptr = ahc_inb(ahc, WAITING_SCBH); ahc_outb(ahc, SCBPTR, scbptr); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printk("%s: ahc_intr - referenced scb not " "valid during SELTO scb(%d, %d)\n", ahc_name(ahc), scbptr, scb_index); ahc_dump_card_state(ahc); } else { struct ahc_devinfo devinfo; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_SELTO) != 0) { ahc_print_path(ahc, scb); printk("Saw Selection Timeout for SCB 0x%x\n", scb_index); } #endif ahc_scb_devinfo(ahc, &devinfo, scb); ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahc_freeze_devq(ahc, scb); /* * Cancel any pending transactions on the device * now that it seems to be missing. This will * also revert us to async/narrow transfers until * we can renegotiate with the device. 
*/ ahc_handle_devreset(ahc, &devinfo, CAM_SEL_TIMEOUT, "Selection Timeout", /*verbose_level*/1); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else if ((status & BUSFREE) != 0 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { struct ahc_devinfo devinfo; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; char channel; int printerror; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); /* * Disable busfree interrupts and clear the busfree * interrupt status. We do this here so that several * bus transactions occur prior to clearing the SCSIINT * latch. It can take a bit for the clearing to take effect. */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); /* * Look at what phase we were last in. * If its message out, chances are pretty good * that the busfree was in response to one of * our abort requests. */ lastphase = ahc_inb(ahc, LASTPHASE); saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); saved_lun = ahc_inb(ahc, SAVED_LUN); target = SCSIID_TARGET(ahc, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); channel = SCSIID_CHANNEL(ahc, saved_scsiid); ahc_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, channel, ROLE_INITIATOR); printerror = 1; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK, TRUE) || ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK_SET, TRUE)) { if (ahc->msgout_buf[ahc->msgout_index - 1] == ABORT_TASK) tag = scb->hscb->tag; ahc_print_path(ahc, scb); printk("SCB %d - Abort%s Completed.\n", scb->hscb->tag, tag == SCB_LIST_NULL ? "" : " Tag"); ahc_abort_scbs(ahc, target, channel, saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_1B, TARGET_RESET, TRUE)) { ahc_compile_devinfo(&devinfo, initiator_role_id, target, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_handle_devreset(ahc, &devinfo, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, FALSE)) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; /* * PPR Rejected. Try non-ppr negotiation * and retry command. */ tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, FALSE)) { /* * Negotiation Rejected. Go-narrow and * retry command. */ ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, FALSE)) { /* * Negotiation Rejected. Go-async and * retry command. 
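 *
 * (Passing a NULL syncrate with zero period/offset to
 * ahc_set_syncrate() below is what forces asynchronous
 * transfers -- see that routine later in this file.)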
*/ ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } } if (printerror != 0) { u_int i; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = scb->hscb->tag; else tag = SCB_LIST_NULL; ahc_print_path(ahc, scb); ahc_abort_scbs(ahc, target, channel, SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printk("%s: ", ahc_name(ahc)); } for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } if (lastphase != P_BUSFREE) { /* * Renegotiate with this device at the * next opportunity just in case this busfree * is due to a negotiation mismatch with the * device. */ ahc_force_renegotiation(ahc, &devinfo); } printk("Unexpected busfree %s\n" "SEQADDR == 0x%x\n", ahc_phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else { printk("%s: Missing case in ahc_handle_scsiint. status = %x\n", ahc_name(ahc), status); ahc_outb(ahc, CLRINT, CLRSCSIINT); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. */ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; targ_info = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahc_update_neg_request(ahc, devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } #define AHC_MAX_STEPS 2000 static void ahc_clear_critical_section(struct ahc_softc *ahc) { int stepping; int steps; u_int simode0; u_int simode1; if (ahc->num_critical_sections == 0) return; stepping = FALSE; steps = 0; simode0 = 0; simode1 = 0; for (;;) { struct cs *cs; u_int seqaddr; u_int i; seqaddr = ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8); /* * Seqaddr represents the next instruction to execute, * so we are really executing the instruction just * before it. */ if (seqaddr != 0) seqaddr -= 1; cs = ahc->critical_sections; for (i = 0; i < ahc->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahc->num_critical_sections) break; if (steps > AHC_MAX_STEPS) { printk("%s: Infinite loop in critical section\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("critical section loop"); } steps++; if (stepping == FALSE) { /* * Disable all interrupt sources so that the * sequencer will not be stuck by a pausing * interrupt condition while we attempt to * leave a critical section. */ simode0 = ahc_inb(ahc, SIMODE0); ahc_outb(ahc, SIMODE0, 0); simode1 = ahc_inb(ahc, SIMODE1); if ((ahc->features & AHC_DT) != 0) /* * On DT class controllers, we * use the enhanced busfree logic. * Unfortunately we cannot re-enable * busfree detection within the * current connection, so we must * leave it on while single stepping. 
*/ ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); else ahc_outb(ahc, SIMODE1, 0); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); stepping = TRUE; } if ((ahc->features & AHC_DT) != 0) { ahc_outb(ahc, CLRSINT1, CLRBUSFREE); ahc_outb(ahc, CLRINT, CLRSCSIINT); } ahc_outb(ahc, HCNTRL, ahc->unpause); while (!ahc_is_paused(ahc)) ahc_delay(200); } if (stepping) { ahc_outb(ahc, SIMODE0, simode0); ahc_outb(ahc, SIMODE1, simode1); ahc_outb(ahc, SEQCTL, ahc->seqctl); } } /* * Clear any pending interrupt status. */ static void ahc_clear_intstat(struct ahc_softc *ahc) { /* Clear any interrupt conditions this may have caused */ ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| CLRREQINIT); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_flush_device_writes(ahc); } /**************************** Debugging Routines ******************************/ #ifdef AHC_DEBUG uint32_t ahc_debug = AHC_DEBUG_OPTS; #endif #if 0 /* unused */ static void ahc_print_scb(struct scb *scb) { int i; struct hardware_scb *hscb = scb->hscb; printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printk("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) printk("%#02x", hscb->shared_data.cdb[i]); printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", ahc_le32toh(hscb->dataptr), ahc_le32toh(hscb->datacnt), ahc_le32toh(hscb->sgptr), hscb->tag); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printk("sg[%d] - Addr 0x%x%x : Length %d\n", i, (ahc_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), ahc_le32toh(scb->sg_list[i].addr), ahc_le32toh(scb->sg_list[i].len)); } } } #endif /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static struct ahc_tmode_tstate * ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) { struct ahc_tmode_tstate *master_tstate; struct ahc_tmode_tstate *tstate; int i; master_tstate = ahc->enabled_targets[ahc->our_id]; if (channel == 'B') { scsi_id += 8; master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; } if (ahc->enabled_targets[scsi_id] != NULL && ahc->enabled_targets[scsi_id] != master_tstate) panic("%s: ahc_alloc_tstate - Target already allocated", ahc_name(ahc)); tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); tstate->ultraenb = 0; for (i = 0; i < AHC_NUM_TARGETS; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahc->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHC_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. 
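 *
 * This is the inverse of ahc_alloc_tstate() above.  The "master"
 * tstate for our own id (which holds the default user settings) is
 * only released when 'force' is non-zero.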
*/ static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) { struct ahc_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (((channel == 'B' && scsi_id == ahc->our_id_b) || (channel == 'A' && scsi_id == ahc->our_id)) && force == FALSE) return; if (channel == 'B') scsi_id += 8; tstate = ahc->enabled_targets[scsi_id]; kfree(tstate); ahc->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest syncrate to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ static const struct ahc_syncrate * ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahc_transinfo *transinfo; u_int maxsync; if ((ahc->features & AHC_ULTRA2) != 0) { if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHC_SYNCRATE_DT; } else { maxsync = AHC_SYNCRATE_ULTRA; /* Can't do DT on an SE bus */ *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } } else if ((ahc->features & AHC_ULTRA) != 0) { maxsync = AHC_SYNCRATE_ULTRA; } else { maxsync = AHC_SYNCRATE_FAST; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. */ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= transinfo->ppr_options; if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; return (NULL); } *period = max(*period, (u_int)transinfo->period); return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. */ const struct ahc_syncrate * ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int *ppr_options, u_int maxsync) { const struct ahc_syncrate *syncrate; if ((ahc->features & AHC_DT) == 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; /* Now set the maxsync based on the card capabilities * DT is already done above */ if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 && maxsync < AHC_SYNCRATE_ULTRA) maxsync = AHC_SYNCRATE_ULTRA; if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 && maxsync < AHC_SYNCRATE_FAST) maxsync = AHC_SYNCRATE_FAST; for (syncrate = &ahc_syncrates[maxsync]; syncrate->rate != NULL; syncrate++) { /* * The Ultra2 table doesn't go as low * as for the Fast/Ultra cards. */ if ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0)) break; if (*period <= syncrate->period) { /* * When responding to a target that requests * sync, the requested rate may fall between * two rates that we can output, but still be * a rate that we can receive. 
Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. */ if (syncrate == &ahc_syncrates[maxsync]) *period = syncrate->period; /* * At some speeds, we only support * ST transfers. */ if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } } if ((*period == 0) || (syncrate->rate == NULL) || ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0))) { /* Use asynchronous transfers. */ *period = 0; syncrate = NULL; *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } return (syncrate); } /* * Convert from an entry in our syncrate table to the SCSI equivalent * sync "period" factor. */ u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) { const struct ahc_syncrate *syncrate; if ((ahc->features & AHC_ULTRA2) != 0) scsirate &= SXFR_ULTRA2; else scsirate &= SXFR; /* now set maxsync based on card capabilities */ if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0 && maxsync < AHC_SYNCRATE_ULTRA) maxsync = AHC_SYNCRATE_ULTRA; if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0 && maxsync < AHC_SYNCRATE_FAST) maxsync = AHC_SYNCRATE_FAST; syncrate = &ahc_syncrates[maxsync]; while (syncrate->rate != NULL) { if ((ahc->features & AHC_ULTRA2) != 0) { if (syncrate->sxfr_u2 == 0) break; else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) return (syncrate->period); } else if (scsirate == (syncrate->sxfr & SXFR)) { return (syncrate->period); } syncrate++; } return (0); /* async */ } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ static void ahc_validate_offset(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, const struct ahc_syncrate *syncrate, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (syncrate == NULL) { maxoffset = 0; } else if ((ahc->features & AHC_ULTRA2) != 0) { maxoffset = MAX_OFFSET_ULTRA2; } else { if (wide) maxoffset = MAX_OFFSET_16BIT; else maxoffset = MAX_OFFSET_8BIT; } *offset = min(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = min(*offset, (u_int)tinfo->user.offset); else *offset = min(*offset, (u_int)tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ static void ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahc->features & AHC_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } fallthrough; case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = min((u_int)tinfo->user.width, *bus_width); else *bus_width = min((u_int)tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. 
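 *
 * Returns non-zero when the tstate's auto_negotiate mask actually
 * changed.  Callers such as ahc_set_syncrate() and ahc_set_width()
 * accumulate this result to decide whether the pending SCBs must be
 * patched, roughly:
 *
 *	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
 *						tinfo, AHC_NEG_TO_GOAL);
 *	if (update_needed)
 *		ahc_update_pending_scbs(ahc);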
*/ int ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_tmode_tstate *tstate, struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHC_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. */ if ((ahc->features & AHC_WIDE) != 0) tinfo->curr.width = AHC_WIDTH_UNKNOWN; tinfo->curr.period = AHC_PERIOD_UNKNOWN; tinfo->curr.offset = AHC_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHC_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, const struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; if (syncrate == NULL) { period = 0; offset = 0; } tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHC_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHC_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; if ((ahc->features & AHC_ULTRA2) != 0) { scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); if (syncrate != NULL) { scsirate |= syncrate->sxfr_u2; if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) scsirate |= ENABLE_CRC; else scsirate |= SINGLE_EDGE; } } else { scsirate &= ~(SXFR|SOFS); /* * Ensure Ultra mode is set properly for * this target. 
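 *
 * (ultraenb is a per-target bitmask; when this target is the
 * active connection, the FAST20 bit in SXFRCTL0 is rewritten
 * below to match it.)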
*/ tstate->ultraenb &= ~devinfo->target_mask; if (syncrate != NULL) { if (syncrate->sxfr & ULTRA_SXFR) { tstate->ultraenb |= devinfo->target_mask; } scsirate |= syncrate->sxfr & SXFR; scsirate |= offset & SOFS; } if (active) { u_int sxfrctl0; sxfrctl0 = ahc_inb(ahc, SXFRCTL0); sxfrctl0 &= ~FAST20; if (tstate->ultraenb & devinfo->target_mask) sxfrctl0 |= FAST20; ahc_outb(ahc, SXFRCTL0, sxfrctl0); } } if (active) { ahc_outb(ahc, SCSIRATE, scsirate); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIOFFSET, offset); } tinfo->scsirate = scsirate; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { if (offset != 0) { printk("%s: target %d synchronous at %sMHz%s, " "offset = 0x%x\n", ahc_name(ahc), devinfo->target, syncrate->rate, (ppr_options & MSG_EXT_PPR_DT_REQ) ? " DT" : "", offset); } else { printk("%s: target %d using " "asynchronous transfers\n", ahc_name(ahc), devinfo->target); } } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHC_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; scsirate &= ~WIDEXFER; if (width == MSG_EXT_WDTR_BUS_16_BIT) scsirate |= WIDEXFER; tinfo->scsirate = scsirate; if (active) ahc_outb(ahc, SCSIRATE, scsirate); tinfo->curr.width = width; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { printk("%s: target %d using %dbit transfers\n", ahc_name(ahc), devinfo->target, 8 * (0x01 << width)); } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the current state of tagged queuing for a given target. */ static void ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { struct scsi_device *sdev = cmd->device; ahc_platform_set_tags(ahc, sdev, devinfo, alg); ahc_send_async(ahc, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG); } /* * When the transfer settings for a connection change, update any * in-transit SCBs to contain the new data so the hardware will * be set correctly during future (re)selections. */ static void ahc_update_pending_scbs(struct ahc_softc *ahc) { struct scb *pending_scb; int pending_scb_count; int i; int paused; u_int saved_scbptr; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. 
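 *
 * Two passes are required: the in-core hscbs on the pending list
 * are patched first, then (with the sequencer paused) the copies
 * already downloaded into the card's SCB ram are rewritten so that
 * future (re)selections use the new settings.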
*/ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { struct ahc_devinfo devinfo; struct hardware_scb *pending_hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; ahc_scb_devinfo(ahc, &devinfo, pending_scb); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); pending_hscb = pending_scb->hscb; pending_hscb->control &= ~ULTRAENB; if ((tstate->ultraenb & devinfo.target_mask) != 0) pending_hscb->control |= ULTRAENB; pending_hscb->scsirate = tinfo->scsirate; pending_hscb->scsioffset = tinfo->curr.offset; if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_hscb->control &= ~MK_MESSAGE; } ahc_sync_scb(ahc, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); /* Ensure that the hscbs down on the card match the new information */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { struct hardware_scb *pending_hscb; u_int control; u_int scb_tag; ahc_outb(ahc, SCBPTR, i); scb_tag = ahc_inb(ahc, SCB_TAG); pending_scb = ahc_lookup_scb(ahc, scb_tag); if (pending_scb == NULL) continue; pending_hscb = pending_scb->hscb; control = ahc_inb(ahc, SCB_CONTROL); control &= ~(ULTRAENB|MK_MESSAGE); control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); ahc_outb(ahc, SCB_CONTROL, control); ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); } ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /**************************** Pathing Information *****************************/ static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int saved_scsiid; role_t role; int our_id; if (ahc_inb(ahc, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahc->features & AHC_MULTI_TID) != 0 && (ahc_inb(ahc, SEQ_FLAGS) & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahc_inb(ahc, TARGIDIN) & OID; } else if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); ahc_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahc, saved_scsiid), ahc_inb(ahc, SAVED_LUN), SCSIID_CHANNEL(ahc, saved_scsiid), role); } static const struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase) { const struct ahc_phase_table_entry *entry; const struct ahc_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. 
*/ last_entry = &ahc_phase_table[num_phases]; for (entry = ahc_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } void ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->flags & SCB_TARGET_SCB) != 0) role = ROLE_TARGET; ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); } /************************ Message Phase Processing ****************************/ static void ahc_assert_atn(struct ahc_softc *ahc) { u_int scsisigo; scsisigo = ATNO; if ((ahc->features & AHC_DT) == 0) scsisigo |= ahc_inb(ahc, SCSISIGI); ahc_outb(ahc, SCSISIGO, scsisigo); } /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and beging handing * the message phase(s) manually. */ static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahc->msgout_index = 0; ahc->msgout_len = 0; if ((scb->flags & SCB_DEVICE_RESET) == 0 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahc->msgout_buf[ahc->msgout_index++] = identify_msg; ahc->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; ahc->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahc->msgout_buf[ahc->msgout_index++] = TARGET_RESET; ahc->msgout_len++; ahc_print_path(ahc, scb); printk("Bus Device Reset Message Sent\n"); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK; else ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK_SET; ahc->msgout_len++; ahc_print_path(ahc, scb); printk("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. 
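 * (Same as the Bus Device Reset case above: ENSELO is cleared so the
 * chip cannot start a new selection while the abort is outstanding.)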
*/ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahc_build_transfer_msg(ahc, devinfo); } else { printk("ahc_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " "SCB flags = %x", scb->hscb->tag, scb->hscb->control, ahc_inb(ahc, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; const struct ahc_syncrate *rate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; rate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. */ if ((ahc->features & AHC_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahc_print_devinfo(ahc, devinfo); printk("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahc_validate_offset(ahc, tinfo, rate, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahc_construct_ppr(ahc, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahc_construct_sdtr(ahc, devinfo, period, offset); } } else { ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. 
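 * The result is the standard 5-byte SDTR extended message,
 *
 *	0x01 0x03 0x01 <period factor> <REQ/ACK offset>
 *
 * which is why msgout_len is advanced by 5 below.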
*/ static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_index += spi_populate_sync_msg( ahc->msgout_buf + ahc->msgout_index, period, offset); ahc->msgout_len += 5; if (bootverbose) { printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiation message in our message * buffer based on the input parameters. */ static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width) { ahc->msgout_index += spi_populate_width_msg( ahc->msgout_buf + ahc->msgout_index, bus_width); ahc->msgout_len += 4; if (bootverbose) { printk("(%s:%c:%d:%d): Sending WDTR %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_index += spi_populate_ppr_msg( ahc->msgout_buf + ahc->msgout_index, period, offset, bus_width, ppr_options); ahc->msgout_len += 8; if (bootverbose) { printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. */ static void ahc_clear_msg_state(struct ahc_softc *ahc) { ahc->msgout_len = 0; ahc->msgin_index = 0; ahc->msg_type = MSG_TYPE_NONE; if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahc_outb(ahc, CLRSINT1, CLRATNO); } ahc_outb(ahc, MSG_OUT, NOP); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); } static void ahc_handle_proto_violation(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahc_fetch_devinfo(ahc, &devinfo); scbid = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbid); seq_flags = ahc_inb(ahc, SEQ_FLAGS); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; lastphase = ahc_inb(ahc, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahc_print_devinfo(ahc, &devinfo); printk("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. */ ahc_print_devinfo(ahc, &devinfo); printk("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahc_print_path(ahc, scb); printk("No or incomplete CDB sent to device.\n"); } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. 
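 * (The ATN assertion and SCB_ABORT marking happen in the shared code at
 * the end of this routine, provided a full bus reset is not required.)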
*/ ahc_print_path(ahc, scb); printk("Completed command without status.\n"); } else { ahc_print_path(ahc, scb); printk("Unknown protocol violation.\n"); ahc_dump_card_state(ahc); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data/command * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahc_reset_channel(ahc, 'A', TRUE); printk("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahc_name(ahc), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_assert_atn(ahc); ahc_outb(ahc, MSG_OUT, HOST_MSG); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); ahc->msgout_buf[0] = ABORT_TASK; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahc_print_path(ahc, scb); scb->flags |= SCB_ABORT; } printk("Protocol violation %s. Attempting to abort.\n", ahc_lookup_phase_entry(curphase)->phasemsg); } } /* * Manual message loop handler. */ static void ahc_handle_message_phase(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; u_int bus_phase; int end_session; ahc_fetch_devinfo(ahc, &devinfo); end_session = FALSE; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; reswitch: switch (ahc->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahc->msgout_len == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahc_outb(ahc, CLRSINT1, CLRATNO); ahc->send_msg_perror = FALSE; ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahc->send_msg_perror) { ahc_outb(ahc, CLRSINT1, CLRATNO); ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->send_msg_perror); #endif ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahc->msgout_index = 0; ahc_assert_atn(ahc); } lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahc_outb(ahc, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. 
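 * CLRREQINIT acknowledges the REQ that raised this interrupt, and the
 * write to SCSIDATL places the byte on the bus so the automatic PIO
 * logic can complete the handshake for it.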
*/ ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->msgout_buf[ahc->msgout_index]); #endif ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printk(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahc->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahc->send_msg_perror == TRUE || (ahc->msgout_len != 0 && ahc->msgout_index == 0))) { ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printk(" byte 0x%x\n", ahc->msgin_buf[ahc->msgin_index]); #endif message_done = ahc_parse_msg(ahc, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahc->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahc->msgout_len != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printk("Asserting ATN for response\n"); } #endif ahc_assert_atn(ahc); } } else ahc->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); ahc_inb(ahc, SCSIDATL); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; if (ahc->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 && ahc->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); ahc->msgin_index = 0; /* Dummy read to REQ for first byte */ ahc_inb(ahc, SCSIDATL); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); msgdone = ahc_parse_msg(ahc, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. 
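 * In practice that just means returning immediately, skipping the
 * RETURN_1 handshake performed at the bottom of this function.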
*/ return; } ahc->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahc->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahc->msgout_len != 0) { ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc->msg_type = MSG_TYPE_TARGET_MSGIN; ahc->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { ahc_clear_msg_state(ahc); ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); } else ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. */ static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahc->msgout_len) { if (ahc->msgout_buf[index] == EXTENDED_MESSAGE) { u_int end_index; end_index = index + 1 + ahc->msgout_buf[index + 1]; if (ahc->msgout_buf[index+2] == msgval && type == AHCMSG_EXT) { if (full) { if (ahc->msgout_index > end_index) found = TRUE; } else if (ahc->msgout_index > index) found = TRUE; } index = end_index; } else if (ahc->msgout_buf[index] >= SIMPLE_QUEUE_TAG && ahc->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param*/ index += 2; } else { /* Single byte message */ if (type == AHCMSG_1B && ahc->msgout_buf[index] == msgval && ahc->msgout_index > index) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int reject; int done; int response; u_int targ_scsirate; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); targ_scsirate = tinfo->scsirate; /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. */ switch (ahc->msgin_buf[0]) { case DISCONNECT: case SAVE_POINTERS: case COMMAND_COMPLETE: case RESTORE_POINTERS: case IGNORE_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own. */ done = MSGLOOP_TERMINATED; break; case MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); fallthrough; case NOP: done = MSGLOOP_MSGCOMPLETE; break; case EXTENDED_MESSAGE: { /* Wait for enough of the message to begin validation */ if (ahc->msgin_index < 2) break; switch (ahc->msgin_buf[2]) { case EXTENDED_SDTR: { const struct ahc_syncrate *syncrate; u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. 
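 * (msgin_buf[3] carries the transfer period factor and msgin_buf[4] the
 * REQ/ACK offset.)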
* * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahc->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahc->msgin_buf[4]; syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, targ_scsirate & WIDEXFER, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, ahc->msgin_buf[3], saved_offset, period, offset); } ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_sdtr(ahc, devinfo, period, offset); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case EXTENDED_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahc->msgin_buf[3]; saved_width = bus_width; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); if (bootverbose) { printk("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printk("(%s:%c:%d:%d): requested %dBit " "transfers. Rejecting...\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printk("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_wdtr(ahc, devinfo, bus_width); ahc->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_ALWAYS); ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. 
*/ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case EXTENDED_PPR: { const struct ahc_syncrate *syncrate; u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahc->msgin_buf[3]; offset = ahc->msgin_buf[5]; bus_width = ahc->msgin_buf[6]; saved_width = bus_width; ppr_options = ahc->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. */ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period == 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Mask out any options we don't support * on any controller. Transfer options are * only available if we are negotiating wide. */ ppr_options &= MSG_EXT_PPR_DT_REQ; if (bus_width == 0) ppr_options = 0; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, bus_width, devinfo->role); if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. */ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; syncrate = NULL; } } else { if (devinfo->role != ROLE_TARGET) printk("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); else printk("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_ppr(ahc, devinfo, period, offset, bus_width, ppr_options); ahc->msgout_index = 0; response = TRUE; } if (bootverbose) { printk("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahc->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. 
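 * Setting reject here causes a MESSAGE_REJECT to be queued at the end
 * of this routine, telling the target we do not implement the message.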
*/ reject = TRUE; break; } break; } #ifdef AHC_TARGET_MODE case TARGET_RESET: ahc_handle_devreset(ahc, devinfo, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; case ABORT_TASK: case ABORT_TASK_SET: case CLEAR_QUEUE_TASK_SET: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahc->msgin_buf[0] == ABORT_TASK) tag = ahc_inb(ahc, INITIATOR_TAG); ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, ahc->msgin_buf[0], /*arg*/tag); ahc_send_lstate_events(ahc, lstate); } } ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; } #endif case TERMINATE_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahc->msgout_index = 0; ahc->msgout_len = 1; ahc->msgout_buf[0] = MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahc->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. */ struct scb *scb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahc_inb(ahc, LAST_MSG); if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printk("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. 
*/ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); printk("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG); if (tag_type == SIMPLE_QUEUE_TAG) { printk("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE); mask = ~0x23; } else { printk("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, tag_type == ORDERED_QUEUE_TAG ? "ordered" : "head of queue"); ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & mask); scb->hscb->control &= mask; ahc_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/SIMPLE_QUEUE_TAG); ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); ahc_assert_atn(ahc); /* * This transaction is now at the head of * the untagged queue for this target. */ if ((ahc->flags & AHC_SCB_BTT) == 0) { struct scb_tailq *untagged_q; untagged_q = &(ahc->untagged_queues[devinfo->target_offset]); TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), scb->hscb->tag); /* * Requeue all tagged commands for this target * currently in our possession so they can be * converted to untagged commands. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else { /* * Otherwise, we ignore it. */ printk("%s:%c:%d: Message reject for %x -- ignored\n", ahc_name(ahc), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. */ static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. 
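 * Otherwise the extra byte is handed back: the residual count goes up
 * by one and the shadow data address down by one, and if that byte
 * really belonged to the previous S/G segment the residual S/G pointer
 * is rewound to cover it as well.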
*/ } else { struct ahc_dma_seg *sg; uint32_t data_cnt; uint32_t data_addr; uint32_t sglen; /* Pull in all of the sgptr */ sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHC_SG_LEN_MASK; } data_addr = ahc_inl(ahc, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHC_SG_LEN_MASK)) { sg--; sglen = ahc_le32toh(sg->len); /* * Preserve High Address and SG_LIST bits * while setting the count to 1. */ data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); data_addr = ahc_le32toh(sg->addr) + (sglen & AHC_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahc_sg_virt_to_bus(scb, sg); } ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahc_outb(ahc, SCB_LUN, ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. */ static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc) { struct scb *scb; struct ahc_dma_seg *sg; u_int scb_index; uint32_t sgptr; uint32_t resid; uint32_t dataptr; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); dataptr = ahc_le32toh(sg->addr) + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) - resid; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); ahc_outb(ahc, DSCOMMAND1, dscommand1); } ahc_outb(ahc, HADDR + 3, dataptr >> 24); ahc_outb(ahc, HADDR + 2, dataptr >> 16); ahc_outb(ahc, HADDR + 1, dataptr >> 8); ahc_outb(ahc, HADDR, dataptr); ahc_outb(ahc, HCNT + 2, resid >> 16); ahc_outb(ahc, HCNT + 1, resid >> 8); ahc_outb(ahc, HCNT, resid); if ((ahc->features & AHC_ULTRA2) == 0) { ahc_outb(ahc, STCNT + 2, resid >> 16); ahc_outb(ahc, STCNT + 1, resid >> 8); ahc_outb(ahc, STCNT, resid); } } /* * Handle the effects of issuing a bus device reset message. */ static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level) { #ifdef AHC_TARGET_MODE struct ahc_tmode_tstate* tstate; u_int lun; #endif int found; found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, status); #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. 
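 * Every enabled lun on the target has a TARGET_RESET event queued on
 * its lstate so the corresponding peripheral driver can clean up.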
*/ tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, TARGET_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && (verbose_level <= bootverbose)) printk("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), message, devinfo->channel, devinfo->target, found); } #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahc->msgout_index = 0; ahc->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahc_build_transfer_msg(ahc, devinfo); else panic("ahc_intr: AWAITING target message with no message"); ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ /* * Allocate a controller structure for a new device * and perform initial initializion. */ struct ahc_softc * ahc_alloc(void *platform_arg, char *name) { struct ahc_softc *ahc; int i; ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC); if (!ahc) { printk("aic7xxx: cannot malloc softc!\n"); kfree(name); return NULL; } ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC); if (ahc->seep_config == NULL) { kfree(ahc); kfree(name); return (NULL); } LIST_INIT(&ahc->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahc->name = name; ahc->unit = -1; ahc->description = NULL; ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_NONE; ahc->features = AHC_FENONE; ahc->bugs = AHC_BUGNONE; ahc->flags = AHC_FNONE; /* * Default to all error reporting enabled with the * sequencer operating at its fastest speed. * The bus attach code may modify this. 
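 * (Only FASTMODE is set; none of the SEQCTL error-disable bits are
 * turned on, which is what "all error reporting enabled" means here.)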
*/ ahc->seqctl = FASTMODE; for (i = 0; i < AHC_NUM_TARGETS; i++) TAILQ_INIT(&ahc->untagged_queues[i]); if (ahc_platform_alloc(ahc, platform_arg) != 0) { ahc_free(ahc); ahc = NULL; } return (ahc); } int ahc_softc_init(struct ahc_softc *ahc) { /* The IRQMS bit is only valid on VL and EISA chips */ if ((ahc->chip & AHC_PCI) == 0) ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; else ahc->unpause = 0; ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); if (ahc->scb_data == NULL) return (ENOMEM); } return (0); } void ahc_set_unit(struct ahc_softc *ahc, int unit) { ahc->unit = unit; } void ahc_set_name(struct ahc_softc *ahc, char *name) { kfree(ahc->name); ahc->name = name; } void ahc_free(struct ahc_softc *ahc) { int i; switch (ahc->init_level) { default: case 5: ahc_shutdown(ahc); fallthrough; case 4: ahc_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); fallthrough; case 3: ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); fallthrough; case 2: ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); fallthrough; case 1: break; case 0: break; } ahc_platform_free(ahc); ahc_fini_scbdata(ahc); for (i = 0; i < AHC_NUM_TARGETS; i++) { struct ahc_tmode_tstate *tstate; tstate = ahc->enabled_targets[i]; if (tstate != NULL) { #ifdef AHC_TARGET_MODE int j; for (j = 0; j < AHC_NUM_LUNS; j++) { struct ahc_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); kfree(lstate); } } #endif kfree(tstate); } } #ifdef AHC_TARGET_MODE if (ahc->black_hole != NULL) { xpt_free_path(ahc->black_hole->path); kfree(ahc->black_hole); } #endif kfree(ahc->name); kfree(ahc->seep_config); kfree(ahc); return; } static void ahc_shutdown(void *arg) { struct ahc_softc *ahc; int i; ahc = (struct ahc_softc *)arg; /* This will reset most registers to 0, but not all */ ahc_reset(ahc, /*reinit*/FALSE); ahc_outb(ahc, SCSISEQ, 0); ahc_outb(ahc, SXFRCTL0, 0); ahc_outb(ahc, DSPCISTATUS, 0); for (i = TARG_SCSIRATE; i < SCSICONF; i++) ahc_outb(ahc, i, 0); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahc_intr_enable(). */ int ahc_reset(struct ahc_softc *ahc, int reinit) { u_int sblkctl; u_int sxfrctl1_a, sxfrctl1_b; int error; int wait; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus. */ ahc_pause(ahc); sxfrctl1_b = 0; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { u_int sblkctl; /* * Save channel B's settings in case this chip * is setup for TWIN channel operation. */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. 
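 * The loop below polls HCNTRL for CHIPRSTACK in 1ms steps for up to
 * about a second, printing a warning but pressing on if the reset
 * never acknowledges.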
*/ wait = 1000; do { ahc_delay(1000); } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printk("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahc_name(ahc)); } ahc_outb(ahc, HCNTRL, ahc->pause); /* Determine channel configuration */ sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); /* No Twin Channel PCI cards */ if ((ahc->chip & AHC_PCI) != 0) sblkctl &= ~SELBUSB; switch (sblkctl) { case 0: /* Single Narrow Channel */ break; case 2: /* Wide Channel */ ahc->features |= AHC_WIDE; break; case 8: /* Twin Channel */ ahc->features |= AHC_TWIN; break; default: printk(" Unsupported adapter type. Ignoring\n"); return(-1); } /* * Reload sxfrctl1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ if ((ahc->features & AHC_TWIN) != 0) { u_int sblkctl; sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); error = 0; if (reinit != 0) /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking. */ error = ahc->bus_chip_init(ahc); #ifdef AHC_DUMP_SEQ else ahc_dumpseq(ahc); #endif return (error); } /* * Determine the number of SCBs available on the controller */ int ahc_probe_scbs(struct ahc_softc *ahc) { int i; for (i = 0; i < AHC_SCB_MAX; i++) { ahc_outb(ahc, SCBPTR, i); ahc_outb(ahc, SCB_BASE, i); if (ahc_inb(ahc, SCB_BASE) != i) break; ahc_outb(ahc, SCBPTR, 0); if (ahc_inb(ahc, SCB_BASE) != 0) break; } return (i); } static void ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { dma_addr_t *baddr; baddr = (dma_addr_t *)arg; *baddr = segs->ds_addr; } static void ahc_build_free_scb_list(struct ahc_softc *ahc) { int scbsize; int i; scbsize = 32; if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) scbsize = 64; for (i = 0; i < ahc->scb_data->maxhscbs; i++) { int j; ahc_outb(ahc, SCBPTR, i); /* * Touch all SCB bytes to avoid parity errors * should one of our debugging routines read * an otherwise uninitiatlized byte. */ for (j = 0; j < scbsize; j++) ahc_outb(ahc, SCB_BASE+j, 0xFF); /* Clear the control byte. */ ahc_outb(ahc, SCB_CONTROL, 0); /* Set the next pointer */ if ((ahc->flags & AHC_PAGESCBS) != 0) ahc_outb(ahc, SCB_NEXT, i+1); else ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); /* Make the tag number, SCSIID, and lun invalid */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); ahc_outb(ahc, SCB_SCSIID, 0xFF); ahc_outb(ahc, SCB_LUN, 0xFF); } if ((ahc->flags & AHC_PAGESCBS) != 0) { /* SCB 0 heads the free list. */ ahc_outb(ahc, FREE_SCBH, 0); } else { /* No free list. */ ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); } /* Make sure that the last SCB terminates the free list */ ahc_outb(ahc, SCBPTR, i-1); ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); } static int ahc_init_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; SLIST_INIT(&scb_data->free_scbs); SLIST_INIT(&scb_data->sg_maps); /* Allocate SCB resources */ scb_data->scbarray = kcalloc(AHC_SCB_MAX_ALLOC, sizeof(struct scb), GFP_ATOMIC); if (scb_data->scbarray == NULL) return (ENOMEM); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahc_probe_scbs(ahc); if (ahc->scb_data->maxhscbs == 0) { printk("%s: No SCB space found\n", ahc_name(ahc)); return (ENXIO); } /* * Create our DMA tags. 
These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocation for our hscbs */ if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, (void **)&scb_data->hscbs, BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, scb_data->hscbs, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our sense buffers */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocate them */ if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, (void **)&scb_data->sense, BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, scb_data->sense, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ memset(scb_data->hscbs, 0, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); ahc_alloc_scbs(ahc); if (scb_data->numscbs == 0) { printk("%s: ahc_init_scbdata - " "Unable to allocate initial scbs\n", ahc_name(ahc)); goto error_exit; } /* * Reserve the next queued SCB. 
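 * One SCB is held out of the free pool here; its tag is later handed
 * to the sequencer via NEXT_QUEUED_SCB in ahc_chip_init().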
*/ ahc->next_queued_scb = ahc_get_scb(ahc); /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static void ahc_fini_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); ahc_dmamap_unload(ahc, scb_data->sg_dmat, sg_map->sg_dmamap); ahc_dmamem_free(ahc, scb_data->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); kfree(sg_map); } ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); } fallthrough; case 6: ahc_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); fallthrough; case 5: ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); ahc_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); fallthrough; case 4: ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); fallthrough; case 3: ahc_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); fallthrough; case 2: ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); fallthrough; case 1: ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; case 0: break; } kfree(scb_data->scbarray); } static void ahc_alloc_scbs(struct ahc_softc *ahc) { struct scb_data *scb_data; struct scb *next_scb; struct sg_map_node *sg_map; dma_addr_t physaddr; struct ahc_dma_seg *segs; int newcount; int i; scb_data = ahc->scb_data; if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) /* Can't allocate any more */ return; next_scb = &scb_data->scbarray[scb_data->numscbs]; sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of SCBS */ if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { kfree(sg_map); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, &sg_map->sg_physaddr, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); if (pdata == NULL) break; next_scb->platform_data = pdata; next_scb->sg_map = sg_map; next_scb->sg_list = segs; /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. 
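 * Hence sg_list_phys below is biased by one ahc_dma_seg so that the
 * address handed to the sequencer already skips the element mirrored
 * inside the hardware SCB.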
*/ next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); next_scb->ahc_softc = ahc; next_scb->flags = SCB_FREE; next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; next_scb->hscb->tag = ahc->scb_data->numscbs; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links.sle); segs += AHC_NSEG; physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); next_scb++; ahc->scb_data->numscbs++; } } void ahc_controller_info(struct ahc_softc *ahc, char *buf) { int len; len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); buf += len; if ((ahc->features & AHC_TWIN) != 0) len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " "B SCSI Id=%d, primary %c, ", ahc->our_id, ahc->our_id_b, (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); else { const char *speed; const char *type; speed = ""; if ((ahc->features & AHC_ULTRA) != 0) { speed = "Ultra "; } else if ((ahc->features & AHC_DT) != 0) { speed = "Ultra160 "; } else if ((ahc->features & AHC_ULTRA2) != 0) { speed = "Ultra2 "; } if ((ahc->features & AHC_WIDE) != 0) { type = "Wide"; } else { type = "Single"; } len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", speed, type, ahc->channel, ahc->our_id); } buf += len; if ((ahc->flags & AHC_PAGESCBS) != 0) sprintf(buf, "%d/%d SCBs", ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); else sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); } int ahc_chip_init(struct ahc_softc *ahc) { int term; int error; u_int i; u_int scsi_conf; u_int scsiseq_template; uint32_t physaddr; ahc_outb(ahc, SEQ_FLAGS, 0); ahc_outb(ahc, SEQ_FLAGS2, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ if (ahc->features & AHC_TWIN) { /* * Setup Channel B first. */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; ahc_outb(ahc, SCSIID, ahc->our_id_b); scsi_conf = ahc_inb(ahc, SCSICONF + 1); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* Select Channel A */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); } term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); else ahc_outb(ahc, SCSIID, ahc->our_id); scsi_conf = ahc_inb(ahc, SCSICONF); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime |ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* There are no untagged SCBs active yet. */ for (i = 0; i < 16; i++) { ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); if ((ahc->flags & AHC_SCB_BTT) != 0) { int lun; /* * The SCB based BTT allows an entry per * target and lun pair. */ for (lun = 1; lun < AHC_NUM_LUNS; lun++) ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); } } /* All of our queues are empty */ for (i = 0; i < 256; i++) ahc->qoutfifo[i] = SCB_LIST_NULL; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); for (i = 0; i < 256; i++) ahc->qinfifo[i] = SCB_LIST_NULL; if ((ahc->features & AHC_MULTI_TID) != 0) { ahc_outb(ahc, TARGID, 0); ahc_outb(ahc, TARGID + 1, 0); } /* * Tell the sequencer where it can find our arrays in memory. 
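 * Both base addresses are written a byte at a time, least significant
 * byte first, into the HSCB_ADDR and SHARED_DATA_ADDR scratch registers.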
*/ physaddr = ahc->scb_data->hscb_busaddr; ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); physaddr = ahc->shared_data_busaddr; ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); /* * Initialize the group code to command length table. * This overrides the values in TARG_SCSIRATE, so only * setup the table after we have processed that information. */ ahc_outb(ahc, CMDSIZE_TABLE, 5); ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); if ((ahc->features & AHC_HS_MAILBOX) != 0) ahc_outb(ahc, HS_MAILBOX, 0); /* Tell the sequencer of our initial queue positions */ if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->tqinfifonext = 1; ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); } ahc->qinfifonext = 0; ahc->qoutfifonext = 0; if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SDSCB_QOFF, 0); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); ahc_outb(ahc, QINPOS, ahc->qinfifonext); ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext); } /* We don't have any waiting selections */ ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); /* Our disconnection list is empty too */ ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); /* Message out buffer starts empty */ ahc_outb(ahc, MSG_OUT, NOP); /* * Setup the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enable select in operations once * we've had a lun enabled. */ scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; if ((ahc->flags & AHC_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); /* Initialize our list of free SCBs. */ ahc_build_free_scb_list(ahc); /* * Tell the sequencer which SCB will be the next one it receives. */ ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); /* * Load the Sequencer program and Enable the adapter * in "fast" mode. */ if (bootverbose) printk("%s: Downloading Sequencer Program...", ahc_name(ahc)); error = ahc_loadseq(ahc); if (error != 0) return (error); if ((ahc->features & AHC_ULTRA2) != 0) { int wait; /* * Wait for up to 500ms for our transceivers * to settle. If the adapter does not have * a cable attached, the transceivers may * never settle, so don't complain if we * fail here. 
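 * (5000 iterations of a 100us delay, polling SBLKCTL for the ENAB40 or
 * ENAB20 indication.)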
*/ for (wait = 5000; (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) ahc_delay(100); } ahc_restart(ahc); return (0); } /* * Start the board, ready for normal operation */ int ahc_init(struct ahc_softc *ahc) { int max_targ; u_int i; u_int scsi_conf; u_int ultraenb; u_int discenable; u_int tagenable; size_t driver_data_size; #ifdef AHC_DEBUG if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) ahc->flags |= AHC_SEQUENCER_DEBUG; #endif #ifdef AHC_PRINT_SRAM printk("Scratch Ram:"); for (i = 0x20; i < 0x5f; i++) { if (((i % 8) == 0) && (i != 0)) { printk ("\n "); } printk (" 0x%x", ahc_inb(ahc, i)); } if ((ahc->features & AHC_MORE_SRAM) != 0) { for (i = 0x70; i < 0x7f; i++) { if (((i % 8) == 0) && (i != 0)) { printk ("\n "); } printk (" 0x%x", ahc_inb(ahc, i)); } } printk ("\n"); /* * Reading uninitialized scratch ram may * generate parity errors. */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); #endif max_targ = 15; /* * Assume we have a board at this stage and it has been reset. */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) ahc->our_id = ahc->our_id_b = 7; /* * Default to allowing initiator operations. */ ahc->flags |= AHC_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) ahc->features &= ~AHC_TARGETMODE; ahc->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qinfifo and qoutfifo. * The qinfifo and qoutfifo are composed of 256 1 byte elements. * When providing for the target mode role, we must additionally * provide space for the incoming target command fifo and an extra * byte to deal with a dma bug in some chip versions. */ driver_data_size = 2 * 256 * sizeof(uint8_t); if ((ahc->features & AHC_TARGETMODE) != 0) driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) + /*DMA WideOdd Bug Buffer*/1; if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahc->shared_data_dmat) != 0) { return (ENOMEM); } ahc->init_level++; /* Allocation of driver data */ if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, (void **)&ahc->qoutfifo, BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { return (ENOMEM); } ahc->init_level++; /* And permanently map it in */ ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, &ahc->shared_data_busaddr, /*flags*/0); if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; ahc->dma_bug_buf = ahc->shared_data_busaddr + driver_data_size - 1; /* All target command blocks start out invalid. */ for (i = 0; i < AHC_TMODE_CMDS; i++) ahc->targetcmds[i].cmd_valid = 0; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; } ahc->qinfifo = &ahc->qoutfifo[256]; ahc->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahc->scb_data->maxhscbs == 0) if (ahc_init_scbdata(ahc) != 0) return (ENOMEM); /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. 
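 *
 * One tstate is allocated for our ID on channel A, and a second
 * for the channel B ID on Twin Channel controllers.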
*/ if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { printk("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } if ((ahc->features & AHC_TWIN) != 0) { if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { printk("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } } if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { ahc->flags |= AHC_PAGESCBS; } else { ahc->flags &= ~AHC_PAGESCBS; } #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_MISC) { printk("%s: hardware scb %u bytes; kernel scb %u bytes; " "ahc_dma %u bytes\n", ahc_name(ahc), (u_int)sizeof(struct hardware_scb), (u_int)sizeof(struct scb), (u_int)sizeof(struct ahc_dma_seg)); } #endif /* AHC_DEBUG */ /* * Look at the information that board initialization or * the board bios has left us. */ if (ahc->features & AHC_TWIN) { scsi_conf = ahc_inb(ahc, SCSICONF + 1); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_B; } scsi_conf = ahc_inb(ahc, SCSICONF); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_A; ultraenb = 0; tagenable = ALL_TARGETS_MASK; /* Grab the disconnection disable table and invert it for our needs */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) { printk("%s: Host Adapter Bios disabled. Using default SCSI " "device parameters\n", ahc_name(ahc)); ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| AHC_TERM_ENB_A|AHC_TERM_ENB_B; discenable = ALL_TARGETS_MASK; if ((ahc->features & AHC_ULTRA) != 0) ultraenb = ALL_TARGETS_MASK; } else { discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) | ahc_inb(ahc, DISC_DSB)); if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) | ahc_inb(ahc, ULTRA_ENB); } if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) max_targ = 7; for (i = 0; i <= max_targ; i++) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); /* Default to async narrow across the board */ memset(tinfo, 0, sizeof(*tinfo)); if (ahc->flags & AHC_USEDEFAULTS) { if ((ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * These will be truncated when we determine the * connection type we have with the target. */ tinfo->user.period = ahc_syncrates->period; tinfo->user.offset = MAX_OFFSET; } else { u_int scsirate; uint16_t mask; /* Take the settings leftover in scratch RAM. */ scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); mask = (0x01 << i); if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; u_int maxsync; if ((scsirate & SOFS) == 0x0F) { /* * Haven't negotiated yet, * so the format is different. */ scsirate = (scsirate & SXFR) >> 4 | (ultraenb & mask) ? 
0x08 : 0x0 | (scsirate & WIDEXFER); offset = MAX_OFFSET_ULTRA2; } else offset = ahc_inb(ahc, TARG_OFFSET + i); if ((scsirate & ~WIDEXFER) == 0 && offset != 0) /* Set to the lowest sync rate, 5MHz */ scsirate |= 0x1c; maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & AHC_DT) != 0) maxsync = AHC_SYNCRATE_DT; tinfo->user.period = ahc_find_period(ahc, scsirate, maxsync); if (offset == 0) tinfo->user.period = 0; else tinfo->user.offset = MAX_OFFSET; if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ && (ahc->features & AHC_DT) != 0) tinfo->user.ppr_options = MSG_EXT_PPR_DT_REQ; } else if ((scsirate & SOFS) != 0) { if ((scsirate & SXFR) == 0x40 && (ultraenb & mask) != 0) { /* Treat 10MHz as a non-ultra speed */ scsirate &= ~SXFR; ultraenb &= ~mask; } tinfo->user.period = ahc_find_period(ahc, scsirate, (ultraenb & mask) ? AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST); if (tinfo->user.period != 0) tinfo->user.offset = MAX_OFFSET; } if (tinfo->user.period == 0) tinfo->user.offset = 0; if ((scsirate & WIDEXFER) != 0 && (ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; tinfo->user.protocol_version = 4; if ((ahc->features & AHC_DT) != 0) tinfo->user.transport_version = 3; else tinfo->user.transport_version = 2; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; } tstate->ultraenb = 0; } ahc->user_discenable = discenable; ahc->user_tagenable = tagenable; return (ahc->bus_chip_init(ahc)); } void ahc_intr_enable(struct ahc_softc *ahc, int enable) { u_int hcntrl; hcntrl = ahc_inb(ahc, HCNTRL); hcntrl &= ~INTEN; ahc->pause &= ~INTEN; ahc->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahc->pause |= INTEN; ahc->unpause |= INTEN; } ahc_outb(ahc, HCNTRL, hcntrl); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahc_pause_and_flushwork(struct ahc_softc *ahc) { int intstat; int maxloops; int paused; maxloops = 1000; ahc->flags |= AHC_ALL_INTERRUPTS; paused = FALSE; do { if (paused) { ahc_unpause(ahc); /* * Give the sequencer some time to service * any active selections. */ ahc_delay(500); } ahc_intr(ahc); ahc_pause(ahc); paused = TRUE; ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); intstat = ahc_inb(ahc, INTSTAT); if ((intstat & INT_PEND) == 0) { ahc_clear_critical_section(ahc); intstat = ahc_inb(ahc, INTSTAT); } } while (--maxloops && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printk("Infinite interrupt loop, INTSTAT = %x", ahc_inb(ahc, INTSTAT)); } ahc_platform_flushwork(ahc); ahc->flags &= ~AHC_ALL_INTERRUPTS; } int __maybe_unused ahc_suspend(struct ahc_softc *ahc) { ahc_pause_and_flushwork(ahc); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ahc_unpause(ahc); return (EBUSY); } #ifdef AHC_TARGET_MODE /* * XXX What about ATIOs that have not yet been serviced? * Perhaps we should just refuse to be suspended if we * are acting in a target role. 
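 *
 * For now the suspend is only refused if an immediate
 * (non-disconnecting) target command is being held in
 * pending_device.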
*/ if (ahc->pending_device != NULL) { ahc_unpause(ahc); return (EBUSY); } #endif ahc_shutdown(ahc); return (0); } int __maybe_unused ahc_resume(struct ahc_softc *ahc) { ahc_reset(ahc, /*reinit*/TRUE); ahc_intr_enable(ahc, TRUE); ahc_restart(ahc); return (0); } /************************** Busy Target Table *********************************/ /* * Return the untagged transaction id for a given target/channel lun. * Optionally, clear the entry. */ static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int scbid; u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); } return (scbid); } static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); } } static void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); } } /************************** SCB and SCB queue management **********************/ int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahc, scb); char chan = SCB_GET_CHANNEL(ahc, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #ifdef AHC_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHC_TARGET_MODE */ match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); #endif /* AHC_TARGET_MODE */ } return match; } static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahc, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahc, scb); ahc_search_qinfifo(ahc, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_platform_freeze_devq(ahc, scb); } void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) { struct scb *prev_scb; prev_scb = NULL; if (ahc_qinfifo_count(ahc) != 0) { u_int prev_tag; uint8_t prev_pos; prev_pos = ahc->qinfifonext - 1; prev_tag = ahc->qinfifo[prev_pos]; prev_scb = ahc_lookup_scb(ahc, prev_tag); } ahc_qinfifo_requeue(ahc, prev_scb, scb); if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, 
ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } } static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); } else { prev_scb->hscb->next = scb->hscb->tag; ahc_sync_scb(ahc, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; scb->hscb->next = ahc->next_queued_scb->hscb->tag; ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahc_qinfifo_count(struct ahc_softc *ahc) { uint8_t qinpos; uint8_t diff; if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); diff = ahc->qinfifonext - qinpos; return (diff); } int ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahc_search_action action) { struct scb *scb; struct scb *prev_scb; uint8_t qinstart; uint8_t qinpos; uint8_t qintail; uint8_t next; uint8_t prev; uint8_t curscbptr; int found; int have_qregs; qintail = ahc->qinfifonext; have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; if (have_qregs) { qinstart = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinstart); } else qinstart = ahc_inb(ahc, QINPOS); qinpos = qinstart; found = 0; prev_scb = NULL; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahc->qinfifonext = qinpos; ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); while (qinpos != qintail) { scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); if (scb == NULL) { printk("qinpos = %d, SCB index = %d\n", qinpos, ahc->qinfifo[qinpos]); panic("Loop 1\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in qinfifo\n"); ahc_done(ahc, scb); } fallthrough; case SEARCH_REMOVE: break; case SEARCH_COUNT: ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; break; } } else { ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; } qinpos++; } if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } if (action != SEARCH_COUNT && (found != 0) && (qinstart != ahc->qinfifonext)) { /* * The sequencer may be in the process of dmaing * down the SCB at the beginning of the queue. * This could be problematic if either the first, * or the second SCB is removed from the queue * (the first SCB includes a pointer to the "next" * SCB to dma). If we have removed any entries, swap * the first element in the queue with the next HSCB * so the sequencer will notice that NEXT_QUEUED_SCB * has changed during its dma attempt and will retry * the DMA. 
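 *
 * In effect: save the head SCB's next tag, swap its hardware SCB
 * with the reserved next_queued_scb, restore the saved tag, record
 * the new tag at the head of our qinfifo shadow, and finally point
 * NEXT_QUEUED_SCB at it.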
*/ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); if (scb == NULL) { printk("found = %d, qinstart = %d, qinfifionext = %d\n", found, qinstart, ahc->qinfifonext); panic("First/Second Qinfifo fixup\n"); } /* * ahc_swap_with_next_hscb forces our next pointer to * point to the reserved SCB for future commands. Save * and restore our original next pointer to maintain * queue integrity. */ next = scb->hscb->next; ahc->scb_data->scbindex[scb->hscb->tag] = NULL; ahc_swap_with_next_hscb(ahc, scb); scb->hscb->next = next; ahc->qinfifo[qinstart] = scb->hscb->tag; /* Tell the card about the new head of the qinfifo. */ ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); /* Fixup the tail "next" pointer. */ qintail = ahc->qinfifonext - 1; scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); scb->hscb->next = ahc->next_queued_scb->hscb->tag; } /* * Search waiting for selection list. */ curscbptr = ahc_inb(ahc, SCBPTR); next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ prev = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { uint8_t scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printk("Waiting List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printk("scb_index = %d, next = %d\n", scb_index, next); panic("Waiting List traversal\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahc_done(ahc, scb); } fallthrough; case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; case SEARCH_COUNT: prev = next; next = ahc_inb(ahc, SCB_NEXT); break; } } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } ahc_outb(ahc, SCBPTR, curscbptr); found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, channel, lun, status, action); if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action) { struct scb *scb; int maxtarget; int found; int i; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } found = 0; i = 0; if ((ahc->flags & AHC_SCB_BTT) == 0) { maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } } else { maxtarget = 0; } for (; i < maxtarget; i++) { struct scb_tailq *untagged_q; struct scb *next_scb; untagged_q = &(ahc->untagged_queues[i]); next_scb = TAILQ_FIRST(untagged_q); while (next_scb != NULL) { scb = next_scb; next_scb = TAILQ_NEXT(scb, links.tqe); /* * The head of the list may be the currently * active untagged command for a device. * We're only searching for commands that * have not been started. A transaction * marked active but still in the qinfifo * is removed by the qinfifo scanning code * above. 
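 * It is therefore safe to simply skip any SCB_ACTIVE entries here.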
*/ if ((scb->flags & SCB_ACTIVE) != 0) continue; if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, ROLE_INITIATOR) == 0 || (ctx != NULL && ctx != scb->io_ctx)) continue; /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = ahc_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scb, status); cstat = ahc_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) ahc_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in untaggedQ\n"); ahc_done(ahc, scb); break; } case SEARCH_REMOVE: scb->flags &= ~SCB_UNTAGGEDQ; TAILQ_REMOVE(untagged_q, scb, links.tqe); break; case SEARCH_COUNT: break; } } } if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state) { struct scb *scbp; u_int next; u_int prev; u_int count; u_int active_scb; count = 0; next = ahc_inb(ahc, DISCONNECTED_SCBH); prev = SCB_LIST_NULL; if (save_state) { /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); } else /* Silence compiler */ active_scb = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { u_int scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printk("Disconnected List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } if (next == prev) { panic("Disconnected List Loop. " "cur SCBPTR == %x, prev SCBPTR == %x.", next, prev); } scbp = ahc_lookup_scb(ahc, scb_index); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, ROLE_INITIATOR)) { count++; if (remove) { next = ahc_rem_scb_from_disc_list(ahc, prev, next); } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } if (stop_on_first) break; } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } if (save_state) ahc_outb(ahc, SCBPTR, active_scb); return (count); } /* * Remove an SCB from the on chip list of disconnected transactions. * This is empty/unused if we are not performing SCB paging. */ static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) { u_int next; ahc_outb(ahc, SCBPTR, scbptr); next = ahc_inb(ahc, SCB_NEXT); ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); if (prev != SCB_LIST_NULL) { ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } else ahc_outb(ahc, DISCONNECTED_SCBH, next); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc) { /* * Invalidate the tag so that our abort * routines don't think it's active. */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); if ((ahc->flags & AHC_PAGESCBS) != 0) { ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) { u_int curscb, next; /* * Select the SCB we want to abort and * pull the next pointer out of it. 
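 *
 * The caller supplies both the entry to remove (scbpos) and its
 * predecessor (prev) so the list can be respliced: the predecessor's
 * SCB_NEXT is pointed at our successor, or WAITING_SCBH is updated
 * if we were at the head of the list.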
*/ curscb = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, scbpos); next = ahc_inb(ahc, SCB_NEXT); /* Clear the necessary fields */ ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); /* update the waiting list */ if (prev == SCB_LIST_NULL) { /* First in the list */ ahc_outb(ahc, WAITING_SCBH, next); /* * Ensure we aren't attempting to perform * selection for this entry. */ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else { /* * Select the scb that pointed to us * and update its next pointer. */ ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } /* * Point us back at the original scb position. */ ahc_outb(ahc, SCBPTR, curscb); return next; } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ static int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int active_scb; int i, j; int maxtarget; int minlun; int maxlun; int found; /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { /* * Unless we are using an SCB based * busy targets table, there is only * one table entry for all luns of * a target. */ minlun = 0; maxlun = 1; if ((ahc->flags & AHC_SCB_BTT) != 0) maxlun = AHC_NUM_LUNS; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL(i << 4, j); scbid = ahc_index_busy_tcl(ahc, tcl); scbp = ahc_lookup_scb(ahc, scbid); if (scbp == NULL || ahc_match_scb(ahc, scbp, target, channel, lun, tag, role) == 0) continue; ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); } } /* * Go through the disconnected list and remove any entries we * have queued for completion, 0'ing their control byte too. * We save the active SCB and restore it ourselves, so there * is no reason for this search to restore it too. */ ahc_search_disc_list(ahc, target, channel, lun, tag, /*stop_on_first*/FALSE, /*remove*/TRUE, /*save_state*/FALSE); } /* * Go through the hardware SCB array looking for commands that * were active but not on any list. In some cases, these remnants * might not still have mappings in the scbindex array (e.g. unexpected * bus free with the same scb queued for an abort). Don't hold this * against them. */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { u_int scbid; ahc_outb(ahc, SCBPTR, i); scbid = ahc_inb(ahc, SCB_TAG); scbp = ahc_lookup_scb(ahc, scbid); if ((scbp == NULL && scbid != SCB_LIST_NULL) || (scbp != NULL && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) ahc_add_curscb_to_free_list(ahc); } /* * Go through the pending CCB list and look for * commands for this target that are still active. 
* These are other tagged commands that were * disconnected when the reset occurred. */ scbp_next = LIST_FIRST(&ahc->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { cam_status ostat; ostat = ahc_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) ahc_set_transaction_status(scbp, status); if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) ahc_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printk("Inactive SCB on pending list\n"); ahc_done(ahc, scbp); found++; } } ahc_outb(ahc, SCBPTR, active_scb); ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); ahc_release_untagged_queues(ahc); return found; } static void ahc_reset_current_bus(struct ahc_softc *ahc) { uint8_t scsiseq; ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); scsiseq = ahc_inb(ahc, SCSISEQ); ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); ahc_flush_device_writes(ahc); ahc_delay(AHC_BUSRESET_DELAY); /* Turn off the bus reset */ ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); ahc_clear_intstat(ahc); /* Re-enable reset interrupts */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); } int ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) { struct ahc_devinfo devinfo; u_int initiator, target, max_scsiid; u_int sblkctl; u_int scsiseq; u_int simode1; int found; int restart_needed; char cur_channel; ahc->pending_device = NULL; ahc_compile_devinfo(&devinfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_pause(ahc); /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); /* * Run our command complete fifos to ensure that we perform * completion processing on any commands that 'completed' * before the reset occurred. */ ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE /* * XXX - In Twin mode, the tqinfifo may have commands * for an unaffected channel in it. However, if * we have run out of ATIO resources to drain that * queue, we may not get them all out here. Further, * the blocked transactions for the reset channel * should just be killed off, irrespecitve of whether * we are blocked on ATIO resources. Write a routine * to compact the tqinfifo appropriately. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { ahc_run_tqinfifo(ahc, /*paused*/TRUE); } #endif /* * Reset the bus if we are initiating this reset */ sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = 'A'; if ((ahc->features & AHC_TWIN) != 0 && ((sblkctl & SELBUSB) != 0)) cur_channel = 'B'; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); if (cur_channel != channel) { /* Case 1: Command for another bus is active * Stealthily reset the other bus without * upsetting the current bus. */ ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode. 
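 *
 * In that case ENSCSIRST is or'ed back into simode1 before the
 * value is written below.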
*/ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); ahc_outb(ahc, SBLKCTL, sblkctl); restart_needed = FALSE; } else { /* Case 2: A command from this bus is active or we're idle */ simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode. */ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); restart_needed = TRUE; } /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target more peripheral * drivers affected by this action. */ for (target = 0; target <= max_scsiid; target++) { struct ahc_tmode_tstate* tstate; u_int lun; tstate = ahc->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* Notify the XPT that a bus reset occurred */ ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET); /* * Revert to async/narrow transfers until we renegotiate. */ for (target = 0; target <= max_scsiid; target++) { if (ahc->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahc_devinfo devinfo; ahc_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); } } if (restart_needed) ahc_restart(ahc); else ahc_unpause(ahc); return found; } /***************************** Residual Processing ****************************/ /* * Calculate the residual for a just completed SCB. */ static void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; struct status_pkt *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_RESID_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are. 
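 *
 * For case 5 the residual is the count still outstanding in the
 * S/G segment where the transfer stopped plus the lengths of all
 * segments that follow it.  For example, with three 4K segments
 * and the transfer stopping 1K into the second, residual_datacnt
 * is 3K and the trailing 4K segment brings the total to 7K.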
*/ hscb = scb->hscb; sgptr = ahc_le32toh(hscb->sgptr); if ((sgptr & SG_RESID_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_RESID_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; spkt = &hscb->shared_data.status; resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ resid = ahc_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); } else { struct ahc_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. */ while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { sg++; resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) ahc_set_residual(scb, resid); else ahc_set_sense_residual(scb, resid); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MISC) != 0) { ahc_print_path(ahc, scb); printk("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHC_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahc_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == TARGET_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printk("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
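 *
 * Events sit in a small ring buffer indexed by event_r_idx and
 * event_w_idx; one event is consumed per available immediate
 * notify CCB.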
*/ void ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immed_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahc_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immed_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->message_args[0] = event->event_type; inot->message_args[1] = event->event_arg; break; } inot->initiator_id = event->initiator_id; inot->sense_len = 0; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHC_DUMP_SEQ void ahc_dumpseq(struct ahc_softc* ahc) { int i; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < ahc->instruction_ram_size; i++) { uint8_t ins_bytes[4]; ahc_insb(ahc, SEQRAM, ins_bytes, 4); printk("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static int ahc_loadseq(struct ahc_softc *ahc) { struct cs cs_table[NUM_CRITICAL_SECTIONS]; u_int begin_set[NUM_CRITICAL_SECTIONS]; u_int end_set[NUM_CRITICAL_SECTIONS]; const struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; u_int skip_addr; u_int sg_prefetch_cnt; int downloaded; uint8_t download_consts[7]; /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* Setup downloadable constant table */ download_consts[QOUTFIFO_OFFSET] = 0; if (ahc->targetcmds != NULL) download_consts[QOUTFIFO_OFFSET] += 32; download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); sg_prefetch_cnt = ahc->pci_cachesize; if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); cur_patch = patches; downloaded = 0; skip_addr = 0; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } if (downloaded == ahc->instruction_ram_size) { /* * We're about to exceed the instruction * storage capacity for this chip. Fail * the load. */ printk("\n%s: Program too large for instruction memory " "size of %d!\n", ahc_name(ahc), ahc->instruction_ram_size); return (ENOMEM); } /* * Move through the CS table until we find a CS * that might apply to this instruction. 
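 *
 * The begin/end addresses in critical_sections refer to the full,
 * un-patched program, so they are translated here into addresses
 * within the instructions actually downloaded.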
*/ for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahc_download_instr(ahc, i, download_consts); downloaded++; } ahc->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC); if (ahc->critical_sections == NULL) panic("ahc_loadseq: Could not malloc"); } ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printk(" %d instructions downloaded\n", downloaded); printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); } return (0); } static int ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, u_int start_instr, u_int *skip_addr) { const struct patch *cur_patch; const struct patch *last_patch; u_int num_patches; num_patches = ARRAY_SIZE(patches); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahc) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next * one and wait for our intruction pointer to * hit this point. */ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { const struct patch *cur_patch; int address_offset; u_int address; u_int skip_addr; u_int i; fmt3_ins = &instr.format3; address_offset = 0; address = fmt3_ins->address; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahc_check_patch(ahc, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = min(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } address -= address_offset; fmt3_ins->address = address; } fallthrough; case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; if ((ahc->features & AHC_CMD_CHAN) == 0 && opcode == AIC_OP_BMOV) { /* * Block move was added at the same time * as the command channel. Verify that * this is only a move of a single element * and convert the BMOV to a MOV * (AND with an immediate of FF). 
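 *
 * ANDing with an immediate of 0xFF leaves the moved value
 * unchanged, so the rewritten instruction behaves like a one
 * byte move.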
*/ if (fmt1_ins->immediate != 1) panic("%s: BMOV not supported\n", ahc_name(ahc)); fmt1_ins->opcode = AIC_OP_AND; fmt1_ins->immediate = 0xff; } fallthrough; case AIC_OP_ROL: if ((ahc->features & AHC_ULTRA2) != 0) { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; } else { /* Compress the instruction for older sequencers */ if (fmt3_ins != NULL) { instr.integer = fmt3_ins->immediate | (fmt3_ins->source << 8) | (fmt3_ins->address << 16) | (fmt3_ins->opcode << 25); } else { instr.integer = fmt1_ins->immediate | (fmt1_ins->source << 8) | (fmt1_ins->destination << 16) | (fmt1_ins->ret << 24) | (fmt1_ins->opcode << 25); } } /* The sequencer is a little endian cpu */ instr.integer = ahc_htole32(instr.integer); ahc_outsb(ahc, SEQRAM, instr.bytes, 4); break; default: panic("Unknown opcode encountered in seq program"); break; } } int ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; if (cur_column != NULL && *cur_column >= wrap_point) { printk("\n"); *cur_column = 0; } printed = printk("%s[0x%x]", name, value); if (table == NULL) { printed += printk(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printk("%s%s", printed_mask == 0 ? ":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printk(") "); else printed += printk(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahc_dump_card_state(struct ahc_softc *ahc) { struct scb *scb; struct scb_tailq *untagged_q; u_int cur_col; int paused; int target; int maxtarget; int i; uint8_t last_phase; uint8_t qinpos; uint8_t qintail; uint8_t qoutpos; uint8_t scb_index; uint8_t saved_scbptr; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); last_phase = ahc_inb(ahc, LASTPHASE); printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State %s, at SEQADDR 0x%x\n", ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); if (paused) printk("Card was paused\n"); printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), ahc_inb(ahc, ARG_2)); printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), ahc_inb(ahc, SCBPTR)); cur_col = 0; if ((ahc->features & AHC_DT) != 0) ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); ahc_sstat0_print(ahc_inb(ahc, 
SSTAT0), &cur_col, 50); ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); if (cur_col != 0) printk("\n"); printk("STACK:"); for (i = 0; i < STACK_SIZE; i++) printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); printk("\nSCB count = %d\n", ahc->scb_data->numscbs); printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); /* QINFIFO */ printk("QINFIFO entries: "); if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); qintail = ahc->qinfifonext; while (qinpos != qintail) { printk("%d ", ahc->qinfifo[qinpos]); qinpos++; } printk("\n"); printk("Waiting Queue entries: "); scb_index = ahc_inb(ahc, WAITING_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); printk("Disconnected Queue entries: "); scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); printk("QOUTFIFO entries: "); qoutpos = ahc->qoutfifonext; i = 0; while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { printk("%d ", ahc->qoutfifo[qoutpos]); qoutpos++; } printk("\n"); printk("Sequencer Free SCB List: "); scb_index = ahc_inb(ahc, FREE_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d ", scb_index); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); printk("Sequencer SCB Info: "); for (i = 0; i < ahc->scb_data->maxhscbs; i++) { ahc_outb(ahc, SCBPTR, i); cur_col = printk("\n%3d ", i); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); } printk("\n"); printk("Pending list: "); i = 0; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { if (i++ > 256) break; cur_col = printk("\n%3d ", scb->hscb->tag); ahc_scb_control_print(scb->hscb->control, &cur_col, 60); ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, scb->hscb->tag); printk("("); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); printk(")"); } } printk("\n"); printk("Kernel Free SCB list: "); i = 0; SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; for (target = 0; target <= maxtarget; target++) { untagged_q = &ahc->untagged_queues[target]; if (TAILQ_FIRST(untagged_q) == NULL) continue; printk("Untagged Q(%d): ", target); i = 0; TAILQ_FOREACH(scb, untagged_q, links.tqe) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); } printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /************************* Target Mode ****************************************/ #ifdef AHC_TARGET_MODE cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure) { if ((ahc->features & AHC_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahc->black_hole; } else { u_int max_id; max_id = (ahc->features & AHC_WIDE) ? 16 : 8; if (ccb->ccb_h.target_id >= max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_long s; u_int target; u_int lun; u_int target_mask; u_int our_id; int error; char channel; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cam_sim_bus(sim) == 0) our_id = ahc->our_id; else our_id = ahc->our_id_b; if (ccb->ccb_h.target_id != our_id) { /* * our_id represents our initiator ID, or * the ID of the first target to have an * enabled lun in target mode. There are * two cases that may preclude enabling a * target id other than our_id. * * o our_id is for an active initiator role. * Since the hardware does not support * reselections to the initiator role at * anything other than our_id, and our_id * is used by the hardware to indicate the * ID to use for both select-out and * reselect-out operations, the only target * ID we can support in this mode is our_id. * * o The MULTARGID feature is not available and * a previous target mode ID has been enabled. */ if ((ahc->features & AHC_MULTIROLE) != 0) { if ((ahc->features & AHC_MULTI_TID) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahc->flags & AHC_INITIATORROLE) != 0 || ahc->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } else if ((ahc->features & AHC_MULTI_TID) == 0 && ahc->enabled_luns > 0) { status = CAM_TID_INVALID; } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. 
* If we aren't in target mode, switch modes. */ if ((ahc->flags & AHC_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; ahc_flag saved_flags; printk("Configuring Target Mode\n"); ahc_lock(ahc, &s); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahc_unlock(ahc, &s); return; } saved_flags = ahc->flags; ahc->flags |= AHC_TARGETROLE; if ((ahc->features & AHC_MULTIROLE) == 0) ahc->flags &= ~AHC_INITIATORROLE; ahc_pause(ahc); error = ahc_loadseq(ahc); if (error != 0) { /* * Restore original configuration and notify * the caller that we cannot support target mode. * Since the adapter started out in this * configuration, the firmware load will succeed, * so there is no point in checking ahc_loadseq's * return value. */ ahc->flags = saved_flags; (void)ahc_loadseq(ahc); ahc_restart(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; } ahc_restart(ahc); ahc_unlock(ahc, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahc, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printk("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. */ ccb->ccb_h.status = CAM_REQ_INVALID; printk("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahc_alloc_tstate(ahc, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { kfree(lstate); xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahc_lock(ahc, &s); ahc_pause(ahc); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahc->enabled_luns++; if ((ahc->features & AHC_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask |= target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahc, sim); our_id = SIM_SCSI_ID(ahc, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 
'B' : 'A'; if ((ahc->features & AHC_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; if (channel == 'A') ahc->our_id = target; else ahc->our_id_b = target; if (swap) ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); ahc_outb(ahc, SCSIID, target); if (swap) ahc_outb(ahc, SBLKCTL, sblkctl); } } } else ahc->black_hole = lstate; /* Allow select-in operations */ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); } ahc_unpause(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printk("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahc_lock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printk("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahc_unlock(ahc, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printk("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printk("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahc_unlock(ahc, &s); return; } xpt_print_path(ccb->ccb_h.path); printk("Target mode disabled\n"); xpt_free_path(lstate->path); kfree(lstate); ahc_pause(ahc); /* Can we clean up the target too? */ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahc->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahc_free_tstate(ahc, target, channel, /*force*/FALSE); if (ahc->features & AHC_MULTI_TID) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask &= ~target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } } } else { ahc->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahc->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); if ((ahc->features & AHC_MULTIROLE) == 0) { printk("Configuring Initiator Mode\n"); ahc->flags &= ~AHC_TARGETROLE; ahc->flags |= AHC_INITIATORROLE; /* * Returning to a configuration that * fit previously will always succeed. */ (void)ahc_loadseq(ahc); ahc_restart(ahc); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahc_unpause(ahc); ahc_unlock(ahc, &s); } } static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) { u_int scsiid_mask; u_int scsiid; if ((ahc->features & AHC_MULTI_TID) == 0) panic("ahc_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. 
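 *
 * If the current OID is not among the enabled target IDs, it is
 * rewritten below to the lowest enabled ID, falling back to
 * ahc->our_id when the mask is empty.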
*/ if ((ahc->features & AHC_ULTRA2) != 0) scsiid = ahc_inb(ahc, SCSIID_ULTRA2); else scsiid = ahc_inb(ahc, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahc->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, scsiid); else ahc_outb(ahc, SCSIID, scsiid); } static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) { struct target_cmd *cmd; /* * If the card supports auto-access pause, * we can access the card directly regardless * of whether it is paused or not. */ if ((ahc->features & AHC_AUTOPAUSE) != 0) paused = TRUE; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahc_handle_target_cmd(ahc, cmd) != 0) break; cmd->cmd_valid = 0; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahc->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { if ((ahc->features & AHC_HS_MAILBOX) != 0) { u_int hs_mailbox; hs_mailbox = ahc_inb(ahc, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; ahc_outb(ahc, HS_MAILBOX, hs_mailbox); } else { if (!paused) ahc_pause(ahc); ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext & HOST_TQINPOS); if (!paused) ahc_unpause(ahc); } } } } static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahc, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahc->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. */ if (lstate == NULL) lstate = ahc->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahc->flags |= AHC_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ if (bootverbose) printk("%s: ATIOs exhausted\n", ahc_name(ahc)); return (1); } else ahc->flags &= ~AHC_TQINFIFO_BLOCKED; #if 0 printk("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahc->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahc->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. 
*/ atio->cdb_len = 1; printk("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #if 0 printk("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahc->pending_device); #endif ahc->pending_device = lstate; ahc_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
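/*
 * Illustrative sketch, not part of the driver source above: the CDB length
 * lookup that ahc_handle_target_cmd() performs on the SCSI group code (the
 * top three bits of the opcode byte), rewritten as a stand-alone, runnable
 * helper. The shift of 5 mirrors what CMD_GROUP_CODE_SHIFT is assumed to be;
 * example_cdb_len() and the opcodes below exist only for demonstration.
 */
#include <stdio.h>

static int example_cdb_len(unsigned char opcode)
{
        switch (opcode >> 5) {          /* group code = opcode bits 7:5 */
        case 0:
                return 6;               /* 6-byte CDBs, e.g. INQUIRY */
        case 1:
        case 2:
                return 10;              /* 10-byte CDBs, e.g. READ(10) */
        case 4:
                return 16;              /* 16-byte CDBs, e.g. READ(16) */
        case 5:
                return 12;              /* 12-byte CDBs, e.g. READ(12) */
        default:
                return 1;               /* reserved/vendor unique: keep only the opcode */
        }
}

int main(void)
{
        printf("INQUIRY  (0x12) -> %d-byte CDB\n", example_cdb_len(0x12));  /* 6 */
        printf("READ(10) (0x28) -> %d-byte CDB\n", example_cdb_len(0x28));  /* 10 */
        return 0;
}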
linux-master
drivers/scsi/aic7xxx/aic7xxx_core.c
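/*
 * Illustrative sketch, not part of any driver file in this dump: the
 * per-target selection mask built by the XPT_EN_LUN handling earlier in
 * aic7xxx_core.c (target_mask = 0x01 << target, shifted into the high byte
 * for channel 'B'), shown as a stand-alone program that sets and then
 * clears bits in such a 16-bit mask. example_target_mask() is a
 * demonstration helper, not a driver function.
 */
#include <stdio.h>

static unsigned int example_target_mask(unsigned int target, char channel)
{
        unsigned int mask = 0x01u << target;

        if (channel == 'B')             /* channel B uses the high byte */
                mask <<= 8;
        return mask;
}

int main(void)
{
        unsigned int enabled = 0;

        enabled |= example_target_mask(3, 'A');         /* enable target 3, channel A */
        enabled |= example_target_mask(2, 'B');         /* enable target 2, channel B */
        printf("mask after enables: 0x%04x\n", enabled);        /* 0x0408 */

        enabled &= ~example_target_mask(3, 'A');        /* disable target 3 again */
        printf("mask after disable: 0x%04x\n", enabled);        /* 0x0400 */
        return 0;
}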
/*
 * Linux driver attachment glue for aic7770 based controllers.
 *
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7770_osm.c#14 $
 */

#include "aic7xxx_osm.h"

#include <linux/device.h>
#include <linux/eisa.h>

int
aic7770_map_registers(struct ahc_softc *ahc, u_int port)
{
        /*
         * Lock out other contenders for our i/o space.
         */
        if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx"))
                return (ENOMEM);
        ahc->tag = BUS_SPACE_PIO;
        ahc->bsh.ioport = port;
        return (0);
}

int
aic7770_map_int(struct ahc_softc *ahc, u_int irq)
{
        int error;
        int shared;

        shared = 0;
        if ((ahc->flags & AHC_EDGE_INTERRUPT) == 0)
                shared = IRQF_SHARED;

        error = request_irq(irq, ahc_linux_isr, shared, "aic7xxx", ahc);
        if (error == 0)
                ahc->platform_data->irq = irq;

        return (-error);
}

static int
aic7770_probe(struct device *dev)
{
        struct eisa_device *edev = to_eisa_device(dev);
        u_int eisaBase = edev->base_addr + AHC_EISA_SLOT_OFFSET;
        struct ahc_softc *ahc;
        char buf[80];
        char *name;
        int error;

        sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
        name = kstrdup(buf, GFP_ATOMIC);
        if (name == NULL)
                return (ENOMEM);
        ahc = ahc_alloc(&aic7xxx_driver_template, name);
        if (ahc == NULL)
                return (ENOMEM);
        ahc->dev = dev;
        error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
                               eisaBase);
        if (error != 0) {
                ahc->bsh.ioport = 0;
                ahc_free(ahc);
                return (error);
        }

        dev_set_drvdata(dev, ahc);

        error = ahc_linux_register_host(ahc, &aic7xxx_driver_template);
        return (error);
}

static int
aic7770_remove(struct device *dev)
{
        struct ahc_softc *ahc = dev_get_drvdata(dev);
        u_long s;

        if (ahc->platform_data && ahc->platform_data->host)
                scsi_remove_host(ahc->platform_data->host);

        ahc_lock(ahc, &s);
        ahc_intr_enable(ahc, FALSE);
        ahc_unlock(ahc, &s);

        ahc_free(ahc);
        return 0;
}

static struct eisa_device_id aic7770_ids[] = {
        { "ADP7771", 0 }, /* AHA 274x */
        { "ADP7756", 1 }, /* AHA 284x BIOS enabled */
        { "ADP7757", 2 }, /* AHA 284x BIOS disabled */
        { "ADP7782", 3 }, /* AHA 274x Olivetti OEM */
        { "ADP7783", 4 }, /* AHA 274x Olivetti OEM (Differential) */
        { "ADP7770", 5 }, /* AIC7770 generic */
        { "" }
};
MODULE_DEVICE_TABLE(eisa, aic7770_ids);

static struct eisa_driver aic7770_driver = {
        .id_table = aic7770_ids,
        .driver = {
                .name   = "aic7xxx",
                .probe  = aic7770_probe,
                .remove = aic7770_remove,
        }
};

int
ahc_linux_eisa_init(void)
{
        return eisa_driver_register(&aic7770_driver);
}

void
ahc_linux_eisa_exit(void)
{
        eisa_driver_unregister(&aic7770_driver);
}
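/*
 * Illustrative sketch, not part of the driver source above: how
 * aic7770_probe() derives its I/O base and instance name from the EISA
 * slot. Each EISA slot owns a 4KB slot-specific I/O window at (slot << 12),
 * so shifting the base right by 12 recovers the slot number used in the
 * "ahc_eisa:%d" name. EXAMPLE_SLOT_OFFSET stands in for
 * AHC_EISA_SLOT_OFFSET; its value here (0xc00) is only an assumption for
 * the demonstration.
 */
#include <stdio.h>

#define EXAMPLE_SLOT_OFFSET 0xc00       /* assumed register offset inside the slot window */

int main(void)
{
        unsigned int slot = 3;                          /* hypothetical EISA slot */
        unsigned int base_addr = slot << 12;            /* slot-specific I/O window */
        unsigned int eisaBase = base_addr + EXAMPLE_SLOT_OFFSET;
        char name[16];

        snprintf(name, sizeof(name), "ahc_eisa:%u", eisaBase >> 12);
        printf("%s at I/O 0x%x\n", name, eisaBase);     /* "ahc_eisa:3 at I/O 0x3c00" */
        return 0;
}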
linux-master
drivers/scsi/aic7xxx/aic7770_osm.c
/* * Product specific probe and attach routines for: * 3940, 2940, aic7895, aic7890, aic7880, * aic7870, aic7860 and aic7850 SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $ */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" #include "aic7xxx_pci.h" static inline uint64_t ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) { uint64_t id; id = subvendor | (subdevice << 16) | ((uint64_t)vendor << 32) | ((uint64_t)device << 48); return (id); } #define AHC_PCI_IOADDR PCIR_MAPS /* I/O Address */ #define AHC_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */ #define DEVID_9005_TYPE(id) ((id) & 0xF) #define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ #define DEVID_9005_TYPE_AAA 0x3 /* RAID Card */ #define DEVID_9005_TYPE_SISL 0x5 /* Container ROMB */ #define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ #define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4) #define DEVID_9005_MAXRATE_U160 0x0 #define DEVID_9005_MAXRATE_ULTRA2 0x1 #define DEVID_9005_MAXRATE_ULTRA 0x2 #define DEVID_9005_MAXRATE_FAST 0x3 #define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6) #define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8) #define DEVID_9005_CLASS_SPI 0x0 /* Parallel SCSI */ #define SUBID_9005_TYPE(id) ((id) & 0xF) #define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ #define SUBID_9005_TYPE_CARD 0x0 /* Standard Card */ #define SUBID_9005_TYPE_LCCARD 0x1 /* Low Cost Card */ #define SUBID_9005_TYPE_RAID 0x3 /* Combined with Raid */ #define SUBID_9005_TYPE_KNOWN(id) \ ((((id) & 0xF) == SUBID_9005_TYPE_MB) \ || (((id) & 0xF) == SUBID_9005_TYPE_CARD) \ || (((id) & 0xF) == SUBID_9005_TYPE_LCCARD) \ || (((id) & 0xF) == SUBID_9005_TYPE_RAID)) #define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4) #define SUBID_9005_MAXRATE_ULTRA2 0x0 #define SUBID_9005_MAXRATE_ULTRA 0x1 #define SUBID_9005_MAXRATE_U160 0x2 #define SUBID_9005_MAXRATE_RESERVED 0x3 #define SUBID_9005_SEEPTYPE(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? ((id) & 0xC0) >> 6 \ : ((id) & 0x300) >> 8) #define SUBID_9005_SEEPTYPE_NONE 0x0 #define SUBID_9005_SEEPTYPE_1K 0x1 #define SUBID_9005_SEEPTYPE_2K_4K 0x2 #define SUBID_9005_SEEPTYPE_RESERVED 0x3 #define SUBID_9005_AUTOTERM(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? (((id) & 0x400) >> 10) == 0 \ : (((id) & 0x40) >> 6) == 0) #define SUBID_9005_NUMCHAN(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? ((id) & 0x300) >> 8 \ : ((id) & 0xC00) >> 10) #define SUBID_9005_LEGACYCONN(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? 0 \ : ((id) & 0x80) >> 7) #define SUBID_9005_MFUNCENB(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? ((id) & 0x800) >> 11 \ : ((id) & 0x1000) >> 12) /* * Informational only. Should use chip register to be * certain, but may be use in identification strings. 
*/ #define SUBID_9005_CARD_SCSIWIDTH_MASK 0x2000 #define SUBID_9005_CARD_PCIWIDTH_MASK 0x4000 #define SUBID_9005_CARD_SEDIFF_MASK 0x8000 static ahc_device_setup_t ahc_aic785X_setup; static ahc_device_setup_t ahc_aic7860_setup; static ahc_device_setup_t ahc_apa1480_setup; static ahc_device_setup_t ahc_aic7870_setup; static ahc_device_setup_t ahc_aic7870h_setup; static ahc_device_setup_t ahc_aha394X_setup; static ahc_device_setup_t ahc_aha394Xh_setup; static ahc_device_setup_t ahc_aha494X_setup; static ahc_device_setup_t ahc_aha494Xh_setup; static ahc_device_setup_t ahc_aha398X_setup; static ahc_device_setup_t ahc_aic7880_setup; static ahc_device_setup_t ahc_aic7880h_setup; static ahc_device_setup_t ahc_aha2940Pro_setup; static ahc_device_setup_t ahc_aha394XU_setup; static ahc_device_setup_t ahc_aha394XUh_setup; static ahc_device_setup_t ahc_aha398XU_setup; static ahc_device_setup_t ahc_aic7890_setup; static ahc_device_setup_t ahc_aic7892_setup; static ahc_device_setup_t ahc_aic7895_setup; static ahc_device_setup_t ahc_aic7895h_setup; static ahc_device_setup_t ahc_aic7896_setup; static ahc_device_setup_t ahc_aic7899_setup; static ahc_device_setup_t ahc_aha29160C_setup; static ahc_device_setup_t ahc_raid_setup; static ahc_device_setup_t ahc_aha394XX_setup; static ahc_device_setup_t ahc_aha494XX_setup; static ahc_device_setup_t ahc_aha398XX_setup; static const struct ahc_pci_identity ahc_pci_ident_table[] = { /* aic7850 based controllers */ { ID_AHA_2902_04_10_15_20C_30C, ID_ALL_MASK, "Adaptec 2902/04/10/15/20C/30C SCSI adapter", ahc_aic785X_setup }, /* aic7860 based controllers */ { ID_AHA_2930CU, ID_ALL_MASK, "Adaptec 2930CU SCSI adapter", ahc_aic7860_setup }, { ID_AHA_1480A & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 1480A Ultra SCSI adapter", ahc_apa1480_setup }, { ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940A Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940A/CN Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2930C Ultra SCSI adapter (VAR)", ahc_aic7860_setup }, /* aic7870 based controllers */ { ID_AHA_2940, ID_ALL_MASK, "Adaptec 2940 SCSI adapter", ahc_aic7870_setup }, { ID_AHA_3940, ID_ALL_MASK, "Adaptec 3940 SCSI adapter", ahc_aha394X_setup }, { ID_AHA_398X, ID_ALL_MASK, "Adaptec 398X SCSI RAID adapter", ahc_aha398X_setup }, { ID_AHA_2944, ID_ALL_MASK, "Adaptec 2944 SCSI adapter", ahc_aic7870h_setup }, { ID_AHA_3944, ID_ALL_MASK, "Adaptec 3944 SCSI adapter", ahc_aha394Xh_setup }, { ID_AHA_4944, ID_ALL_MASK, "Adaptec 4944 SCSI adapter", ahc_aha494Xh_setup }, /* aic7880 based controllers */ { ID_AHA_2940U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_3940U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 3940 Ultra SCSI adapter", ahc_aha394XU_setup }, { ID_AHA_2944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2944 Ultra SCSI adapter", ahc_aic7880h_setup }, { ID_AHA_3944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 3944 Ultra SCSI adapter", ahc_aha394XUh_setup }, { ID_AHA_398XU & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 398X Ultra SCSI RAID adapter", ahc_aha398XU_setup }, { /* * XXX Don't know the slot numbers * so we can't identify channels */ ID_AHA_4944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 4944 Ultra SCSI adapter", ahc_aic7880h_setup }, { ID_AHA_2930U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2930 Ultra SCSI 
adapter", ahc_aic7880_setup }, { ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940 Pro Ultra SCSI adapter", ahc_aha2940Pro_setup }, { ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940/CN Ultra SCSI adapter", ahc_aic7880_setup }, /* Ignore all SISL (AAC on MB) based controllers. */ { ID_9005_SISL_ID, ID_9005_SISL_MASK, NULL, NULL }, /* aic7890 based controllers */ { ID_AHA_2930U2, ID_ALL_MASK, "Adaptec 2930 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2940U2B, ID_ALL_MASK, "Adaptec 2940B Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2940U2_OEM, ID_ALL_MASK, "Adaptec 2940 Ultra2 SCSI adapter (OEM)", ahc_aic7890_setup }, { ID_AHA_2940U2, ID_ALL_MASK, "Adaptec 2940 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2950U2B, ID_ALL_MASK, "Adaptec 2950 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AIC7890_ARO, ID_ALL_MASK, "Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)", ahc_aic7890_setup }, { ID_AAA_131U2, ID_ALL_MASK, "Adaptec AAA-131 Ultra2 RAID adapter", ahc_aic7890_setup }, /* aic7892 based controllers */ { ID_AHA_29160, ID_ALL_MASK, "Adaptec 29160 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160_CPQ, ID_ALL_MASK, "Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160N, ID_ALL_MASK, "Adaptec 29160N Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160C, ID_ALL_MASK, "Adaptec 29160C Ultra160 SCSI adapter", ahc_aha29160C_setup }, { ID_AHA_29160B, ID_ALL_MASK, "Adaptec 29160B Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_19160B, ID_ALL_MASK, "Adaptec 19160B Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AIC7892_ARO, ID_ALL_MASK, "Adaptec aic7892 Ultra160 SCSI adapter (ARO)", ahc_aic7892_setup }, { ID_AHA_2915_30LP, ID_ALL_MASK, "Adaptec 2915/30LP Ultra160 SCSI adapter", ahc_aic7892_setup }, /* aic7895 based controllers */ { ID_AHA_2940U_DUAL, ID_ALL_MASK, "Adaptec 2940/DUAL Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AHA_3940AU, ID_ALL_MASK, "Adaptec 3940A Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AHA_3944AU, ID_ALL_MASK, "Adaptec 3944A Ultra SCSI adapter", ahc_aic7895h_setup }, { ID_AIC7895_ARO, ID_AIC7895_ARO_MASK, "Adaptec aic7895 Ultra SCSI adapter (ARO)", ahc_aic7895_setup }, /* aic7896/97 based controllers */ { ID_AHA_3950U2B_0, ID_ALL_MASK, "Adaptec 3950B Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2B_1, ID_ALL_MASK, "Adaptec 3950B Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2D_0, ID_ALL_MASK, "Adaptec 3950D Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2D_1, ID_ALL_MASK, "Adaptec 3950D Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AIC7896_ARO, ID_ALL_MASK, "Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)", ahc_aic7896_setup }, /* aic7899 based controllers */ { ID_AHA_3960D, ID_ALL_MASK, "Adaptec 3960D Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AHA_3960D_CPQ, ID_ALL_MASK, "Adaptec (Compaq OEM) 3960D Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AIC7899_ARO, ID_ALL_MASK, "Adaptec aic7899 Ultra160 SCSI adapter (ARO)", ahc_aic7899_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7850 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7850 SCSI adapter", ahc_aic785X_setup }, { ID_AIC7855 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7855 SCSI adapter", ahc_aic785X_setup }, { ID_AIC7859 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7859 SCSI adapter", ahc_aic7860_setup }, { ID_AIC7860 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7860 Ultra SCSI adapter", 
ahc_aic7860_setup }, { ID_AIC7870 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7870 SCSI adapter", ahc_aic7870_setup }, { ID_AIC7880 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7880 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AIC7890 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7890/91 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AIC7892 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7892 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AIC7895 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7895 Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AIC7896 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7896/97 Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AIC7899 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7899 Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AIC7810 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7810 RAID memory controller", ahc_raid_setup }, { ID_AIC7815 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7815 RAID memory controller", ahc_raid_setup } }; static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table); #define AHC_394X_SLOT_CHANNEL_A 4 #define AHC_394X_SLOT_CHANNEL_B 5 #define AHC_398X_SLOT_CHANNEL_A 4 #define AHC_398X_SLOT_CHANNEL_B 8 #define AHC_398X_SLOT_CHANNEL_C 12 #define AHC_494X_SLOT_CHANNEL_A 4 #define AHC_494X_SLOT_CHANNEL_B 5 #define AHC_494X_SLOT_CHANNEL_C 6 #define AHC_494X_SLOT_CHANNEL_D 7 #define DEVCONFIG 0x40 #define PCIERRGENDIS 0x80000000ul #define SCBSIZE32 0x00010000ul /* aic789X only */ #define REXTVALID 0x00001000ul /* ultra cards only */ #define MPORTMODE 0x00000400ul /* aic7870+ only */ #define RAMPSM 0x00000200ul /* aic7870+ only */ #define VOLSENSE 0x00000100ul #define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/ #define SCBRAMSEL 0x00000080ul #define MRDCEN 0x00000040ul #define EXTSCBTIME 0x00000020ul /* aic7870 only */ #define EXTSCBPEN 0x00000010ul /* aic7870 only */ #define BERREN 0x00000008ul #define DACEN 0x00000004ul #define STPWLEVEL 0x00000002ul #define DIFACTNEGEN 0x00000001ul /* aic7870 only */ #define CSIZE_LATTIME 0x0c #define CACHESIZE 0x0000003ful /* only 5 bits */ #define LATTIME 0x0000ff00ul /* PCI STATUS definitions */ #define DPE 0x80 #define SSE 0x40 #define RMA 0x20 #define RTA 0x10 #define STA 0x08 #define DPR 0x01 static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, uint16_t subdevice, uint16_t subvendor); static int ahc_ext_scbram_present(struct ahc_softc *ahc); static void ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, int fast, int large); static void ahc_probe_ext_scbram(struct ahc_softc *ahc); static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1); static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc); static void configure_termination(struct ahc_softc *ahc, struct seeprom_descriptor *sd, u_int adapter_control, u_int *sxfrctl1); static void ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, int *enableSEC_high, int *enablePRI_low, int *enablePRI_high, int *eeprom_present); static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *internal68_present, int *externalcable_present, int *eeprom_present); static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *externalcable_present, int *eeprom_present); static void write_brdctl(struct ahc_softc *ahc, uint8_t value); static uint8_t read_brdctl(struct ahc_softc *ahc); static void ahc_pci_intr(struct ahc_softc 
*ahc); static int ahc_pci_chip_init(struct ahc_softc *ahc); static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, uint16_t subdevice, uint16_t subvendor) { int result; /* Default to invalid. */ result = 0; if (vendor == 0x9005 && subvendor == 0x9005 && subdevice != device && SUBID_9005_TYPE_KNOWN(subdevice) != 0) { switch (SUBID_9005_TYPE(subdevice)) { case SUBID_9005_TYPE_MB: break; case SUBID_9005_TYPE_CARD: case SUBID_9005_TYPE_LCCARD: /* * Currently only trust Adaptec cards to * get the sub device info correct. */ if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA) result = 1; break; case SUBID_9005_TYPE_RAID: break; default: break; } } return (result); } const struct ahc_pci_identity * ahc_find_pci_device(ahc_dev_softc_t pci) { uint64_t full_id; uint16_t device; uint16_t vendor; uint16_t subdevice; uint16_t subvendor; const struct ahc_pci_identity *entry; u_int i; vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); full_id = ahc_compose_id(device, vendor, subdevice, subvendor); /* * If the second function is not hooked up, ignore it. * Unfortunately, not all MB vendors implement the * subdevice ID as per the Adaptec spec, so do our best * to sanity check it prior to accepting the subdevice * ID as valid. */ if (ahc_get_pci_function(pci) > 0 && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor) && SUBID_9005_MFUNCENB(subdevice) == 0) return (NULL); for (i = 0; i < ahc_num_pci_devs; i++) { entry = &ahc_pci_ident_table[i]; if (entry->full_id == (full_id & entry->id_mask)) { /* Honor exclusion entries. */ if (entry->name == NULL) return (NULL); return (entry); } } return (NULL); } int ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry) { u_int command; u_int our_id; u_int sxfrctl1; u_int scsiseq; u_int dscommand0; uint32_t devconfig; int error; uint8_t sblkctl; our_id = 0; error = entry->setup(ahc); if (error != 0) return (error); ahc->chip |= AHC_PCI; ahc->description = entry->name; pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0); error = ahc_pci_map_registers(ahc); if (error != 0) return (error); /* * Before we continue probing the card, ensure that * its interrupts are *disabled*. We don't want * a misstep to hang the machine in an interrupt * storm. */ ahc_intr_enable(ahc, FALSE); devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); /* * If we need to support high memory, enable dual * address cycles. This bit must be set to enable * high address bit generation even if we are on a * 64bit bus (PCI64BIT set in devconfig). */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { if (bootverbose) printk("%s: Enabling 39Bit Addressing\n", ahc_name(ahc)); devconfig |= DACEN; } /* Ensure that pci error generation, a test feature, is disabled. */ devconfig |= PCIERRGENDIS; ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); /* Ensure busmastering is enabled */ command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); command |= PCIM_CMD_BUSMASTEREN; ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2); /* On all PCI adapters, we allow SCB paging */ ahc->flags |= AHC_PAGESCBS; error = ahc_softc_init(ahc); if (error != 0) return (error); /* * Disable PCI parity error checking. 
Users typically * do this to work around broken PCI chipsets that get * the parity timing wrong and thus generate lots of spurious * errors. The chip only allows us to disable *all* parity * error reporting when doing this, so CIO bus, scb ram, and * scratch ram parity errors will be ignored too. */ if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0) ahc->seqctl |= FAILDIS; ahc->bus_intr = ahc_pci_intr; ahc->bus_chip_init = ahc_pci_chip_init; /* Remember how the card was setup in case there is no SEEPROM */ if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) { ahc_pause(ahc); if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN; scsiseq = ahc_inb(ahc, SCSISEQ); } else { sxfrctl1 = STPWEN; our_id = 7; scsiseq = 0; } error = ahc_reset(ahc, /*reinit*/FALSE); if (error != 0) return (ENXIO); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; /* Perform ALT-Mode Setup */ sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc_outb(ahc, OPTIONMODE, OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS); ahc_outb(ahc, SFUNCT, sfunct); /* Normal mode setup */ ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN |TARGCRCENDEN); } dscommand0 = ahc_inb(ahc, DSCOMMAND0); dscommand0 |= MPARCKEN|CACHETHEN; if ((ahc->features & AHC_ULTRA2) != 0) { /* * DPARCKEN doesn't work correctly on * some MBs so don't use it. */ dscommand0 &= ~DPARCKEN; } /* * Handle chips that must have cache line * streaming (dis/en)abled. */ if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0) dscommand0 |= CACHETHEN; if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0) dscommand0 &= ~CACHETHEN; ahc_outb(ahc, DSCOMMAND0, dscommand0); ahc->pci_cachesize = ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1) & CACHESIZE; ahc->pci_cachesize *= 4; if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0 && ahc->pci_cachesize == 4) { ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, 0, /*bytes*/1); ahc->pci_cachesize = 0; } /* * We cannot perform ULTRA speeds without the presence * of the external precision resistor. */ if ((ahc->features & AHC_ULTRA) != 0) { uint32_t devconfig; devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); if ((devconfig & REXTVALID) == 0) ahc->features &= ~AHC_ULTRA; } /* See if we have a SEEPROM and perform auto-term */ check_extport(ahc, &sxfrctl1); /* * Take the LED out of diagnostic mode */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); if ((ahc->features & AHC_ULTRA2) != 0) { ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX); } else { ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100); } if (ahc->flags & AHC_USEDEFAULTS) { /* * PCI Adapter default setup * Should only be used if the adapter does not have * a SEEPROM. */ /* See if someone else set us up already */ if ((ahc->flags & AHC_NO_BIOS_INIT) == 0 && scsiseq != 0) { printk("%s: Using left over BIOS settings\n", ahc_name(ahc)); ahc->flags &= ~AHC_USEDEFAULTS; ahc->flags |= AHC_BIOS_ENABLED; } else { /* * Assume only one connector and always turn * on termination. */ our_id = 0x07; sxfrctl1 = STPWEN; } ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI); ahc->our_id = our_id; } /* * Take a look to see if we have external SRAM. * We currently do not attempt to use SRAM that is * shared among multiple controllers. */ ahc_probe_ext_scbram(ahc); /* * Record our termination setting for the * generic initialization routine. 
*/ if ((sxfrctl1 & STPWEN) != 0) ahc->flags |= AHC_TERM_ENB_A; /* * Save chip register configuration data for chip resets * that occur during runtime and resume events. */ ahc->bus_softc.pci_softc.devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); ahc->bus_softc.pci_softc.command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); ahc->bus_softc.pci_softc.csize_lattime = ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1); ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0); ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE); ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT); ahc_outb(ahc, SFUNCT, sfunct); ahc->bus_softc.pci_softc.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); } if ((ahc->features & AHC_MULTI_FUNC) != 0) ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR); if ((ahc->features & AHC_ULTRA2) != 0) ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH); /* Core initialization */ error = ahc_init(ahc); if (error != 0) return (error); ahc->init_level++; /* * Allow interrupts now that we are completely setup. */ return ahc_pci_map_int(ahc); } /* * Test for the presence of external sram in an * "unshared" configuration. */ static int ahc_ext_scbram_present(struct ahc_softc *ahc) { u_int chip; int ramps; int single_user; uint32_t devconfig; chip = ahc->chip & AHC_CHIPID_MASK; devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); single_user = (devconfig & MPORTMODE) != 0; if ((ahc->features & AHC_ULTRA2) != 0) ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0; else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C) /* * External SCBRAM arbitration is flakey * on these chips. Unfortunately this means * we don't use the extra SCB ram space on the * 3940AUW. */ ramps = 0; else if (chip >= AHC_AIC7870) ramps = (devconfig & RAMPSM) != 0; else ramps = 0; if (ramps && single_user) return (1); return (0); } /* * Enable external scbram. */ static void ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, int fast, int large) { uint32_t devconfig; if (ahc->features & AHC_MULTI_FUNC) { /* * Set the SCB Base addr (highest address bit) * depending on which channel we are. */ ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc)); } ahc->flags &= ~AHC_LSCBS_ENABLED; if (large) ahc->flags |= AHC_LSCBS_ENABLED; devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); if ((ahc->features & AHC_ULTRA2) != 0) { u_int dscommand0; dscommand0 = ahc_inb(ahc, DSCOMMAND0); if (enable) dscommand0 &= ~INTSCBRAMSEL; else dscommand0 |= INTSCBRAMSEL; if (large) dscommand0 &= ~USCBSIZE32; else dscommand0 |= USCBSIZE32; ahc_outb(ahc, DSCOMMAND0, dscommand0); } else { if (fast) devconfig &= ~EXTSCBTIME; else devconfig |= EXTSCBTIME; if (enable) devconfig &= ~SCBRAMSEL; else devconfig |= SCBRAMSEL; if (large) devconfig &= ~SCBSIZE32; else devconfig |= SCBSIZE32; } if (pcheck) devconfig |= EXTSCBPEN; else devconfig &= ~EXTSCBPEN; ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* * Take a look to see if we have external SRAM. * We currently do not attempt to use SRAM that is * shared among multiple controllers. 
*/ static void ahc_probe_ext_scbram(struct ahc_softc *ahc) { int num_scbs; int test_num_scbs; int enable; int pcheck; int fast; int large; enable = FALSE; pcheck = FALSE; fast = FALSE; large = FALSE; num_scbs = 0; if (ahc_ext_scbram_present(ahc) == 0) goto done; /* * Probe for the best parameters to use. */ ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large); num_scbs = ahc_probe_scbs(ahc); if (num_scbs == 0) { /* The SRAM wasn't really present. */ goto done; } enable = TRUE; /* * Clear any outstanding parity error * and ensure that parity error reporting * is enabled. */ ahc_outb(ahc, SEQCTL, 0); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); /* Now see if we can do parity */ ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large); num_scbs = ahc_probe_scbs(ahc); if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 || (ahc_inb(ahc, ERROR) & MPARERR) == 0) pcheck = TRUE; /* Clear any resulting parity error */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); /* Now see if we can do fast timing */ ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large); test_num_scbs = ahc_probe_scbs(ahc); if (test_num_scbs == num_scbs && ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 || (ahc_inb(ahc, ERROR) & MPARERR) == 0)) fast = TRUE; /* * See if we can use large SCBs and still maintain * the same overall count of SCBs. */ if ((ahc->features & AHC_LARGE_SCBS) != 0) { ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE); test_num_scbs = ahc_probe_scbs(ahc); if (test_num_scbs >= num_scbs) { large = TRUE; num_scbs = test_num_scbs; if (num_scbs >= 64) { /* * We have enough space to move the * "busy targets table" into SCB space * and make it qualify all the way to the * lun level. */ ahc->flags |= AHC_SCB_BTT; } } } done: /* * Disable parity error reporting until we * can load instruction ram. */ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); /* Clear any latched parity error */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); if (bootverbose && enable) { printk("%s: External SRAM, %s access%s, %dbytes/SCB\n", ahc_name(ahc), fast ? "fast" : "slow", pcheck ? ", parity checking enabled" : "", large ? 64 : 32); } ahc_scbram_config(ahc, enable, pcheck, fast, large); } /* * Perform some simple tests that should catch situations where * our registers are invalidly mapped. */ int ahc_pci_test_register_access(struct ahc_softc *ahc) { int error; u_int status1; uint32_t cmd; uint8_t hcntrl; error = EIO; /* * Enable PCI error interrupt status, but suppress NMIs * generated by SERR raised due to target aborts. */ cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); /* * First a simple test to see if any * registers can be read. Reading * HCNTRL has no side effects and has * at least one bit that is guaranteed to * be zero so it is a good register to * use for this test. */ hcntrl = ahc_inb(ahc, HCNTRL); if (hcntrl == 0xFF) goto fail; if ((hcntrl & CHIPRST) != 0) { /* * The chip has not been initialized since * PCI/EISA/VLB bus reset. Don't trust * "left over BIOS data". */ ahc->flags |= AHC_NO_BIOS_INIT; } /* * Next create a situation where write combining * or read prefetching could be initiated by the * CPU or host bridge. Our device does not support * either, so look for data corruption and/or flagged * PCI errors. First pause without causing another * chip reset. 
*/ hcntrl &= ~CHIPRST; ahc_outb(ahc, HCNTRL, hcntrl|PAUSE); while (ahc_is_paused(ahc) == 0) ; /* Clear any PCI errors that occurred before our driver attached. */ status1 = ahc_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, SEQCTL, PERRORDIS); ahc_outb(ahc, SCBPTR, 0); ahc_outl(ahc, SCB_BASE, 0x5aa555aa); if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa) goto fail; status1 = ahc_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); if ((status1 & STA) != 0) goto fail; error = 0; fail: /* Silently clear any latched errors. */ status1 = ahc_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); return (error); } /* * Check the external port logic for a serial eeprom * and termination/cable detection contrls. */ static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1) { struct seeprom_descriptor sd; struct seeprom_config *sc; int have_seeprom; int have_autoterm; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; sc = ahc->seep_config; /* * For some multi-channel devices, the c46 is simply too * small to work. For the other controller types, we can * get our information from either SEEPROM type. Set the * type to start our probe with accordingly. */ if (ahc->flags & AHC_LARGE_SEEPROM) sd.sd_chip = C56_66; else sd.sd_chip = C46; sd.sd_MS = SEEMS; sd.sd_RDY = SEERDY; sd.sd_CS = SEECS; sd.sd_CK = SEECK; sd.sd_DO = SEEDO; sd.sd_DI = SEEDI; have_seeprom = ahc_acquire_seeprom(ahc, &sd); if (have_seeprom) { if (bootverbose) printk("%s: Reading SEEPROM...", ahc_name(ahc)); for (;;) { u_int start_addr; start_addr = 32 * (ahc->channel - 'A'); have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, start_addr, sizeof(*sc)/2); if (have_seeprom) have_seeprom = ahc_verify_cksum(sc); if (have_seeprom != 0 || sd.sd_chip == C56_66) { if (bootverbose) { if (have_seeprom == 0) printk ("checksum error\n"); else printk ("done.\n"); } break; } sd.sd_chip = C56_66; } ahc_release_seeprom(&sd); /* Remember the SEEPROM type for later */ if (sd.sd_chip == C56_66) ahc->flags |= AHC_LARGE_SEEPROM; } if (!have_seeprom) { /* * Pull scratch ram settings and treat them as * if they are the contents of an seeprom if * the 'ADPT' signature is found in SCB2. * We manually compose the data as 16bit values * to avoid endian issues. 
*/ ahc_outb(ahc, SCBPTR, 2); if (ahc_inb(ahc, SCB_BASE) == 'A' && ahc_inb(ahc, SCB_BASE + 1) == 'D' && ahc_inb(ahc, SCB_BASE + 2) == 'P' && ahc_inb(ahc, SCB_BASE + 3) == 'T') { uint16_t *sc_data; int i; sc_data = (uint16_t *)sc; for (i = 0; i < 32; i++, sc_data++) { int j; j = i * 2; *sc_data = ahc_inb(ahc, SRAM_BASE + j) | ahc_inb(ahc, SRAM_BASE + j + 1) << 8; } have_seeprom = ahc_verify_cksum(sc); if (have_seeprom) ahc->flags |= AHC_SCB_CONFIG_USED; } /* * Clear any SCB parity errors in case this data and * its associated parity was not initialized by the BIOS */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); } if (!have_seeprom) { if (bootverbose) printk("%s: No SEEPROM available.\n", ahc_name(ahc)); ahc->flags |= AHC_USEDEFAULTS; kfree(ahc->seep_config); ahc->seep_config = NULL; sc = NULL; } else { ahc_parse_pci_eeprom(ahc, sc); } /* * Cards that have the external logic necessary to talk to * a SEEPROM, are almost certain to have the remaining logic * necessary for auto-termination control. This assumption * hasn't failed yet... */ have_autoterm = have_seeprom; /* * Some low-cost chips have SEEPROM and auto-term control built * in, instead of using a GAL. They can tell us directly * if the termination logic is enabled. */ if ((ahc->features & AHC_SPIOCAP) != 0) { if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0) have_autoterm = FALSE; } if (have_autoterm) { ahc->flags |= AHC_HAS_TERM_LOGIC; ahc_acquire_seeprom(ahc, &sd); configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1); ahc_release_seeprom(&sd); } else if (have_seeprom) { *sxfrctl1 &= ~STPWEN; if ((sc->adapter_control & CFSTERM) != 0) *sxfrctl1 |= STPWEN; if (bootverbose) printk("%s: Low byte termination %sabled\n", ahc_name(ahc), (*sxfrctl1 & STPWEN) ? "en" : "dis"); } } static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc) { /* * Put the data we've collected down into SRAM * where ahc_init will find it. */ int i; int max_targ = sc->max_targets & CFMAXTARG; u_int scsi_conf; uint16_t discenable; uint16_t ultraenb; discenable = 0; ultraenb = 0; if ((sc->adapter_control & CFULTRAEN) != 0) { /* * Determine if this adapter has a "newstyle" * SEEPROM format. */ for (i = 0; i < max_targ; i++) { if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) { ahc->flags |= AHC_NEWEEPROM_FMT; break; } } } for (i = 0; i < max_targ; i++) { u_int scsirate; uint16_t target_mask; target_mask = 0x01 << i; if (sc->device_flags[i] & CFDISC) discenable |= target_mask; if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) { if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) ultraenb |= target_mask; } else if ((sc->adapter_control & CFULTRAEN) != 0) { ultraenb |= target_mask; } if ((sc->device_flags[i] & CFXFER) == 0x04 && (ultraenb & target_mask) != 0) { /* Treat 10MHz as a non-ultra speed */ sc->device_flags[i] &= ~CFXFER; ultraenb &= ~target_mask; } if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; if (sc->device_flags[i] & CFSYNCH) offset = MAX_OFFSET_ULTRA2; else offset = 0; ahc_outb(ahc, TARG_OFFSET + i, offset); /* * The ultra enable bits contain the * high bit of the ultra2 sync rate * field. */ scsirate = (sc->device_flags[i] & CFXFER) | ((ultraenb & target_mask) ? 
0x8 : 0x0); if (sc->device_flags[i] & CFWIDEB) scsirate |= WIDEXFER; } else { scsirate = (sc->device_flags[i] & CFXFER) << 4; if (sc->device_flags[i] & CFSYNCH) scsirate |= SOFS; if (sc->device_flags[i] & CFWIDEB) scsirate |= WIDEXFER; } ahc_outb(ahc, TARG_SCSIRATE + i, scsirate); } ahc->our_id = sc->brtime_id & CFSCSIID; scsi_conf = (ahc->our_id & 0x7); if (sc->adapter_control & CFSPARITY) scsi_conf |= ENSPCHK; if (sc->adapter_control & CFRESETB) scsi_conf |= RESET_SCSI; ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT; if (sc->bios_control & CFEXTEND) ahc->flags |= AHC_EXTENDED_TRANS_A; if (sc->bios_control & CFBIOSEN) ahc->flags |= AHC_BIOS_ENABLED; if (ahc->features & AHC_ULTRA && (ahc->flags & AHC_NEWEEPROM_FMT) == 0) { /* Should we enable Ultra mode? */ if (!(sc->adapter_control & CFULTRAEN)) /* Treat us as a non-ultra card */ ultraenb = 0; } if (sc->signature == CFSIGNATURE || sc->signature == CFSIGNATURE2) { uint32_t devconfig; /* Honor the STPWLEVEL settings */ devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); devconfig &= ~STPWLEVEL; if ((sc->bios_control & CFSTPWLEVEL) != 0) devconfig |= STPWLEVEL; ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* Set SCSICONF info */ ahc_outb(ahc, SCSICONF, scsi_conf); ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff); ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff); } static void configure_termination(struct ahc_softc *ahc, struct seeprom_descriptor *sd, u_int adapter_control, u_int *sxfrctl1) { uint8_t brddat; brddat = 0; /* * Update the settings in sxfrctl1 to match the * termination settings */ *sxfrctl1 = 0; /* * SEECS must be on for the GALS to latch * the data properly. Be sure to leave MS * on or we will release the seeprom. */ SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS); if ((adapter_control & CFAUTOTERM) != 0 || (ahc->features & AHC_NEW_TERMCTL) != 0) { int internal50_present; int internal68_present; int externalcable_present; int eeprom_present; int enableSEC_low; int enableSEC_high; int enablePRI_low; int enablePRI_high; int sum; enableSEC_low = 0; enableSEC_high = 0; enablePRI_low = 0; enablePRI_high = 0; if ((ahc->features & AHC_NEW_TERMCTL) != 0) { ahc_new_term_detect(ahc, &enableSEC_low, &enableSEC_high, &enablePRI_low, &enablePRI_high, &eeprom_present); if ((adapter_control & CFSEAUTOTERM) == 0) { if (bootverbose) printk("%s: Manual SE Termination\n", ahc_name(ahc)); enableSEC_low = (adapter_control & CFSELOWTERM); enableSEC_high = (adapter_control & CFSEHIGHTERM); } if ((adapter_control & CFAUTOTERM) == 0) { if (bootverbose) printk("%s: Manual LVD Termination\n", ahc_name(ahc)); enablePRI_low = (adapter_control & CFSTERM); enablePRI_high = (adapter_control & CFWSTERM); } /* Make the table calculations below happy */ internal50_present = 0; internal68_present = 1; externalcable_present = 1; } else if ((ahc->features & AHC_SPIOCAP) != 0) { aic785X_cable_detect(ahc, &internal50_present, &externalcable_present, &eeprom_present); /* Can never support a wide connector. */ internal68_present = 0; } else { aic787X_cable_detect(ahc, &internal50_present, &internal68_present, &externalcable_present, &eeprom_present); } if ((ahc->features & AHC_WIDE) == 0) internal68_present = 0; if (bootverbose && (ahc->features & AHC_ULTRA2) == 0) { printk("%s: internal 50 cable %s present", ahc_name(ahc), internal50_present ? 
"is":"not"); if ((ahc->features & AHC_WIDE) != 0) printk(", internal 68 cable %s present", internal68_present ? "is":"not"); printk("\n%s: external cable %s present\n", ahc_name(ahc), externalcable_present ? "is":"not"); } if (bootverbose) printk("%s: BIOS eeprom %s present\n", ahc_name(ahc), eeprom_present ? "is" : "not"); if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) { /* * The 50 pin connector is a separate bus, * so force it to always be terminated. * In the future, perform current sensing * to determine if we are in the middle of * a properly terminated bus. */ internal50_present = 0; } /* * Now set the termination based on what * we found. * Flash Enable = BRDDAT7 * Secondary High Term Enable = BRDDAT6 * Secondary Low Term Enable = BRDDAT5 (7890) * Primary High Term Enable = BRDDAT4 (7890) */ if ((ahc->features & AHC_ULTRA2) == 0 && (internal50_present != 0) && (internal68_present != 0) && (externalcable_present != 0)) { printk("%s: Illegal cable configuration!!. " "Only two connectors on the " "adapter may be used at a " "time!\n", ahc_name(ahc)); /* * Pretend there are no cables in the hope * that having all of the termination on * gives us a more stable bus. */ internal50_present = 0; internal68_present = 0; externalcable_present = 0; } if ((ahc->features & AHC_WIDE) != 0 && ((externalcable_present == 0) || (internal68_present == 0) || (enableSEC_high != 0))) { brddat |= BRDDAT6; if (bootverbose) { if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) printk("%s: 68 pin termination " "Enabled\n", ahc_name(ahc)); else printk("%s: %sHigh byte termination " "Enabled\n", ahc_name(ahc), enableSEC_high ? "Secondary " : ""); } } sum = internal50_present + internal68_present + externalcable_present; if (sum < 2 || (enableSEC_low != 0)) { if ((ahc->features & AHC_ULTRA2) != 0) brddat |= BRDDAT5; else *sxfrctl1 |= STPWEN; if (bootverbose) { if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) printk("%s: 50 pin termination " "Enabled\n", ahc_name(ahc)); else printk("%s: %sLow byte termination " "Enabled\n", ahc_name(ahc), enableSEC_low ? "Secondary " : ""); } } if (enablePRI_low != 0) { *sxfrctl1 |= STPWEN; if (bootverbose) printk("%s: Primary Low Byte termination " "Enabled\n", ahc_name(ahc)); } /* * Setup STPWEN before setting up the rest of * the termination per the tech note on the U160 cards. */ ahc_outb(ahc, SXFRCTL1, *sxfrctl1); if (enablePRI_high != 0) { brddat |= BRDDAT4; if (bootverbose) printk("%s: Primary High Byte " "termination Enabled\n", ahc_name(ahc)); } write_brdctl(ahc, brddat); } else { if ((adapter_control & CFSTERM) != 0) { *sxfrctl1 |= STPWEN; if (bootverbose) printk("%s: %sLow byte termination Enabled\n", ahc_name(ahc), (ahc->features & AHC_ULTRA2) ? "Primary " : ""); } if ((adapter_control & CFWSTERM) != 0 && (ahc->features & AHC_WIDE) != 0) { brddat |= BRDDAT6; if (bootverbose) printk("%s: %sHigh byte termination Enabled\n", ahc_name(ahc), (ahc->features & AHC_ULTRA2) ? "Secondary " : ""); } /* * Setup STPWEN before setting up the rest of * the termination per the tech note on the U160 cards. 
*/ ahc_outb(ahc, SXFRCTL1, *sxfrctl1); if ((ahc->features & AHC_WIDE) != 0) write_brdctl(ahc, brddat); } SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */ } static void ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, int *enableSEC_high, int *enablePRI_low, int *enablePRI_high, int *eeprom_present) { uint8_t brdctl; /* * BRDDAT7 = Eeprom * BRDDAT6 = Enable Secondary High Byte termination * BRDDAT5 = Enable Secondary Low Byte termination * BRDDAT4 = Enable Primary high byte termination * BRDDAT3 = Enable Primary low byte termination */ brdctl = read_brdctl(ahc); *eeprom_present = brdctl & BRDDAT7; *enableSEC_high = (brdctl & BRDDAT6); *enableSEC_low = (brdctl & BRDDAT5); *enablePRI_high = (brdctl & BRDDAT4); *enablePRI_low = (brdctl & BRDDAT3); } static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *internal68_present, int *externalcable_present, int *eeprom_present) { uint8_t brdctl; /* * First read the status of our cables. * Set the rom bank to 0 since the * bank setting serves as a multiplexor * for the cable detection logic. * BRDDAT5 controls the bank switch. */ write_brdctl(ahc, 0); /* * Now read the state of the internal * connectors. BRDDAT6 is INT50 and * BRDDAT7 is INT68. */ brdctl = read_brdctl(ahc); *internal50_present = (brdctl & BRDDAT6) ? 0 : 1; *internal68_present = (brdctl & BRDDAT7) ? 0 : 1; /* * Set the rom bank to 1 and determine * the other signals. */ write_brdctl(ahc, BRDDAT5); /* * Now read the state of the external * connectors. BRDDAT6 is EXT68 and * BRDDAT7 is EPROMPS. */ brdctl = read_brdctl(ahc); *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; *eeprom_present = (brdctl & BRDDAT7) ? 1 : 0; } static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *externalcable_present, int *eeprom_present) { uint8_t brdctl; uint8_t spiocap; spiocap = ahc_inb(ahc, SPIOCAP); spiocap &= ~SOFTCMDEN; spiocap |= EXT_BRDCTL; ahc_outb(ahc, SPIOCAP, spiocap); ahc_outb(ahc, BRDCTL, BRDRW|BRDCS); ahc_flush_device_writes(ahc); ahc_delay(500); ahc_outb(ahc, BRDCTL, 0); ahc_flush_device_writes(ahc); ahc_delay(500); brdctl = ahc_inb(ahc, BRDCTL); *internal50_present = (brdctl & BRDDAT5) ? 0 : 1; *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; *eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0; } int ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd) { int wait; if ((ahc->features & AHC_SPIOCAP) != 0 && (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0) return (0); /* * Request access of the memory port. When access is * granted, SEERDY will go high. We use a 1 second * timeout which should be near 1 second more than * is needed. Reason: after the chip reset, there * should be no contention. */ SEEPROM_OUTB(sd, sd->sd_MS); wait = 1000; /* 1 second timeout in msec */ while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) { ahc_delay(1000); /* delay 1 msec */ } if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) { SEEPROM_OUTB(sd, 0); return (0); } return(1); } void ahc_release_seeprom(struct seeprom_descriptor *sd) { /* Release access to the memory port and the serial EEPROM. 
*/ SEEPROM_OUTB(sd, 0); } static void write_brdctl(struct ahc_softc *ahc, uint8_t value) { uint8_t brdctl; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { brdctl = BRDSTB; if (ahc->channel == 'B') brdctl |= BRDCS; } else if ((ahc->features & AHC_ULTRA2) != 0) { brdctl = 0; } else { brdctl = BRDSTB|BRDCS; } ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); brdctl |= value; ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); if ((ahc->features & AHC_ULTRA2) != 0) brdctl |= BRDSTB_ULTRA2; else brdctl &= ~BRDSTB; ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); if ((ahc->features & AHC_ULTRA2) != 0) brdctl = 0; else brdctl &= ~BRDCS; ahc_outb(ahc, BRDCTL, brdctl); } static uint8_t read_brdctl(struct ahc_softc *ahc) { uint8_t brdctl; uint8_t value; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { brdctl = BRDRW; if (ahc->channel == 'B') brdctl |= BRDCS; } else if ((ahc->features & AHC_ULTRA2) != 0) { brdctl = BRDRW_ULTRA2; } else { brdctl = BRDRW|BRDCS; } ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); value = ahc_inb(ahc, BRDCTL); ahc_outb(ahc, BRDCTL, 0); return (value); } static void ahc_pci_intr(struct ahc_softc *ahc) { u_int error; u_int status1; error = ahc_inb(ahc, ERROR); if ((error & PCIERRSTAT) == 0) return; status1 = ahc_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); printk("%s: PCI error Interrupt at seqaddr = 0x%x\n", ahc_name(ahc), ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); if (status1 & DPE) { ahc->pci_target_perr_count++; printk("%s: Data Parity Error Detected during address " "or write data phase\n", ahc_name(ahc)); } if (status1 & SSE) { printk("%s: Signal System Error Detected\n", ahc_name(ahc)); } if (status1 & RMA) { printk("%s: Received a Master Abort\n", ahc_name(ahc)); } if (status1 & RTA) { printk("%s: Received a Target Abort\n", ahc_name(ahc)); } if (status1 & STA) { printk("%s: Signaled a Target Abort\n", ahc_name(ahc)); } if (status1 & DPR) { printk("%s: Data Parity Error has been reported via PERR#\n", ahc_name(ahc)); } /* Clear latched errors. 
*/ ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) { printk("%s: Latched PCIERR interrupt with " "no status bits set\n", ahc_name(ahc)); } else { ahc_outb(ahc, CLRINT, CLRPARERR); } if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) { printk( "%s: WARNING WARNING WARNING WARNING\n" "%s: Too many PCI parity errors observed as a target.\n" "%s: Some device on this bus is generating bad parity.\n" "%s: This is an error *observed by*, not *generated by*, this controller.\n" "%s: PCI parity error checking has been disabled.\n" "%s: WARNING WARNING WARNING WARNING\n", ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc)); ahc->seqctl |= FAILDIS; ahc_outb(ahc, SEQCTL, ahc->seqctl); } ahc_unpause(ahc); } static int ahc_pci_chip_init(struct ahc_softc *ahc) { ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0); ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode); ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt); ahc_outb(ahc, SFUNCT, sfunct); ahc_outb(ahc, CRCCONTROL1, ahc->bus_softc.pci_softc.crccontrol1); } if ((ahc->features & AHC_MULTI_FUNC) != 0) ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh); return (ahc_chip_init(ahc)); } void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc) { /* * We assume that the OS has restored our register * mappings, etc. Just update the config space registers * that the OS doesn't know about and rely on our chip * reset handler to handle the rest. 
*/ ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, ahc->bus_softc.pci_softc.devconfig, /*bytes*/4); ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, ahc->bus_softc.pci_softc.command, /*bytes*/1); ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1); if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { struct seeprom_descriptor sd; u_int sxfrctl1; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; ahc_acquire_seeprom(ahc, &sd); configure_termination(ahc, &sd, ahc->seep_config->adapter_control, &sxfrctl1); ahc_release_seeprom(&sd); } } static int ahc_aic785X_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7850; ahc->features = AHC_AIC7850_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7860_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7860; ahc->features = AHC_AIC7860_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_apa1480_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7860_setup(ahc); if (error != 0) return (error); ahc->features |= AHC_REMOVABLE; return (0); } static int ahc_aic7870_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->chip = AHC_AIC7870; ahc->features = AHC_AIC7870_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7870h_setup(struct ahc_softc *ahc) { int error = ahc_aic7870_setup(ahc); ahc->features |= AHC_HVD; return error; } static int ahc_aha394X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha394XX_setup(ahc); return (error); } static int ahc_aha394Xh_setup(struct ahc_softc *ahc) { int error = ahc_aha394X_setup(ahc); ahc->features |= AHC_HVD; return error; } static int ahc_aha398X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha398XX_setup(ahc); return (error); } static int ahc_aha494X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha494XX_setup(ahc); return (error); } static int ahc_aha494Xh_setup(struct ahc_softc *ahc) { int error = ahc_aha494X_setup(ahc); ahc->features |= AHC_HVD; return error; } static int ahc_aic7880_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7880; ahc->features = AHC_AIC7880_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG; rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) { ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; } else { ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; } ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7880h_setup(struct ahc_softc *ahc) { int error = ahc_aic7880_setup(ahc); ahc->features |= AHC_HVD; return error; } static int ahc_aha2940Pro_setup(struct ahc_softc *ahc) { ahc->flags |= AHC_INT50_SPEEDFLEX; return (ahc_aic7880_setup(ahc)); } static int ahc_aha394XU_setup(struct ahc_softc *ahc) { int error; error = 
ahc_aic7880_setup(ahc); if (error == 0) error = ahc_aha394XX_setup(ahc); return (error); } static int ahc_aha394XUh_setup(struct ahc_softc *ahc) { int error = ahc_aha394XU_setup(ahc); ahc->features |= AHC_HVD; return error; } static int ahc_aha398XU_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7880_setup(ahc); if (error == 0) error = ahc_aha398XX_setup(ahc); return (error); } static int ahc_aic7890_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7890; ahc->features = AHC_AIC7890_FE; ahc->flags |= AHC_NEWEEPROM_FMT; rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev == 0) ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG; ahc->instruction_ram_size = 768; return (0); } static int ahc_aic7892_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->chip = AHC_AIC7892; ahc->features = AHC_AIC7892_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; ahc->instruction_ram_size = 1024; return (0); } static int ahc_aic7895_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; /* * The 'C' revision of the aic7895 has a few additional features. */ rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 4) { ahc->chip = AHC_AIC7895C; ahc->features = AHC_AIC7895C_FE; } else { u_int command; ahc->chip = AHC_AIC7895; ahc->features = AHC_AIC7895_FE; /* * The BIOS disables the use of MWI transactions * since it does not have the MWI bug work around * we have. Disabling MWI reduces performance, so * turn it on again. */ command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1); command |= PCIM_CMD_MWRICEN; ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1); ahc->bugs |= AHC_PCI_MWI_BUG; } /* * XXX Does CACHETHEN really not work??? What about PCI retry? * on C level chips. Need to test, but for now, play it safe. */ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG | AHC_CACHETHEN_BUG; #if 0 uint32_t devconfig; /* * Cachesize must also be zero due to stray DAC * problem when sitting behind some bridges. */ ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1); devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1); devconfig |= MRDCEN; ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1); #endif ahc->flags |= AHC_NEWEEPROM_FMT; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7895h_setup(struct ahc_softc *ahc) { int error = ahc_aic7895_setup(ahc); ahc->features |= AHC_HVD; return error; } static int ahc_aic7896_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; pci = ahc->dev_softc; ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; ahc->chip = AHC_AIC7896; ahc->features = AHC_AIC7896_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_CACHETHEN_DIS_BUG; ahc->instruction_ram_size = 768; return (0); } static int ahc_aic7899_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; pci = ahc->dev_softc; ahc->channel = ahc_get_pci_function(pci) == 1 ? 
'B' : 'A'; ahc->chip = AHC_AIC7899; ahc->features = AHC_AIC7899_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; ahc->instruction_ram_size = 1024; return (0); } static int ahc_aha29160C_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7899_setup(ahc); if (error != 0) return (error); ahc->features |= AHC_REMOVABLE; return (0); } static int ahc_raid_setup(struct ahc_softc *ahc) { printk("RAID functionality unsupported\n"); return (ENXIO); } static int ahc_aha394XX_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; pci = ahc->dev_softc; switch (ahc_get_pci_slot(pci)) { case AHC_394X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_394X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; default: printk("adapter at unexpected slot %d\n" "unable to map to a channel\n", ahc_get_pci_slot(pci)); ahc->channel = 'A'; } return (0); } static int ahc_aha398XX_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; pci = ahc->dev_softc; switch (ahc_get_pci_slot(pci)) { case AHC_398X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_398X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; case AHC_398X_SLOT_CHANNEL_C: ahc->channel = 'C'; break; default: printk("adapter at unexpected slot %d\n" "unable to map to a channel\n", ahc_get_pci_slot(pci)); ahc->channel = 'A'; break; } ahc->flags |= AHC_LARGE_SEEPROM; return (0); } static int ahc_aha494XX_setup(struct ahc_softc *ahc) { ahc_dev_softc_t pci; pci = ahc->dev_softc; switch (ahc_get_pci_slot(pci)) { case AHC_494X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_494X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; case AHC_494X_SLOT_CHANNEL_C: ahc->channel = 'C'; break; case AHC_494X_SLOT_CHANNEL_D: ahc->channel = 'D'; break; default: printk("adapter at unexpected slot %d\n" "unable to map to a channel\n", ahc_get_pci_slot(pci)); ahc->channel = 'A'; } ahc->flags |= AHC_LARGE_SEEPROM; return (0); }
linux-master
drivers/scsi/aic7xxx/aic7xxx_pci.c
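The ahc_pci_intr() routine in aic7xxx_pci.c above reads the ERROR register and, when PCIERRSTAT is latched, fetches the high byte of the PCI status register, prints one message per error bit, and writes the byte back to clear the latched state. Below is a minimal standalone sketch of that bit decoding; the macro values are the standard PCI status-register bit positions and are assumed (not confirmed by this excerpt) to match the driver's own DPE/SSE/RMA/RTA/STA/DPR definitions, which live in headers not shown here.

/*
 * Standalone sketch (not driver code): decode the high byte of the PCI
 * status register the way ahc_pci_intr() reports it.  Bit positions
 * follow the standard PCI status-register layout.
 */
#include <stdio.h>
#include <stdint.h>

#define DPE 0x80 /* Detected Parity Error     (status bit 15) */
#define SSE 0x40 /* Signaled System Error     (status bit 14) */
#define RMA 0x20 /* Received Master Abort     (status bit 13) */
#define RTA 0x10 /* Received Target Abort     (status bit 12) */
#define STA 0x08 /* Signaled Target Abort     (status bit 11) */
#define DPR 0x01 /* Master Data Parity Error  (status bit 8)  */

static void decode_pci_status1(uint8_t status1)
{
	if (status1 & DPE)
		printf("data parity error detected\n");
	if (status1 & SSE)
		printf("signaled system error\n");
	if (status1 & RMA)
		printf("received a master abort\n");
	if (status1 & RTA)
		printf("received a target abort\n");
	if (status1 & STA)
		printf("signaled a target abort\n");
	if (status1 & DPR)
		printf("data parity error reported via PERR#\n");
	if ((status1 & (DPE | SSE | RMA | RTA | STA | DPR)) == 0)
		printf("latched PCIERR interrupt with no status bits set\n");
}

int main(void)
{
	/* Example input: a parity error that was both detected and reported. */
	decode_pci_status1(DPE | DPR);
	return 0;
}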
/* * Interface for the 93C66/56/46/26/06 serial eeprom parts. * * Copyright (c) 1995, 1996 Daniel M. Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $ */ /* * The instruction set of the 93C66/56/46/26/06 chips are as follows: * * Start OP * * Function Bit Code Address** Data Description * ------------------------------------------------------------------- * READ 1 10 A5 - A0 Reads data stored in memory, * starting at specified address * EWEN 1 00 11XXXX Write enable must precede * all programming modes * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0 * WRITE 1 01 A5 - A0 D15 - D0 Writes register * ERAL 1 00 10XXXX Erase all registers * WRAL 1 00 01XXXX D15 - D0 Writes to all registers * EWDS 1 00 00XXXX Disables all programming * instructions * *Note: A value of X for address is a don't care condition. * **Note: There are 8 address bits for the 93C56/66 chips unlike * the 93C46/26/06 chips which have 6 address bits. * * The 93C46 has a four wire interface: clock, chip select, data in, and * data out. In order to perform one of the above functions, you need * to enable the chip select for a clock period (typically a minimum of * 1 usec, with the clock high and low a minimum of 750 and 250 nsec * respectively). While the chip select remains high, you can clock in * the instructions (above) starting with the start bit, followed by the * OP code, Address, and Data (if needed). For the READ instruction, the * requested 16-bit register contents is read from the data out line but * is preceded by an initial zero (leading 0, followed by 16-bits, MSB * first). The clock cycling from low to high initiates the next data * bit to be sent from the chip. */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" /* * Right now, we only have to read the SEEPROM. But we make it easier to * add other 93Cx6 functions. 
*/ struct seeprom_cmd { uint8_t len; uint8_t bits[11]; }; /* Short opcodes for the c46 */ static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; /* Long opcodes for the C56/C66 */ static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; /* Common opcodes */ static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; /* * Wait for the SEERDY to go high; about 800 ns. */ #define CLOCK_PULSE(sd, rdy) \ while ((SEEPROM_STATUS_INB(sd) & rdy) == 0) { \ ; /* Do nothing */ \ } \ (void)SEEPROM_INB(sd); /* Clear clock */ /* * Send a START condition and the given command */ static void send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd) { uint8_t temp; int i = 0; /* Send chip select for one clock cycle. */ temp = sd->sd_MS ^ sd->sd_CS; SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); for (i = 0; i < cmd->len; i++) { if (cmd->bits[i] != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if (cmd->bits[i] != 0) temp ^= sd->sd_DO; } } /* * Clear CS put the chip in the reset state, where it can wait for new commands. */ static void reset_seeprom(struct seeprom_descriptor *sd) { uint8_t temp; temp = sd->sd_MS; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); } /* * Read the serial EEPROM and returns 1 if successful and 0 if * not successful. */ int ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, u_int start_addr, u_int count) { int i = 0; u_int k = 0; uint16_t v; uint8_t temp; /* * Read the requested registers of the seeprom. The loop * will range from 0 to count-1. */ for (k = start_addr; k < count + start_addr; k++) { /* * Now we're ready to send the read command followed by the * address of the 16-bit register we want to read. */ send_seeprom_cmd(sd, &seeprom_read); /* Send the 6 or 8 bit address (MSB first, LSB last). */ temp = sd->sd_MS ^ sd->sd_CS; for (i = (sd->sd_chip - 1); i >= 0; i--) { if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; } /* * Now read the 16 bit register. An initial 0 precedes the * register contents which begins with bit 15 (MSB) and ends * with bit 0 (LSB). The initial 0 will be shifted off the * top of our word as we let the loop run from 0 to 16. */ v = 0; for (i = 16; i >= 0; i--) { SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); v <<= 1; if (SEEPROM_DATA_INB(sd) & sd->sd_DI) v |= 1; SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); } buf[k - start_addr] = v; /* Reset the chip select for the next command cycle. */ reset_seeprom(sd); } #ifdef AHC_DUMP_EEPROM printk("\nSerial EEPROM:\n\t"); for (k = 0; k < count; k = k + 1) { if (((k % 8) == 0) && (k != 0)) { printk(KERN_CONT "\n\t"); } printk(KERN_CONT " 0x%x", buf[k]); } printk(KERN_CONT "\n"); #endif return (1); } /* * Write the serial EEPROM and return 1 if successful and 0 if * not successful. 
*/ int ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, u_int start_addr, u_int count) { const struct seeprom_cmd *ewen, *ewds; uint16_t v; uint8_t temp; int i, k; /* Place the chip into write-enable mode */ if (sd->sd_chip == C46) { ewen = &seeprom_ewen; ewds = &seeprom_ewds; } else if (sd->sd_chip == C56_66) { ewen = &seeprom_long_ewen; ewds = &seeprom_long_ewds; } else { printk("ahc_write_seeprom: unsupported seeprom type %d\n", sd->sd_chip); return (0); } send_seeprom_cmd(sd, ewen); reset_seeprom(sd); /* Write all requested data out to the seeprom. */ temp = sd->sd_MS ^ sd->sd_CS; for (k = start_addr; k < count + start_addr; k++) { /* Send the write command */ send_seeprom_cmd(sd, &seeprom_write); /* Send the 6 or 8 bit address (MSB first). */ for (i = (sd->sd_chip - 1); i >= 0; i--) { if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; } /* Write the 16 bit value, MSB first */ v = buf[k - start_addr]; for (i = 15; i >= 0; i--) { if ((v & (1 << i)) != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if ((v & (1 << i)) != 0) temp ^= sd->sd_DO; } /* Wait for the chip to complete the write */ temp = sd->sd_MS; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); temp = sd->sd_MS ^ sd->sd_CS; do { SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); } while ((SEEPROM_DATA_INB(sd) & sd->sd_DI) == 0); reset_seeprom(sd); } /* Put the chip back into write-protect mode */ send_seeprom_cmd(sd, ewds); reset_seeprom(sd); return (1); } int ahc_verify_cksum(struct seeprom_config *sc) { int i; int maxaddr; uint32_t checksum; uint16_t *scarray; maxaddr = (sizeof(*sc)/2) - 1; checksum = 0; scarray = (uint16_t *)sc; for (i = 0; i < maxaddr; i++) checksum = checksum + scarray[i]; if (checksum == 0 || (checksum & 0xFFFF) != sc->checksum) { return (0); } else { return(1); } }
linux-master
drivers/scsi/aic7xxx/aic7xxx_93cx6.c
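ahc_verify_cksum() at the end of aic7xxx_93cx6.c sums every 16-bit word of the seeprom_config image except the last one and accepts the image only if that sum is non-zero and its low 16 bits equal the stored checksum word. A minimal standalone illustration of that rule follows; the 4-word image is made up for the example, whereas the real image size is sizeof(struct seeprom_config)/2.

/*
 * Standalone sketch of the SEEPROM checksum rule: sum all words except
 * the final one (which holds the stored checksum) and compare.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int seeprom_cksum_ok(const uint16_t *words, size_t nwords)
{
	uint32_t checksum = 0;
	size_t i;

	for (i = 0; i < nwords - 1; i++)	/* skip the stored checksum word */
		checksum += words[i];

	if (checksum == 0)
		return 0;
	return (checksum & 0xFFFF) == words[nwords - 1];
}

int main(void)
{
	uint16_t image[4] = { 0x1234, 0x0001, 0xF000, 0 };

	/* Store the expected checksum in the last word, then verify. */
	image[3] = (0x1234 + 0x0001 + 0xF000) & 0xFFFF;
	printf("checksum %s\n", seeprom_cksum_ok(image, 4) ? "ok" : "bad");
	return 0;
}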
/* * Linux driver attachment glue for PCI based controllers. * * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c#47 $ */ #include "aic7xxx_osm.h" #include "aic7xxx_pci.h" /* Define the macro locally since it's different for different class of chips. 
*/ #define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI) static const struct pci_device_id ahc_linux_pci_id_table[] = { /* aic7850 based controllers */ ID(ID_AHA_2902_04_10_15_20C_30C), /* aic7860 based controllers */ ID(ID_AHA_2930CU), ID(ID_AHA_1480A & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK), ID(ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK), /* aic7870 based controllers */ ID(ID_AHA_2940), ID(ID_AHA_3940), ID(ID_AHA_398X), ID(ID_AHA_2944), ID(ID_AHA_3944), ID(ID_AHA_4944), /* aic7880 based controllers */ ID(ID_AHA_2940U & ID_DEV_VENDOR_MASK), ID(ID_AHA_3940U & ID_DEV_VENDOR_MASK), ID(ID_AHA_2944U & ID_DEV_VENDOR_MASK), ID(ID_AHA_3944U & ID_DEV_VENDOR_MASK), ID(ID_AHA_398XU & ID_DEV_VENDOR_MASK), ID(ID_AHA_4944U & ID_DEV_VENDOR_MASK), ID(ID_AHA_2930U & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK), ID(ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK), /* aic7890 based controllers */ ID(ID_AHA_2930U2), ID(ID_AHA_2940U2B), ID(ID_AHA_2940U2_OEM), ID(ID_AHA_2940U2), ID(ID_AHA_2950U2B), ID16(ID_AIC7890_ARO & ID_AIC7895_ARO_MASK), ID(ID_AAA_131U2), /* aic7890 based controllers */ ID(ID_AHA_29160), ID(ID_AHA_29160_CPQ), ID(ID_AHA_29160N), ID(ID_AHA_29160C), ID(ID_AHA_29160B), ID(ID_AHA_19160B), ID(ID_AIC7892_ARO), /* aic7892 based controllers */ ID(ID_AHA_2940U_DUAL), ID(ID_AHA_3940AU), ID(ID_AHA_3944AU), ID(ID_AIC7895_ARO), ID(ID_AHA_3950U2B_0), ID(ID_AHA_3950U2B_1), ID(ID_AHA_3950U2D_0), ID(ID_AHA_3950U2D_1), ID(ID_AIC7896_ARO), /* aic7899 based controllers */ ID(ID_AHA_3960D), ID(ID_AHA_3960D_CPQ), ID(ID_AIC7899_ARO), /* Generic chip probes for devices we don't know exactly. */ ID(ID_AIC7850 & ID_DEV_VENDOR_MASK), ID(ID_AIC7855 & ID_DEV_VENDOR_MASK), ID(ID_AIC7859 & ID_DEV_VENDOR_MASK), ID(ID_AIC7860 & ID_DEV_VENDOR_MASK), ID(ID_AIC7870 & ID_DEV_VENDOR_MASK), ID(ID_AIC7880 & ID_DEV_VENDOR_MASK), ID16(ID_AIC7890 & ID_9005_GENERIC_MASK), ID16(ID_AIC7892 & ID_9005_GENERIC_MASK), ID(ID_AIC7895 & ID_DEV_VENDOR_MASK), ID16(ID_AIC7896 & ID_9005_GENERIC_MASK), ID16(ID_AIC7899 & ID_9005_GENERIC_MASK), ID(ID_AIC7810 & ID_DEV_VENDOR_MASK), ID(ID_AIC7815 & ID_DEV_VENDOR_MASK), { 0 } }; MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table); static int __maybe_unused ahc_linux_pci_dev_suspend(struct device *dev) { struct ahc_softc *ahc = dev_get_drvdata(dev); return ahc_suspend(ahc); } static int __maybe_unused ahc_linux_pci_dev_resume(struct device *dev) { struct ahc_softc *ahc = dev_get_drvdata(dev); ahc_pci_resume(ahc); return (ahc_resume(ahc)); } static void ahc_linux_pci_dev_remove(struct pci_dev *pdev) { struct ahc_softc *ahc = pci_get_drvdata(pdev); u_long s; if (ahc->platform_data && ahc->platform_data->host) scsi_remove_host(ahc->platform_data->host); ahc_lock(ahc, &s); ahc_intr_enable(ahc, FALSE); ahc_unlock(ahc, &s); ahc_free(ahc); } static void ahc_linux_pci_inherit_flags(struct ahc_softc *ahc) { struct pci_dev *pdev = ahc->dev_softc, *master_pdev; unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); master_pdev = pci_get_slot(pdev->bus, master_devfn); if (master_pdev) { struct ahc_softc *master = pci_get_drvdata(master_pdev); if (master) { ahc->flags &= ~AHC_BIOS_ENABLED; ahc->flags |= master->flags & AHC_BIOS_ENABLED; ahc->flags &= ~AHC_PRIMARY_CHANNEL; ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL; } else printk(KERN_ERR "aic7xxx: no multichannel peer found!\n"); pci_dev_put(master_pdev); } } static int ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { char buf[80]; const uint64_t mask_39bit = 
0x7FFFFFFFFFULL; struct ahc_softc *ahc; ahc_dev_softc_t pci; const struct ahc_pci_identity *entry; char *name; int error; struct device *dev = &pdev->dev; pci = pdev; entry = ahc_find_pci_device(pci); if (entry == NULL) return (-ENODEV); /* * Allocate a softc for this card and * set it up for attachment by our * common detect routine. */ sprintf(buf, "ahc_pci:%d:%d:%d", ahc_get_pci_bus(pci), ahc_get_pci_slot(pci), ahc_get_pci_function(pci)); name = kstrdup(buf, GFP_ATOMIC); if (name == NULL) return (-ENOMEM); ahc = ahc_alloc(NULL, name); if (ahc == NULL) return (-ENOMEM); if (pci_enable_device(pdev)) { ahc_free(ahc); return (-ENODEV); } pci_set_master(pdev); if (sizeof(dma_addr_t) > 4 && ahc->features & AHC_LARGE_SCBS && dma_set_mask(dev, mask_39bit) == 0 && dma_get_required_mask(dev) > DMA_BIT_MASK(32)) { ahc->flags |= AHC_39BIT_ADDRESSING; } else { if (dma_set_mask(dev, DMA_BIT_MASK(32))) { ahc_free(ahc); printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); return (-ENODEV); } } ahc->dev_softc = pci; ahc->dev = &pci->dev; error = ahc_pci_config(ahc, entry); if (error != 0) { ahc_free(ahc); return (-error); } /* * Second Function PCI devices need to inherit some * settings from function 0. */ if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0) ahc_linux_pci_inherit_flags(ahc); pci_set_drvdata(pdev, ahc); ahc_linux_register_host(ahc, &aic7xxx_driver_template); return (0); } /******************************* PCI Routines *********************************/ uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) { switch (width) { case 1: { uint8_t retval; pci_read_config_byte(pci, reg, &retval); return (retval); } case 2: { uint16_t retval; pci_read_config_word(pci, reg, &retval); return (retval); } case 4: { uint32_t retval; pci_read_config_dword(pci, reg, &retval); return (retval); } default: panic("ahc_pci_read_config: Read size too big"); /* NOTREACHED */ return (0); } } void ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width) { switch (width) { case 1: pci_write_config_byte(pci, reg, value); break; case 2: pci_write_config_word(pci, reg, value); break; case 4: pci_write_config_dword(pci, reg, value); break; default: panic("ahc_pci_write_config: Write size too big"); /* NOTREACHED */ } } static SIMPLE_DEV_PM_OPS(ahc_linux_pci_dev_pm_ops, ahc_linux_pci_dev_suspend, ahc_linux_pci_dev_resume); static struct pci_driver aic7xxx_pci_driver = { .name = "aic7xxx", .probe = ahc_linux_pci_dev_probe, .driver.pm = &ahc_linux_pci_dev_pm_ops, .remove = ahc_linux_pci_dev_remove, .id_table = ahc_linux_pci_id_table }; int ahc_linux_pci_init(void) { return pci_register_driver(&aic7xxx_pci_driver); } void ahc_linux_pci_exit(void) { pci_unregister_driver(&aic7xxx_pci_driver); } static int ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base) { if (aic7xxx_allow_memio == 0) return (ENOMEM); *base = pci_resource_start(ahc->dev_softc, 0); if (*base == 0) return (ENOMEM); if (!request_region(*base, 256, "aic7xxx")) return (ENOMEM); return (0); } static int ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, resource_size_t *bus_addr, uint8_t __iomem **maddr) { resource_size_t start; int error; error = 0; start = pci_resource_start(ahc->dev_softc, 1); if (start != 0) { *bus_addr = start; if (!request_mem_region(start, 0x1000, "aic7xxx")) error = ENOMEM; if (error == 0) { *maddr = ioremap(start, 256); if (*maddr == NULL) { error = ENOMEM; release_mem_region(start, 0x1000); } } } else error = ENOMEM; return (error); } int 
ahc_pci_map_registers(struct ahc_softc *ahc) { uint32_t command; resource_size_t base; uint8_t __iomem *maddr; int error; /* * If its allowed, we prefer memory mapped access. */ command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4); command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN); base = 0; maddr = NULL; error = ahc_linux_pci_reserve_mem_region(ahc, &base, &maddr); if (error == 0) { ahc->platform_data->mem_busaddr = base; ahc->tag = BUS_SPACE_MEMIO; ahc->bsh.maddr = maddr; ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command | PCIM_CMD_MEMEN, 4); /* * Do a quick test to see if memory mapped * I/O is functioning correctly. */ if (ahc_pci_test_register_access(ahc) != 0) { printk("aic7xxx: PCI Device %d:%d:%d " "failed memory mapped test. Using PIO.\n", ahc_get_pci_bus(ahc->dev_softc), ahc_get_pci_slot(ahc->dev_softc), ahc_get_pci_function(ahc->dev_softc)); iounmap(maddr); release_mem_region(ahc->platform_data->mem_busaddr, 0x1000); ahc->bsh.maddr = NULL; maddr = NULL; } else command |= PCIM_CMD_MEMEN; } else { printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx " "unavailable. Cannot memory map device.\n", ahc_get_pci_bus(ahc->dev_softc), ahc_get_pci_slot(ahc->dev_softc), ahc_get_pci_function(ahc->dev_softc), (unsigned long long)base); } /* * We always prefer memory mapped access. */ if (maddr == NULL) { error = ahc_linux_pci_reserve_io_region(ahc, &base); if (error == 0) { ahc->tag = BUS_SPACE_PIO; ahc->bsh.ioport = (u_long)base; command |= PCIM_CMD_PORTEN; } else { printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] " "unavailable. Cannot map device.\n", ahc_get_pci_bus(ahc->dev_softc), ahc_get_pci_slot(ahc->dev_softc), ahc_get_pci_function(ahc->dev_softc), (unsigned long long)base); } } ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4); return (error); } int ahc_pci_map_int(struct ahc_softc *ahc) { int error; error = request_irq(ahc->dev_softc->irq, ahc_linux_isr, IRQF_SHARED, "aic7xxx", ahc); if (error == 0) ahc->platform_data->irq = ahc->dev_softc->irq; return (-error); }
linux-master
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
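In ahc_linux_pci_dev_probe() above, controllers with AHC_LARGE_SCBS are given a 39-bit DMA mask (the 0x7FFFFFFFFF literal) when dma_set_mask() accepts it and the platform actually requires more than 32 address bits; otherwise the driver falls back to a 32-bit mask. The tiny standalone check below only verifies the mask arithmetic; BIT_MASK_U64 is a hypothetical helper standing in for the kernel's DMA_BIT_MASK, and the real decision also consults dma_get_required_mask().

/*
 * Standalone sketch of the addressing-mask arithmetic used when
 * choosing between 39-bit and 32-bit DMA addressing.
 */
#include <stdio.h>
#include <stdint.h>

#define BIT_MASK_U64(bits) (((uint64_t)1 << (bits)) - 1)

int main(void)
{
	uint64_t mask_39bit = 0x7FFFFFFFFFULL;

	/* The literal used by the probe routine is exactly a 39-bit mask. */
	printf("39-bit mask matches: %s\n",
	       mask_39bit == BIT_MASK_U64(39) ? "yes" : "no");
	printf("32-bit fallback mask: 0x%llx\n",
	       (unsigned long long)BIT_MASK_U64(32));
	return 0;
}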
/* * Product specific probe and attach routines for: * 27/284X and aic7770 motherboard SCSI controllers * * Copyright (c) 1994-1998, 2000, 2001 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic7770.c#32 $ * * $FreeBSD$ */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" #define ID_AIC7770 0x04907770 #define ID_AHA_274x 0x04907771 #define ID_AHA_284xB 0x04907756 /* BIOS enabled */ #define ID_AHA_284x 0x04907757 /* BIOS disabled*/ #define ID_OLV_274x 0x04907782 /* Olivetti OEM */ #define ID_OLV_274xD 0x04907783 /* Olivetti OEM (Differential) */ static int aic7770_chip_init(struct ahc_softc *ahc); static int aha2840_load_seeprom(struct ahc_softc *ahc); static ahc_device_setup_t ahc_aic7770_VL_setup; static ahc_device_setup_t ahc_aic7770_EISA_setup; static ahc_device_setup_t ahc_aic7770_setup; struct aic7770_identity aic7770_ident_table[] = { { ID_AHA_274x, 0xFFFFFFFF, "Adaptec 274X SCSI adapter", ahc_aic7770_EISA_setup }, { ID_AHA_284xB, 0xFFFFFFFE, "Adaptec 284X SCSI adapter", ahc_aic7770_VL_setup }, { ID_AHA_284x, 0xFFFFFFFE, "Adaptec 284X SCSI adapter (BIOS Disabled)", ahc_aic7770_VL_setup }, { ID_OLV_274x, 0xFFFFFFFF, "Adaptec (Olivetti OEM) 274X SCSI adapter", ahc_aic7770_EISA_setup }, { ID_OLV_274xD, 0xFFFFFFFF, "Adaptec (Olivetti OEM) 274X Differential SCSI adapter", ahc_aic7770_EISA_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7770, 0xFFFFFFFF, "Adaptec aic7770 SCSI adapter", ahc_aic7770_EISA_setup } }; const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table); struct aic7770_identity * aic7770_find_device(uint32_t id) { struct aic7770_identity *entry; int i; for (i = 0; i < ahc_num_aic7770_devs; i++) { entry = &aic7770_ident_table[i]; if (entry->full_id == (id & entry->id_mask)) return (entry); } return (NULL); } int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) { int error; int have_seeprom; u_int hostconf; u_int irq; u_int intdef; error = entry->setup(ahc); have_seeprom = 0; if (error != 0) return (error); error = aic7770_map_registers(ahc, io); if (error != 0) return (error); /* * Before we continue probing the card, ensure that * its interrupts are *disabled*. We don't want * a misstep to hang the machine in an interrupt * storm. 
*/ ahc_intr_enable(ahc, FALSE); ahc->description = entry->name; error = ahc_softc_init(ahc); if (error != 0) return (error); ahc->bus_chip_init = aic7770_chip_init; error = ahc_reset(ahc, /*reinit*/FALSE); if (error != 0) return (error); /* Make sure we have a valid interrupt vector */ intdef = ahc_inb(ahc, INTDEF); irq = intdef & VECTOR; switch (irq) { case 9: case 10: case 11: case 12: case 14: case 15: break; default: printk("aic7770_config: invalid irq setting %d\n", intdef); return (ENXIO); } if ((intdef & EDGE_TRIG) != 0) ahc->flags |= AHC_EDGE_INTERRUPT; switch (ahc->chip & (AHC_EISA|AHC_VL)) { case AHC_EISA: { u_int biosctrl; u_int scsiconf; u_int scsiconf1; biosctrl = ahc_inb(ahc, HA_274_BIOSCTRL); scsiconf = ahc_inb(ahc, SCSICONF); scsiconf1 = ahc_inb(ahc, SCSICONF + 1); /* Get the primary channel information */ if ((biosctrl & CHANNEL_B_PRIMARY) != 0) ahc->flags |= 1; if ((biosctrl & BIOSMODE) == BIOSDISABLED) { ahc->flags |= AHC_USEDEFAULTS; } else { if ((ahc->features & AHC_WIDE) != 0) { ahc->our_id = scsiconf1 & HWSCSIID; if (scsiconf & TERM_ENB) ahc->flags |= AHC_TERM_ENB_A; } else { ahc->our_id = scsiconf & HSCSIID; ahc->our_id_b = scsiconf1 & HSCSIID; if (scsiconf & TERM_ENB) ahc->flags |= AHC_TERM_ENB_A; if (scsiconf1 & TERM_ENB) ahc->flags |= AHC_TERM_ENB_B; } } if ((ahc_inb(ahc, HA_274_BIOSGLOBAL) & HA_274_EXTENDED_TRANS)) ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B; break; } case AHC_VL: { have_seeprom = aha2840_load_seeprom(ahc); break; } default: break; } if (have_seeprom == 0) { kfree(ahc->seep_config); ahc->seep_config = NULL; } /* * Ensure autoflush is enabled */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS); /* Setup the FIFO threshold and the bus off time */ hostconf = ahc_inb(ahc, HOSTCONF); ahc_outb(ahc, BUSSPD, hostconf & DFTHRSH); ahc_outb(ahc, BUSTIME, (hostconf << 2) & BOFF); ahc->bus_softc.aic7770_softc.busspd = hostconf & DFTHRSH; ahc->bus_softc.aic7770_softc.bustime = (hostconf << 2) & BOFF; /* * Generic aic7xxx initialization. */ error = ahc_init(ahc); if (error != 0) return (error); error = aic7770_map_int(ahc, irq); if (error != 0) return (error); ahc->init_level++; /* * Enable the board's BUS drivers */ ahc_outb(ahc, BCTL, ENABLE); return (0); } static int aic7770_chip_init(struct ahc_softc *ahc) { ahc_outb(ahc, BUSSPD, ahc->bus_softc.aic7770_softc.busspd); ahc_outb(ahc, BUSTIME, ahc->bus_softc.aic7770_softc.bustime); ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS); ahc_outb(ahc, BCTL, ENABLE); return (ahc_chip_init(ahc)); } /* * Read the 284x SEEPROM. */ static int aha2840_load_seeprom(struct ahc_softc *ahc) { struct seeprom_descriptor sd; struct seeprom_config *sc; int have_seeprom; uint8_t scsi_conf; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL_2840; sd.sd_status_offset = STATUS_2840; sd.sd_dataout_offset = STATUS_2840; sd.sd_chip = C46; sd.sd_MS = 0; sd.sd_RDY = EEPROM_TF; sd.sd_CS = CS_2840; sd.sd_CK = CK_2840; sd.sd_DO = DO_2840; sd.sd_DI = DI_2840; sc = ahc->seep_config; if (bootverbose) printk("%s: Reading SEEPROM...", ahc_name(ahc)); have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, /*start_addr*/0, sizeof(*sc)/2); if (have_seeprom) { if (ahc_verify_cksum(sc) == 0) { if(bootverbose) printk ("checksum error\n"); have_seeprom = 0; } else if (bootverbose) { printk("done.\n"); } } if (!have_seeprom) { if (bootverbose) printk("%s: No SEEPROM available\n", ahc_name(ahc)); ahc->flags |= AHC_USEDEFAULTS; } else { /* * Put the data we've collected down into SRAM * where ahc_init will find it. 
*/ int i; int max_targ; uint16_t discenable; max_targ = (ahc->features & AHC_WIDE) != 0 ? 16 : 8; discenable = 0; for (i = 0; i < max_targ; i++){ uint8_t target_settings; target_settings = (sc->device_flags[i] & CFXFER) << 4; if (sc->device_flags[i] & CFSYNCH) target_settings |= SOFS; if (sc->device_flags[i] & CFWIDEB) target_settings |= WIDEXFER; if (sc->device_flags[i] & CFDISC) discenable |= (0x01 << i); ahc_outb(ahc, TARG_SCSIRATE + i, target_settings); } ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); ahc->our_id = sc->brtime_id & CFSCSIID; scsi_conf = (ahc->our_id & 0x7); if (sc->adapter_control & CFSPARITY) scsi_conf |= ENSPCHK; if (sc->adapter_control & CFRESETB) scsi_conf |= RESET_SCSI; if (sc->bios_control & CF284XEXTEND) ahc->flags |= AHC_EXTENDED_TRANS_A; /* Set SCSICONF info */ ahc_outb(ahc, SCSICONF, scsi_conf); if (sc->adapter_control & CF284XSTERM) ahc->flags |= AHC_TERM_ENB_A; } return (have_seeprom); } static int ahc_aic7770_VL_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7770_setup(ahc); ahc->chip |= AHC_VL; return (error); } static int ahc_aic7770_EISA_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7770_setup(ahc); ahc->chip |= AHC_EISA; return (error); } static int ahc_aic7770_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_AIC7770; ahc->features = AHC_AIC7770_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG; ahc->flags |= AHC_PAGESCBS; ahc->instruction_ram_size = 448; return (0); }
linux-master
drivers/scsi/aic7xxx/aic7770.c
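aha2840_load_seeprom() in aic7770.c converts the per-target CFDISC bits from the SEEPROM into a 16-bit disconnect-enable mask, then writes the complement of each half to DISC_DSB and DISC_DSB + 1, since those hardware registers are disable masks. The standalone sketch below shows only that conversion; the CFDISC value and the 4-entry flag table are made up for illustration and do not reflect the real SEEPROM layout.

/*
 * Standalone sketch: build a disconnect-enable bitmask from per-target
 * flags and derive the two disable-register bytes from its complement.
 */
#include <stdio.h>
#include <stdint.h>

#define CFDISC 0x10	/* illustrative flag bit; the real value comes from the SEEPROM format */

int main(void)
{
	uint8_t device_flags[16] = { CFDISC, 0, CFDISC, CFDISC };	/* targets 0, 2, 3 may disconnect */
	uint16_t discenable = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (device_flags[i] & CFDISC)
			discenable |= (uint16_t)1 << i;

	printf("DISC_DSB     = 0x%02x\n", (uint8_t)~(discenable & 0xff));
	printf("DISC_DSB + 1 = 0x%02x\n", (uint8_t)~((discenable >> 8) & 0xff));
	return 0;
}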
/* * Adaptec AIC7xxx device driver for Linux. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $ * * Copyright (c) 1994 John Aycock * The University of Calgary Department of Computer Science. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide, * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux, * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual, * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the * ANSI SCSI-2 specification (draft 10c), ... * * -------------------------------------------------------------------------- * * Modifications by Daniel M. Eischen ([email protected]): * * Substantially modified to include support for wide and twin bus * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes, * SCB paging, and other rework of the code. * * -------------------------------------------------------------------------- * Copyright (c) 1994-2000 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * *--------------------------------------------------------------------------- * * Thanks also go to (in alphabetical order) the following: * * Rory Bolt - Sequencer bug fixes * Jay Estabrook - Initial DEC Alpha support * Doug Ledford - Much needed abort/reset bug fixes * Kai Makisara - DMAing of SCBs * * A Boot time option was also added for not resetting the scsi bus. * * Form: aic7xxx=extended * aic7xxx=no_reset * aic7xxx=verbose * * Daniel M. Eischen, [email protected], 1/23/97 * * Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp */ /* * Further driver modifications made by Doug Ledford <[email protected]> * * Copyright (c) 1997-1999 Doug Ledford * * These changes are released under the same licensing terms as the FreeBSD * driver written by Justin Gibbs. Please see his Copyright notice above * for the exact terms and conditions covering my changes as well as the * warranty statement. * * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include * but are not limited to: * * 1: Import of the latest FreeBSD sequencer code for this driver * 2: Modification of kernel code to accommodate different sequencer semantics * 3: Extensive changes throughout kernel portion of driver to improve * abort/reset processing and error hanndling * 4: Other work contributed by various people on the Internet * 5: Changes to printk information and verbosity selection code * 6: General reliability related changes, especially in IRQ management * 7: Modifications to the default probe/attach order for supported cards * 8: SMP friendliness has been improved * */ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include <scsi/scsicam.h> static struct scsi_transport_template *ahc_linux_transport_template = NULL; #include <linux/init.h> /* __setup */ #include <linux/mm.h> /* For fetching system memory size */ #include <linux/blkdev.h> /* For block_size() */ #include <linux/delay.h> /* For ssleep/msleep */ #include <linux/slab.h> /* * Set this to the delay in seconds after SCSI bus reset. * Note, we honor this only for the initial bus reset. * The scsi error recovery code performs its own bus settle * delay handling for error recovery actions. */ #ifdef CONFIG_AIC7XXX_RESET_DELAY_MS #define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS #else #define AIC7XXX_RESET_DELAY 5000 #endif /* * To change the default number of tagged transactions allowed per-device, * add a line to the lilo.conf file like: * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" * which will result in the first four devices on the first two * controllers being set to a tagged queue depth of 32. * * The tag_commands is an array of 16 to allow for wide and twin adapters. * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15 * for channel 1. */ typedef struct { uint8_t tag_commands[16]; /* Allow for wide/twin adapters. */ } adapter_tag_info_t; /* * Modify this as you see fit for your system. * * 0 tagged queuing disabled * 1 <= n <= 253 n == max tags ever dispatched. 
* * The driver will throttle the number of commands dispatched to a * device if it returns queue full. For devices with a fixed maximum * queue depth, the driver will eventually determine this depth and * lock it in (a console message is printed to indicate that a lock * has occurred). On some devices, queue full is returned for a temporary * resource shortage. These devices will return queue full at varying * depths. The driver will throttle back when the queue fulls occur and * attempt to slowly increase the depth over time as the device recovers * from the resource shortage. * * In this example, the first line will disable tagged queueing for all * the devices on the first probed aic7xxx adapter. * * The second line enables tagged queueing with 4 commands/LUN for IDs * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the * driver to attempt to use up to 64 tags for ID 1. * * The third line is the same as the first line. * * The fourth line disables tagged queueing for devices 0 and 3. It * enables tagged queueing for the other IDs, with 16 commands/LUN * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for * IDs 2, 5-7, and 9-15. */ /* * NOTE: The below structure is for reference only, the actual structure * to modify in order to change things is just below this comment block. adapter_tag_info_t aic7xxx_tag_info[] = { {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}}, {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} }; */ #ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE #define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE #else #define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE #endif #define AIC7XXX_CONFIGED_TAG_COMMANDS { \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \ AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE \ } /* * By default, use the number of commands specified by * the users kernel configuration. */ static adapter_tag_info_t aic7xxx_tag_info[] = { {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS}, {AIC7XXX_CONFIGED_TAG_COMMANDS} }; /* * There should be a specific return value for this in scsi.h, but * it seems that most drivers ignore it. */ #define DID_UNDERFLOW DID_ERROR void ahc_print_path(struct ahc_softc *ahc, struct scb *scb) { printk("(scsi%d:%c:%d:%d): ", ahc->platform_data->host->host_no, scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X', scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1, scb != NULL ? SCB_GET_LUN(scb) : -1); } /* * XXX - these options apply unilaterally to _all_ 274x/284x/294x * cards in the system. This should be fixed. Exceptions to this * rule are noted in the comments. */ /* * Skip the scsi bus reset. Non 0 make us skip the reset at startup. 
This * has no effect on any later resets that might occur due to things like * SCSI bus timeouts. */ static uint32_t aic7xxx_no_reset; /* * Should we force EXTENDED translation on a controller. * 0 == Use whatever is in the SEEPROM or default to off * 1 == Use whatever is in the SEEPROM or default to on */ static uint32_t aic7xxx_extended; /* * PCI bus parity checking of the Adaptec controllers. This is somewhat * dubious at best. To my knowledge, this option has never actually * solved a PCI parity problem, but on certain machines with broken PCI * chipset configurations where stray PCI transactions with bad parity are * the norm rather than the exception, the error messages can be overwhelming. * It's included in the driver for completeness. * 0 = Shut off PCI parity check * non-0 = reverse polarity pci parity checking */ static uint32_t aic7xxx_pci_parity = ~0; /* * There are lots of broken chipsets in the world. Some of them will * violate the PCI spec when we issue byte sized memory writes to our * controller. I/O mapped register access, if allowed by the given * platform, will work in almost all cases. */ uint32_t aic7xxx_allow_memio = ~0; /* * So that we can set how long each device is given as a selection timeout. * The table of values goes like this: * 0 - 256ms * 1 - 128ms * 2 - 64ms * 3 - 32ms * We default to 256ms because some older devices need a longer time * to respond to initial selection. */ static uint32_t aic7xxx_seltime; /* * Certain devices do not perform any aging on commands. Should the * device be saturated by commands in one portion of the disk, it is * possible for transactions on far away sectors to never be serviced. * To handle these devices, we can periodically send an ordered tag to * force all outstanding transactions to be serviced prior to a new * transaction. */ static uint32_t aic7xxx_periodic_otag; /* * Module information and settable options. 
*/ static char *aic7xxx = NULL; MODULE_AUTHOR("Maintainer: Hannes Reinecke <[email protected]>"); MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(AIC7XXX_DRIVER_VERSION); module_param(aic7xxx, charp, 0444); MODULE_PARM_DESC(aic7xxx, "period-delimited options string:\n" " verbose Enable verbose/diagnostic logging\n" " allow_memio Allow device registers to be memory mapped\n" " debug Bitmask of debug values to enable\n" " no_probe Toggle EISA/VLB controller probing\n" " probe_eisa_vl Toggle EISA/VLB controller probing\n" " no_reset Suppress initial bus resets\n" " extended Enable extended geometry on all controllers\n" " periodic_otag Send an ordered tagged transaction\n" " periodically to prevent tag starvation.\n" " This may be required by some older disk\n" " drives or RAID arrays.\n" " tag_info:<tag_str> Set per-target tag depth\n" " global_tag_depth:<int> Global tag depth for every target\n" " on every bus\n" " seltime:<int> Selection Timeout\n" " (0/256ms,1/128ms,2/64ms,3/32ms)\n" "\n" " Sample modprobe configuration file:\n" " # Toggle EISA/VLB probing\n" " # Set tag depth on Controller 1/Target 1 to 10 tags\n" " # Shorten the selection timeout to 128ms\n" "\n" " options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n" ); static void ahc_linux_handle_scsi_status(struct ahc_softc *, struct scsi_device *, struct scb *); static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd); static void ahc_linux_freeze_simq(struct ahc_softc *ahc); static void ahc_linux_release_simq(struct ahc_softc *ahc); static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag); static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc); static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_linux_device_queue_depth(struct scsi_device *); static int ahc_linux_run_command(struct ahc_softc*, struct ahc_linux_device *, struct scsi_cmnd *); static void ahc_linux_setup_tag_info_global(char *p); static int aic7xxx_setup(char *s); static int ahc_linux_unit; /************************** OS Utility Wrappers *******************************/ void ahc_delay(long usec) { /* * udelay on Linux can have problems for * multi-millisecond waits. Wait at most * 1024us per call. */ while (usec > 0) { udelay(usec % 1024); usec -= 1024; } } /***************************** Low Level I/O **********************************/ uint8_t ahc_inb(struct ahc_softc * ahc, long port) { uint8_t x; if (ahc->tag == BUS_SPACE_MEMIO) { x = readb(ahc->bsh.maddr + port); } else { x = inb(ahc->bsh.ioport + port); } mb(); return (x); } void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val) { if (ahc->tag == BUS_SPACE_MEMIO) { writeb(val, ahc->bsh.maddr + port); } else { outb(val, ahc->bsh.ioport + port); } mb(); } void ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count) { int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. */ for (i = 0; i < count; i++) ahc_outb(ahc, port, *array++); } void ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count) { int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. 
*/ for (i = 0; i < count; i++) *array++ = ahc_inb(ahc, port); } /********************************* Inlines ************************************/ static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len); static void ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) { struct scsi_cmnd *cmd; cmd = scb->io_ctx; ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); scsi_dma_unmap(cmd); } static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len) { int consumed; if ((scb->sg_count + 1) > AHC_NSEG) panic("Too few segs for dma mapping. " "Increase AHC_NSEG\n"); consumed = 1; sg->addr = ahc_htole32(addr & 0xFFFFFFFF); scb->platform_data->xfer_len += len; if (sizeof(dma_addr_t) > 4 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0) len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK; sg->len = ahc_htole32(len); return (consumed); } /* * Return a string describing the driver. */ static const char * ahc_linux_info(struct Scsi_Host *host) { static char buffer[512]; char ahc_info[256]; char *bp; struct ahc_softc *ahc; bp = &buffer[0]; ahc = *(struct ahc_softc **)host->hostdata; memset(bp, 0, sizeof(buffer)); strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n" " <"); strcat(bp, ahc->description); strcat(bp, ">\n" " "); ahc_controller_info(ahc, ahc_info); strcat(bp, ahc_info); strcat(bp, "\n"); return (bp); } /* * Queue an SCB to the controller. */ static int ahc_linux_queue_lck(struct scsi_cmnd *cmd) { struct ahc_softc *ahc; struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); int rtn = SCSI_MLQUEUE_HOST_BUSY; unsigned long flags; ahc = *(struct ahc_softc **)cmd->device->host->hostdata; ahc_lock(ahc, &flags); if (ahc->platform_data->qfrozen == 0) { cmd->result = CAM_REQ_INPROG << 16; rtn = ahc_linux_run_command(ahc, dev, cmd); } ahc_unlock(ahc, &flags); return rtn; } static DEF_SCSI_QCMD(ahc_linux_queue) static inline struct scsi_target ** ahc_linux_target_in_softc(struct scsi_target *starget) { struct ahc_softc *ahc = *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata); unsigned int target_offset; target_offset = starget->id; if (starget->channel != 0) target_offset += 8; return &ahc->platform_data->starget[target_offset]; } static int ahc_linux_target_alloc(struct scsi_target *starget) { struct ahc_softc *ahc = *((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata); struct seeprom_config *sc = ahc->seep_config; unsigned long flags; struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget); unsigned short scsirate; struct ahc_devinfo devinfo; char channel = starget->channel + 'A'; unsigned int our_id = ahc->our_id; unsigned int target_offset; target_offset = starget->id; if (starget->channel != 0) target_offset += 8; if (starget->channel) our_id = ahc->our_id_b; ahc_lock(ahc, &flags); BUG_ON(*ahc_targp != NULL); *ahc_targp = starget; if (sc) { int maxsync = AHC_SYNCRATE_DT; int ultra = 0; int flags = sc->device_flags[target_offset]; if (ahc->flags & AHC_NEWEEPROM_FMT) { if (flags & CFSYNCHISULTRA) ultra = 1; } else if (flags & CFULTRAEN) ultra = 1; /* AIC nutcase; 10MHz appears as ultra = 1, CFXFER = 0x04 * change it to ultra=0, CFXFER = 0 */ if(ultra && (flags & CFXFER) == 0x04) { ultra = 0; flags &= ~CFXFER; } if ((ahc->features & AHC_ULTRA2) != 0) { scsirate = (flags & CFXFER) | (ultra ? 
0x8 : 0); } else { scsirate = (flags & CFXFER) << 4; maxsync = ultra ? AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST; } spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; if (!(flags & CFSYNCH)) spi_max_offset(starget) = 0; spi_min_period(starget) = ahc_find_period(ahc, scsirate, maxsync); } ahc_compile_devinfo(&devinfo, our_id, starget->id, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0, AHC_TRANS_GOAL, /*paused*/FALSE); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_GOAL, /*paused*/FALSE); ahc_unlock(ahc, &flags); return 0; } static void ahc_linux_target_destroy(struct scsi_target *starget) { struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget); *ahc_targp = NULL; } static int ahc_linux_slave_alloc(struct scsi_device *sdev) { struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata); struct scsi_target *starget = sdev->sdev_target; struct ahc_linux_device *dev; if (bootverbose) printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id); dev = scsi_transport_device_data(sdev); memset(dev, 0, sizeof(*dev)); /* * We start out life using untagged * transactions of which we allow one. */ dev->openings = 1; /* * Set maxtags to 0. This will be changed if we * later determine that we are dealing with * a tagged queuing capable device. */ dev->maxtags = 0; spi_period(starget) = 0; return 0; } static int ahc_linux_slave_configure(struct scsi_device *sdev) { if (bootverbose) sdev_printk(KERN_INFO, sdev, "Slave Configure\n"); ahc_linux_device_queue_depth(sdev); /* Initial Domain Validation */ if (!spi_initial_dv(sdev->sdev_target)) spi_dv_device(sdev); return 0; } #if defined(__i386__) /* * Return the disk geometry for the given SCSI device. */ static int ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads; int sectors; int cylinders; int extended; struct ahc_softc *ahc; u_int channel; ahc = *((struct ahc_softc **)sdev->host->hostdata); channel = sdev_channel(sdev); if (scsi_partsize(bdev, capacity, geom)) return 0; heads = 64; sectors = 32; cylinders = aic_sector_div(capacity, heads, sectors); if (aic7xxx_extended != 0) extended = 1; else if (channel == 0) extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0; else extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0; if (extended && cylinders >= 1024) { heads = 255; sectors = 63; cylinders = aic_sector_div(capacity, heads, sectors); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return (0); } #endif /* * Abort the current SCSI command(s). */ static int ahc_linux_abort(struct scsi_cmnd *cmd) { int error; error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT); if (error != SUCCESS) printk("aic7xxx_abort returns 0x%x\n", error); return (error); } /* * Attempt to send a target reset message to the device that timed out. */ static int ahc_linux_dev_reset(struct scsi_cmnd *cmd) { int error; error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET); if (error != SUCCESS) printk("aic7xxx_dev_reset returns 0x%x\n", error); return (error); } /* * Reset the SCSI bus. */ static int ahc_linux_bus_reset(struct scsi_cmnd *cmd) { struct ahc_softc *ahc; int found; unsigned long flags; ahc = *(struct ahc_softc **)cmd->device->host->hostdata; ahc_lock(ahc, &flags); found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A', /*initiate reset*/TRUE); ahc_unlock(ahc, &flags); if (bootverbose) printk("%s: SCSI bus reset delivered. 
" "%d SCBs aborted.\n", ahc_name(ahc), found); return SUCCESS; } struct scsi_host_template aic7xxx_driver_template = { .module = THIS_MODULE, .name = "aic7xxx", .proc_name = "aic7xxx", .show_info = ahc_linux_show_info, .write_info = ahc_proc_write_seeprom, .info = ahc_linux_info, .queuecommand = ahc_linux_queue, .eh_abort_handler = ahc_linux_abort, .eh_device_reset_handler = ahc_linux_dev_reset, .eh_bus_reset_handler = ahc_linux_bus_reset, #if defined(__i386__) .bios_param = ahc_linux_biosparam, #endif .can_queue = AHC_MAX_QUEUE, .this_id = -1, .max_sectors = 8192, .cmd_per_lun = 2, .slave_alloc = ahc_linux_slave_alloc, .slave_configure = ahc_linux_slave_configure, .target_alloc = ahc_linux_target_alloc, .target_destroy = ahc_linux_target_destroy, }; /**************************** Tasklet Handler *********************************/ /******************************** Macros **************************************/ #define BUILD_SCSIID(ahc, cmd) \ ((((cmd)->device->id << TID_SHIFT) & TID) \ | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \ | (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB)) /******************************** Bus DMA *************************************/ int ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, dma_addr_t lowaddr, dma_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag) { bus_dma_tag_t dmat; dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC); if (dmat == NULL) return (ENOMEM); /* * Linux is very simplistic about DMA memory. For now don't * maintain all specification information. Once Linux supplies * better facilities for doing these operations, or the * needs of this particular driver change, we might need to do * more here. */ dmat->alignment = alignment; dmat->boundary = boundary; dmat->maxsize = maxsize; *ret_tag = dmat; return (0); } void ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat) { kfree(dmat); } int ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */ *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); if (*vaddr == NULL) return ENOMEM; return 0; } void ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map) { dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); } int ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb, void *cb_arg, int flags) { /* * Assume for now that this will only be used during * initialization and not for per-transaction buffer mapping. 
*/ bus_dma_segment_t stack_sg; stack_sg.ds_addr = map; stack_sg.ds_len = dmat->maxsize; cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); return (0); } void ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) { } int ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) { /* Nothing to do */ return (0); } static void ahc_linux_setup_tag_info_global(char *p) { int tags, i, j; tags = simple_strtoul(p + 1, NULL, 0) & 0xff; printk("Setting Global Tags= %d\n", tags); for (i = 0; i < ARRAY_SIZE(aic7xxx_tag_info); i++) { for (j = 0; j < AHC_NUM_TARGETS; j++) { aic7xxx_tag_info[i].tag_commands[j] = tags; } } } static void ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value) { if ((instance >= 0) && (targ >= 0) && (instance < ARRAY_SIZE(aic7xxx_tag_info)) && (targ < AHC_NUM_TARGETS)) { aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff; if (bootverbose) printk("tag_info[%d:%d] = %d\n", instance, targ, value); } } static char * ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth, void (*callback)(u_long, int, int, int32_t), u_long callback_arg) { char *tok_end; char *tok_end2; int i; int instance; int targ; int done; char tok_list[] = {'.', ',', '{', '}', '\0'}; /* All options use a ':' name/arg separator */ if (*opt_arg != ':') return (opt_arg); opt_arg++; instance = -1; targ = -1; done = FALSE; /* * Restore separator that may be in * the middle of our option argument. */ tok_end = strchr(opt_arg, '\0'); if (tok_end < end) *tok_end = ','; while (!done) { switch (*opt_arg) { case '{': if (instance == -1) { instance = 0; } else { if (depth > 1) { if (targ == -1) targ = 0; } else { printk("Malformed Option %s\n", opt_name); done = TRUE; } } opt_arg++; break; case '}': if (targ != -1) targ = -1; else if (instance != -1) instance = -1; opt_arg++; break; case ',': case '.': if (instance == -1) done = TRUE; else if (targ >= 0) targ++; else if (instance >= 0) instance++; opt_arg++; break; case '\0': done = TRUE; break; default: tok_end = end; for (i = 0; tok_list[i]; i++) { tok_end2 = strchr(opt_arg, tok_list[i]); if ((tok_end2) && (tok_end2 < tok_end)) tok_end = tok_end2; } callback(callback_arg, instance, targ, simple_strtol(opt_arg, NULL, 0)); opt_arg = tok_end; break; } } return (opt_arg); } /* * Handle Linux boot parameters. This routine allows for assigning a value * to a parameter with a ':' between the parameter and the value. * ie. aic7xxx=stpwlev:1,extended */ static int aic7xxx_setup(char *s) { int i, n; char *p; char *end; static const struct { const char *name; uint32_t *flag; } options[] = { { "extended", &aic7xxx_extended }, { "no_reset", &aic7xxx_no_reset }, { "verbose", &aic7xxx_verbose }, { "allow_memio", &aic7xxx_allow_memio}, #ifdef AHC_DEBUG { "debug", &ahc_debug }, #endif { "periodic_otag", &aic7xxx_periodic_otag }, { "pci_parity", &aic7xxx_pci_parity }, { "seltime", &aic7xxx_seltime }, { "tag_info", NULL }, { "global_tag_depth", NULL }, { "dv", NULL } }; end = strchr(s, '\0'); /* * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE * will never be 0 in this case. 
*/ n = 0; while ((p = strsep(&s, ",.")) != NULL) { if (*p == '\0') continue; for (i = 0; i < ARRAY_SIZE(options); i++) { n = strlen(options[i].name); if (strncmp(options[i].name, p, n) == 0) break; } if (i == ARRAY_SIZE(options)) continue; if (strncmp(p, "global_tag_depth", n) == 0) { ahc_linux_setup_tag_info_global(p + n); } else if (strncmp(p, "tag_info", n) == 0) { s = ahc_parse_brace_option("tag_info", p + n, end, 2, ahc_linux_setup_tag_info, 0); } else if (p[n] == ':') { *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); } else if (strncmp(p, "verbose", n) == 0) { *(options[i].flag) = 1; } else { *(options[i].flag) ^= 0xFFFFFFFF; } } return 1; } __setup("aic7xxx=", aic7xxx_setup); uint32_t aic7xxx_verbose; int ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template) { char buf[80]; struct Scsi_Host *host; char *new_name; u_long s; int retval; template->name = ahc->description; host = scsi_host_alloc(template, sizeof(struct ahc_softc *)); if (host == NULL) return (ENOMEM); *((struct ahc_softc **)host->hostdata) = ahc; ahc->platform_data->host = host; host->can_queue = AHC_MAX_QUEUE; host->cmd_per_lun = 2; /* XXX No way to communicate the ID for multiple channels */ host->this_id = ahc->our_id; host->irq = ahc->platform_data->irq; host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8; host->max_lun = AHC_NUM_LUNS; host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0; host->sg_tablesize = AHC_NSEG; ahc_lock(ahc, &s); ahc_set_unit(ahc, ahc_linux_unit++); ahc_unlock(ahc, &s); sprintf(buf, "scsi%d", host->host_no); new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); if (new_name != NULL) { strcpy(new_name, buf); ahc_set_name(ahc, new_name); } host->unique_id = ahc->unit; ahc_linux_initialize_scsi_bus(ahc); ahc_intr_enable(ahc, TRUE); host->transportt = ahc_linux_transport_template; retval = scsi_add_host(host, ahc->dev); if (retval) { printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); scsi_host_put(host); return retval; } scsi_scan_host(host); return 0; } /* * Place the SCSI bus into a known state by either resetting it, * or forcing transfer negotiations on the next command to any * target. */ static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc) { int i; int numtarg; unsigned long s; i = 0; numtarg = 0; ahc_lock(ahc, &s); if (aic7xxx_no_reset != 0) ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B); if ((ahc->flags & AHC_RESET_BUS_A) != 0) ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE); else numtarg = (ahc->features & AHC_WIDE) ? 16 : 8; if ((ahc->features & AHC_TWIN) != 0) { if ((ahc->flags & AHC_RESET_BUS_B) != 0) { ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE); } else { if (numtarg == 0) i = 8; numtarg += 8; } } /* * Force negotiation to async for all targets that * will not see an initial bus reset. 
*/ for (; i < numtarg; i++) { struct ahc_devinfo devinfo; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); ahc_compile_devinfo(&devinfo, our_id, target_id, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_update_neg_request(ahc, &devinfo, tstate, tinfo, AHC_NEG_ALWAYS); } ahc_unlock(ahc, &s); /* Give the bus some time to recover */ if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) { ahc_linux_freeze_simq(ahc); msleep(AIC7XXX_RESET_DELAY); ahc_linux_release_simq(ahc); } } int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) { ahc->platform_data = kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC); if (ahc->platform_data == NULL) return (ENOMEM); ahc->platform_data->irq = AHC_LINUX_NOIRQ; ahc_lockinit(ahc); ahc->seltime = (aic7xxx_seltime & 0x3) << 4; ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; if (aic7xxx_pci_parity == 0) ahc->flags |= AHC_DISABLE_PCI_PERR; return (0); } void ahc_platform_free(struct ahc_softc *ahc) { struct scsi_target *starget; int i; if (ahc->platform_data != NULL) { /* destroy all of the device and target objects */ for (i = 0; i < AHC_NUM_TARGETS; i++) { starget = ahc->platform_data->starget[i]; if (starget != NULL) { ahc->platform_data->starget[i] = NULL; } } if (ahc->platform_data->irq != AHC_LINUX_NOIRQ) free_irq(ahc->platform_data->irq, ahc); if (ahc->tag == BUS_SPACE_PIO && ahc->bsh.ioport != 0) release_region(ahc->bsh.ioport, 256); if (ahc->tag == BUS_SPACE_MEMIO && ahc->bsh.maddr != NULL) { iounmap(ahc->bsh.maddr); release_mem_region(ahc->platform_data->mem_busaddr, 0x1000); } if (ahc->platform_data->host) scsi_host_put(ahc->platform_data->host); kfree(ahc->platform_data); } } void ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb) { ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ); } void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { struct ahc_linux_device *dev; int was_queuing; int now_queuing; if (sdev == NULL) return; dev = scsi_transport_device_data(sdev); was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED); switch (alg) { default: case AHC_QUEUE_NONE: now_queuing = 0; break; case AHC_QUEUE_BASIC: now_queuing = AHC_DEV_Q_BASIC; break; case AHC_QUEUE_TAGGED: now_queuing = AHC_DEV_Q_TAGGED; break; } if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0 && (was_queuing != now_queuing) && (dev->active != 0)) { dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY; dev->qfrozen++; } dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG); if (now_queuing) { u_int usertags; usertags = ahc_linux_user_tagdepth(ahc, devinfo); if (!was_queuing) { /* * Start out aggressively and allow our * dynamic queue depth algorithm to take * care of the rest. */ dev->maxtags = usertags; dev->openings = dev->maxtags - dev->active; } if (dev->maxtags == 0) { /* * Queueing is disabled by the user. */ dev->openings = 1; } else if (alg == AHC_QUEUE_TAGGED) { dev->flags |= AHC_DEV_Q_TAGGED; if (aic7xxx_periodic_otag != 0) dev->flags |= AHC_DEV_PERIODIC_OTAG; } else dev->flags |= AHC_DEV_Q_BASIC; } else { /* We can only have one opening. 
*/ dev->maxtags = 0; dev->openings = 1 - dev->active; } switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { case AHC_DEV_Q_BASIC: case AHC_DEV_Q_TAGGED: scsi_change_queue_depth(sdev, dev->openings + dev->active); break; default: /* * We allow the OS to queue 2 untagged transactions to * us at any time even though we can only execute them * serially on the controller/device. This should * remove some latency. */ scsi_change_queue_depth(sdev, 2); break; } } int ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { return 0; } static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { static int warned_user; u_int tags; tags = 0; if ((ahc->user_discenable & devinfo->target_mask) != 0) { if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) { if (warned_user == 0) { printk(KERN_WARNING "aic7xxx: WARNING: Insufficient tag_info instances\n" "aic7xxx: for installed controllers. Using defaults\n" "aic7xxx: Please update the aic7xxx_tag_info array in\n" "aic7xxx: the aic7xxx_osm..c source file.\n"); warned_user++; } tags = AHC_MAX_QUEUE; } else { adapter_tag_info_t *tag_info; tag_info = &aic7xxx_tag_info[ahc->unit]; tags = tag_info->tag_commands[devinfo->target_offset]; if (tags > AHC_MAX_QUEUE) tags = AHC_MAX_QUEUE; } } return (tags); } /* * Determines the queue depth for a given device. */ static void ahc_linux_device_queue_depth(struct scsi_device *sdev) { struct ahc_devinfo devinfo; u_int tags; struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata); ahc_compile_devinfo(&devinfo, sdev->sdev_target->channel == 0 ? ahc->our_id : ahc->our_id_b, sdev->sdev_target->id, sdev->lun, sdev->sdev_target->channel == 0 ? 'A' : 'B', ROLE_INITIATOR); tags = ahc_linux_user_tagdepth(ahc, &devinfo); if (tags != 0 && sdev->tagged_supported != 0) { ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_TAGGED); ahc_send_async(ahc, devinfo.channel, devinfo.target, devinfo.lun, AC_TRANSFER_NEG); ahc_print_devinfo(ahc, &devinfo); printk("Tagged Queuing enabled. Depth %d\n", tags); } else { ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE); ahc_send_async(ahc, devinfo.channel, devinfo.target, devinfo.lun, AC_TRANSFER_NEG); } } static int ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, struct scsi_cmnd *cmd) { struct scb *scb; struct hardware_scb *hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; uint16_t mask; struct scb_tailq *untagged_q = NULL; int nseg; /* * Schedule us to run later. The only reason we are not * running is because the whole controller Q is frozen. */ if (ahc->platform_data->qfrozen != 0) return SCSI_MLQUEUE_HOST_BUSY; /* * We only allow one untagged transaction * per target in the initiator role unless * we are storing a full busy target *lun* * table in SCB space. */ if (!(cmd->flags & SCMD_TAGGED) && (ahc->features & AHC_SCB_BTT) == 0) { int target_offset; target_offset = cmd->device->id + cmd->device->channel * 8; untagged_q = &(ahc->untagged_queues[target_offset]); if (!TAILQ_EMPTY(untagged_q)) /* if we're already executing an untagged command * we're busy to another */ return SCSI_MLQUEUE_DEVICE_BUSY; } nseg = scsi_dma_map(cmd); if (nseg < 0) return SCSI_MLQUEUE_HOST_BUSY; /* * Get an scb to use. */ scb = ahc_get_scb(ahc); if (!scb) { scsi_dma_unmap(cmd); return SCSI_MLQUEUE_HOST_BUSY; } scb->io_ctx = cmd; scb->platform_data->dev = dev; hscb = scb->hscb; cmd->host_scribble = (char *)scb; /* * Fill out basics of the HSCB. 
*/ hscb->control = 0; hscb->scsiid = BUILD_SCSIID(ahc, cmd); hscb->lun = cmd->device->lun; mask = SCB_GET_TARGET_MASK(ahc, scb); tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb), SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahc, scb), &tstate); hscb->scsirate = tinfo->scsirate; hscb->scsioffset = tinfo->curr.offset; if ((tstate->ultraenb & mask) != 0) hscb->control |= ULTRAENB; if ((ahc->user_discenable & mask) != 0) hscb->control |= DISCENB; if ((tstate->auto_negotiate & mask) != 0) { scb->flags |= SCB_AUTO_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { hscb->control |= ORDERED_QUEUE_TAG; dev->commands_since_idle_or_otag = 0; } else { hscb->control |= SIMPLE_QUEUE_TAG; } } hscb->cdb_len = cmd->cmd_len; if (hscb->cdb_len <= 12) { memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len); } else { memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); scb->flags |= SCB_CDB32_PTR; } scb->platform_data->xfer_len = 0; ahc_set_residual(scb, 0); ahc_set_sense_residual(scb, 0); scb->sg_count = 0; if (nseg > 0) { struct ahc_dma_seg *sg; struct scatterlist *cur_seg; int i; /* Copy the segments into the SG list. */ sg = scb->sg_list; /* * The sg_count may be larger than nseg if * a transfer crosses a 32bit page. */ scsi_for_each_sg(cmd, cur_seg, nseg, i) { dma_addr_t addr; bus_size_t len; int consumed; addr = sg_dma_address(cur_seg); len = sg_dma_len(cur_seg); consumed = ahc_linux_map_seg(ahc, scb, sg, addr, len); sg += consumed; scb->sg_count += consumed; } sg--; sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); /* * Reset the sg list pointer. */ scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys | SG_FULL_RESID); /* * Copy the first SG into the "current" * data pointer area. */ scb->hscb->dataptr = scb->sg_list->addr; scb->hscb->datacnt = scb->sg_list->len; } else { scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; scb->sg_count = 0; } LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); dev->openings--; dev->active++; dev->commands_issued++; if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) dev->commands_since_idle_or_otag++; scb->flags |= SCB_ACTIVE; if (untagged_q) { TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_queue_scb(ahc, scb); return 0; } /* * SCSI controller interrupt handler. */ irqreturn_t ahc_linux_isr(int irq, void *dev_id) { struct ahc_softc *ahc; u_long flags; int ours; ahc = (struct ahc_softc *) dev_id; ahc_lock(ahc, &flags); ours = ahc_intr(ahc); ahc_unlock(ahc, &flags); return IRQ_RETVAL(ours); } void ahc_platform_flushwork(struct ahc_softc *ahc) { } void ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun, ac_code code) { switch (code) { case AC_TRANSFER_NEG: { struct scsi_target *starget; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int target_offset; unsigned int target_ppr_options; BUG_ON(target == CAM_TARGET_WILDCARD); tinfo = ahc_fetch_transinfo(ahc, channel, channel == 'A' ? ahc->our_id : ahc->our_id_b, target, &tstate); /* * Don't bother reporting results while * negotiations are still pending. */ if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options) if (bootverbose == 0) break; /* * Don't bother reporting results that * are identical to those last reported. 
*/ target_offset = target; if (channel == 'B') target_offset += 8; starget = ahc->platform_data->starget[target_offset]; if (starget == NULL) break; target_ppr_options = (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0) + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0); if (tinfo->curr.period == spi_period(starget) && tinfo->curr.width == spi_width(starget) && tinfo->curr.offset == spi_offset(starget) && tinfo->curr.ppr_options == target_ppr_options) if (bootverbose == 0) break; spi_period(starget) = tinfo->curr.period; spi_width(starget) = tinfo->curr.width; spi_offset(starget) = tinfo->curr.offset; spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0; spi_display_xfer_agreement(starget); break; } case AC_SENT_BDR: { WARN_ON(lun != CAM_LUN_WILDCARD); scsi_report_device_reset(ahc->platform_data->host, channel - 'A', target); break; } case AC_BUS_RESET: if (ahc->platform_data->host != NULL) { scsi_report_bus_reset(ahc->platform_data->host, channel - 'A'); } break; default: panic("ahc_send_async: Unexpected async event"); } } /* * Calls the higher level scsi done function and frees the scb. */ void ahc_done(struct ahc_softc *ahc, struct scb *scb) { struct scsi_cmnd *cmd; struct ahc_linux_device *dev; LIST_REMOVE(scb, pending_links); if ((scb->flags & SCB_UNTAGGEDQ) != 0) { struct scb_tailq *untagged_q; int target_offset; target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); untagged_q = &(ahc->untagged_queues[target_offset]); TAILQ_REMOVE(untagged_q, scb, links.tqe); BUG_ON(!TAILQ_EMPTY(untagged_q)); } else if ((scb->flags & SCB_ACTIVE) == 0) { /* * Transactions aborted from the untagged queue may * not have been dispatched to the controller, so * only check the SCB_ACTIVE flag for tagged transactions. */ printk("SCB %d done'd twice\n", scb->hscb->tag); ahc_dump_card_state(ahc); panic("Stopping for safety"); } cmd = scb->io_ctx; dev = scb->platform_data->dev; dev->active--; dev->openings++; if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) { cmd->result &= ~(CAM_DEV_QFRZN << 16); dev->qfrozen--; } ahc_linux_unmap_scb(ahc, scb); /* * Guard against stale sense data. * The Linux mid-layer assumes that sense * was retrieved anytime the first byte of * the sense buffer looks "sane". */ cmd->sense_buffer[0] = 0; if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) { #ifdef AHC_REPORT_UNDERFLOWS uint32_t amount_xferred; amount_xferred = ahc_get_transfer_length(scb) - ahc_get_residual(scb); #endif if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MISC) != 0) { ahc_print_path(ahc, scb); printk("Set CAM_UNCOR_PARITY\n"); } #endif ahc_set_transaction_status(scb, CAM_UNCOR_PARITY); #ifdef AHC_REPORT_UNDERFLOWS /* * This code is disabled by default as some * clients of the SCSI system do not properly * initialize the underflow parameter. This * results in spurious termination of commands * that complete as expected (e.g. underflow is * allowed as command can return variable amounts * of data. */ } else if (amount_xferred < scb->io_ctx->underflow) { u_int i; ahc_print_path(ahc, scb); printk("CDB:"); for (i = 0; i < scb->io_ctx->cmd_len; i++) printk(" 0x%x", scb->io_ctx->cmnd[i]); printk("\n"); ahc_print_path(ahc, scb); printk("Saw underflow (%ld of %ld bytes). 
" "Treated as error\n", ahc_get_residual(scb), ahc_get_transfer_length(scb)); ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); #endif } else { ahc_set_transaction_status(scb, CAM_REQ_CMP); } } else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { ahc_linux_handle_scsi_status(ahc, cmd->device, scb); } if (dev->openings == 1 && ahc_get_transaction_status(scb) == CAM_REQ_CMP && ahc_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL) dev->tag_success_count++; /* * Some devices deal with temporary internal resource * shortages by returning queue full. When the queue * full occurrs, we throttle back. Slowly try to get * back to our previous queue depth. */ if ((dev->openings + dev->active) < dev->maxtags && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) { dev->tag_success_count = 0; dev->openings++; } if (dev->active == 0) dev->commands_since_idle_or_otag = 0; if ((scb->flags & SCB_RECOVERY_SCB) != 0) { printk("Recovery SCB completes\n"); if (ahc_get_transaction_status(scb) == CAM_BDR_SENT || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); if (ahc->platform_data->eh_done) complete(ahc->platform_data->eh_done); } ahc_free_scb(ahc, scb); ahc_linux_queue_cmd_complete(ahc, cmd); } static void ahc_linux_handle_scsi_status(struct ahc_softc *ahc, struct scsi_device *sdev, struct scb *scb) { struct ahc_devinfo devinfo; struct ahc_linux_device *dev = scsi_transport_device_data(sdev); ahc_compile_devinfo(&devinfo, ahc->our_id, sdev->sdev_target->id, sdev->lun, sdev->sdev_target->channel == 0 ? 'A' : 'B', ROLE_INITIATOR); /* * We don't currently trust the mid-layer to * properly deal with queue full or busy. So, * when one occurs, we tell the mid-layer to * unconditionally requeue the command to us * so that we can retry it ourselves. We also * implement our own throttling mechanism so * we don't clobber the device with too many * commands. */ switch (ahc_get_scsi_status(scb)) { default: break; case SAM_STAT_CHECK_CONDITION: case SAM_STAT_COMMAND_TERMINATED: { struct scsi_cmnd *cmd; /* * Copy sense information to the OS's cmd * structure if it is available. */ cmd = scb->io_ctx; if (scb->flags & SCB_SENSE) { u_int sense_size; sense_size = min(sizeof(struct scsi_sense_data) - ahc_get_sense_residual(scb), (u_long)SCSI_SENSE_BUFFERSIZE); memcpy(cmd->sense_buffer, ahc_get_sense_buf(ahc, scb), sense_size); if (sense_size < SCSI_SENSE_BUFFERSIZE) memset(&cmd->sense_buffer[sense_size], 0, SCSI_SENSE_BUFFERSIZE - sense_size); #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { int i; printk("Copied %d bytes of sense data:", sense_size); for (i = 0; i < sense_size; i++) { if ((i & 0xF) == 0) printk("\n"); printk("0x%x ", cmd->sense_buffer[i]); } printk("\n"); } #endif } break; } case SAM_STAT_TASK_SET_FULL: { /* * By the time the core driver has returned this * command, all other commands that were queued * to us but not the device have been returned. * This ensures that dev->active is equal to * the number of commands actually queued to * the device. */ dev->tag_success_count = 0; if (dev->active != 0) { /* * Drop our opening count to the number * of commands currently outstanding. */ dev->openings = 0; /* ahc_print_path(ahc, scb); printk("Dropping tag count to %d\n", dev->active); */ if (dev->active == dev->tags_on_last_queuefull) { dev->last_queuefull_same_count++; /* * If we repeatedly see a queue full * at the same queue depth, this * device has a fixed number of tag * slots. 
Lock in this tag depth * so we stop seeing queue fulls from * this device. */ if (dev->last_queuefull_same_count == AHC_LOCK_TAGS_COUNT) { dev->maxtags = dev->active; ahc_print_path(ahc, scb); printk("Locking max tag count at %d\n", dev->active); } } else { dev->tags_on_last_queuefull = dev->active; dev->last_queuefull_same_count = 0; } ahc_set_transaction_status(scb, CAM_REQUEUE_REQ); ahc_set_scsi_status(scb, SAM_STAT_GOOD); ahc_platform_set_tags(ahc, sdev, &devinfo, (dev->flags & AHC_DEV_Q_BASIC) ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); break; } /* * Drop down to a single opening, and treat this * as if the target returned BUSY SCSI status. */ dev->openings = 1; ahc_set_scsi_status(scb, SAM_STAT_BUSY); ahc_platform_set_tags(ahc, sdev, &devinfo, (dev->flags & AHC_DEV_Q_BASIC) ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED); break; } } } static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd) { /* * Map CAM error codes into Linux Error codes. We * avoid the conversion so that the DV code has the * full error information available when making * state change decisions. */ { u_int new_status; switch (ahc_cmd_get_transaction_status(cmd)) { case CAM_REQ_INPROG: case CAM_REQ_CMP: case CAM_SCSI_STATUS_ERROR: new_status = DID_OK; break; case CAM_REQ_ABORTED: new_status = DID_ABORT; break; case CAM_BUSY: new_status = DID_BUS_BUSY; break; case CAM_REQ_INVALID: case CAM_PATH_INVALID: new_status = DID_BAD_TARGET; break; case CAM_SEL_TIMEOUT: new_status = DID_NO_CONNECT; break; case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: new_status = DID_RESET; break; case CAM_UNCOR_PARITY: new_status = DID_PARITY; break; case CAM_CMD_TIMEOUT: new_status = DID_TIME_OUT; break; case CAM_UA_ABORT: case CAM_REQ_CMP_ERR: case CAM_AUTOSENSE_FAIL: case CAM_NO_HBA: case CAM_DATA_RUN_ERR: case CAM_UNEXP_BUSFREE: case CAM_SEQUENCE_FAIL: case CAM_CCB_LEN_ERR: case CAM_PROVIDE_FAIL: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_REQ_TOO_BIG: new_status = DID_ERROR; break; case CAM_REQUEUE_REQ: new_status = DID_REQUEUE; break; default: /* We should never get here */ new_status = DID_ERROR; break; } ahc_cmd_set_transaction_status(cmd, new_status); } scsi_done(cmd); } static void ahc_linux_freeze_simq(struct ahc_softc *ahc) { unsigned long s; ahc_lock(ahc, &s); ahc->platform_data->qfrozen++; if (ahc->platform_data->qfrozen == 1) { scsi_block_requests(ahc->platform_data->host); /* XXX What about Twin channels? */ ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ); } ahc_unlock(ahc, &s); } static void ahc_linux_release_simq(struct ahc_softc *ahc) { u_long s; int unblock_reqs; unblock_reqs = 0; ahc_lock(ahc, &s); if (ahc->platform_data->qfrozen > 0) ahc->platform_data->qfrozen--; if (ahc->platform_data->qfrozen == 0) unblock_reqs = 1; ahc_unlock(ahc, &s); /* * There is still a race here. The mid-layer * should keep its own freeze count and use * a bottom half handler to run the queues * so we can unblock with our own lock held. 
*/ if (unblock_reqs) scsi_unblock_requests(ahc->platform_data->host); } static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) { struct ahc_softc *ahc; struct ahc_linux_device *dev; struct scb *pending_scb; u_int saved_scbptr; u_int active_scb_index; u_int last_phase; u_int saved_scsiid; u_int cdb_byte; int retval; int was_paused; int paused; int wait; int disconnected; unsigned long flags; pending_scb = NULL; paused = FALSE; wait = FALSE; ahc = *(struct ahc_softc **)cmd->device->host->hostdata; scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n", flag == SCB_ABORT ? "n ABORT" : " TARGET RESET"); printk("CDB:"); for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++) printk(" 0x%x", cmd->cmnd[cdb_byte]); printk("\n"); ahc_lock(ahc, &flags); /* * First determine if we currently own this command. * Start by searching the device queue. If not found * there, check the pending_scb list. If not found * at all, and the system wanted us to just abort the * command, return success. */ dev = scsi_transport_device_data(cmd->device); if (dev == NULL) { /* * No target device for this command exists, * so we must not still own the command. */ printk("%s:%d:%d:%d: Is not an active device\n", ahc_name(ahc), cmd->device->channel, cmd->device->id, (u8)cmd->device->lun); retval = SUCCESS; goto no_cmd; } if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, cmd->device->channel + 'A', (u8)cmd->device->lun, CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) { printk("%s:%d:%d:%d: Command found on untagged queue\n", ahc_name(ahc), cmd->device->channel, cmd->device->id, (u8)cmd->device->lun); retval = SUCCESS; goto done; } /* * See if we can find a matching cmd in the pending list. */ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { if (pending_scb->io_ctx == cmd) break; } if (pending_scb == NULL && flag == SCB_DEVICE_RESET) { /* Any SCB for this device will do for a target reset */ LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd), scmd_channel(cmd) + 'A', CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) break; } } if (pending_scb == NULL) { scmd_printk(KERN_INFO, cmd, "Command not found\n"); goto no_cmd; } if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) { /* * We can't queue two recovery actions using the same SCB */ retval = FAILED; goto done; } /* * Ensure that the card doesn't do anything * behind our back and that we didn't "just" miss * an interrupt that would affect this cmd. */ was_paused = ahc_is_paused(ahc); ahc_pause_and_flushwork(ahc); paused = TRUE; if ((pending_scb->flags & SCB_ACTIVE) == 0) { scmd_printk(KERN_INFO, cmd, "Command already completed\n"); goto no_cmd; } printk("%s: At time of recovery, card was %spaused\n", ahc_name(ahc), was_paused ? 
"" : "not "); ahc_dump_card_state(ahc); disconnected = TRUE; if (flag == SCB_ABORT) { if (ahc_search_qinfifo(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, ROLE_INITIATOR, CAM_REQ_ABORTED, SEARCH_COMPLETE) > 0) { printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", ahc_name(ahc), cmd->device->channel, cmd->device->id, (u8)cmd->device->lun); retval = SUCCESS; goto done; } } else if (ahc_search_qinfifo(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_COUNT) > 0) { disconnected = FALSE; } if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) { struct scb *bus_scb; bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG)); if (bus_scb == pending_scb) disconnected = FALSE; else if (flag != SCB_ABORT && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb)) disconnected = FALSE; } /* * At this point, pending_scb is the scb associated with the * passed in command. That command is currently active on the * bus, is in the disconnected state, or we're hoping to find * a command for the same target active on the bus to abuse to * send a BDR. Queue the appropriate message based on which of * these states we are in. */ last_phase = ahc_inb(ahc, LASTPHASE); saved_scbptr = ahc_inb(ahc, SCBPTR); active_scb_index = ahc_inb(ahc, SCB_TAG); saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); if (last_phase != P_BUSFREE && (pending_scb->hscb->tag == active_scb_index || (flag == SCB_DEVICE_RESET && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) { /* * We're active on the bus, so assert ATN * and hope that the target responds. */ pending_scb = ahc_lookup_scb(ahc, active_scb_index); pending_scb->flags |= SCB_RECOVERY_SCB|flag; ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_outb(ahc, SCSISIGO, last_phase|ATNO); scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n"); wait = TRUE; } else if (disconnected) { /* * Actually re-queue this SCB in an attempt * to select the device before it reconnects. * In either case (selection or reselection), * we will now issue the approprate message * to the timed-out device. * * Set the MK_MESSAGE control bit indicating * that we desire to send a message. We * also set the disconnected flag since * in the paging case there is no guarantee * that our SCB control byte matches the * version on the card. We don't want the * sequencer to abort the command thinking * an unsolicited reselection occurred. */ pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED; pending_scb->flags |= SCB_RECOVERY_SCB|flag; /* * Remove any cached copy of this SCB in the * disconnected list in preparation for the * queuing of our abort SCB. We use the * same element in the SCB, SCB_NEXT, for * both the qinfifo and the disconnected list. */ ahc_search_disc_list(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, pending_scb->hscb->tag, /*stop_on_first*/TRUE, /*remove*/TRUE, /*save_state*/FALSE); /* * In the non-paging case, the sequencer will * never re-reference the in-core SCB. * To make sure we are notified during * reselection, set the MK_MESSAGE flag in * the card's copy of the SCB. */ if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag); ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE); } /* * Clear out any entries in the QINFIFO first * so we are the next SCB for this target * to run. 
*/ ahc_search_qinfifo(ahc, cmd->device->id, cmd->device->channel + 'A', cmd->device->lun, SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_qinfifo_requeue_tail(ahc, pending_scb); ahc_outb(ahc, SCBPTR, saved_scbptr); ahc_print_path(ahc, pending_scb); printk("Device is disconnected, re-queuing SCB\n"); wait = TRUE; } else { scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n"); retval = FAILED; goto done; } no_cmd: /* * Our assumption is that if we don't have the command, no * recovery action was required, so we return success. Again, * the semantics of the mid-layer recovery engine are not * well defined, so this may change in time. */ retval = SUCCESS; done: if (paused) ahc_unpause(ahc); if (wait) { DECLARE_COMPLETION_ONSTACK(done); ahc->platform_data->eh_done = &done; ahc_unlock(ahc, &flags); printk("Recovery code sleeping\n"); if (!wait_for_completion_timeout(&done, 5 * HZ)) { ahc_lock(ahc, &flags); ahc->platform_data->eh_done = NULL; ahc_unlock(ahc, &flags); printk("Timer Expired\n"); retval = FAILED; } printk("Recovery code awake\n"); } else ahc_unlock(ahc, &flags); return (retval); } static void ahc_linux_set_width(struct scsi_target *starget, int width) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_devinfo devinfo; unsigned long flags; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); ahc_lock(ahc, &flags); ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_period(struct scsi_target *starget, int period) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options; unsigned long flags; unsigned long offset = tinfo->goal.offset; const struct ahc_syncrate *syncrate; if (offset == 0) offset = MAX_OFFSET; if (period < 9) period = 9; /* 12.5ns is our minimum */ if (period == 9) { if (spi_max_width(starget)) ppr_options |= MSG_EXT_PPR_DT_REQ; else /* need wide for DT and need DT for 12.5 ns */ period = 10; } ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); /* all PPR requests apart from QAS require wide transfers */ if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) { if (spi_width(starget) == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; } syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_offset(struct scsi_target *starget, int offset) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = 0; unsigned int period = 0; unsigned long flags; const struct ahc_syncrate *syncrate = NULL; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); if (offset != 0) { syncrate = ahc_find_syncrate(ahc, &period, 
&ppr_options, AHC_SYNCRATE_DT); period = tinfo->goal.period; ppr_options = tinfo->goal.ppr_options; } ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_dt(struct scsi_target *starget, int dt) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_DT_REQ; unsigned int period = tinfo->goal.period; unsigned int width = tinfo->goal.width; unsigned long flags; const struct ahc_syncrate *syncrate; if (dt && spi_max_width(starget)) { ppr_options |= MSG_EXT_PPR_DT_REQ; if (!width) ahc_linux_set_width(starget, 1); } else if (period == 9) period = 10; /* if resetting DT, period must be >= 25ns */ ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } #if 0 /* FIXME: This code claims to support IU and QAS. However, the actual * sequencer code and aic7xxx_core have no support for these parameters and * will get into a bad state if they're negotiated. Do not enable this * unless you know what you're doing */ static void ahc_linux_set_qas(struct scsi_target *starget, int qas) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_QAS_REQ; unsigned int period = tinfo->goal.period; unsigned long flags; struct ahc_syncrate *syncrate; if (qas) ppr_options |= MSG_EXT_PPR_QAS_REQ; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } static void ahc_linux_set_iu(struct scsi_target *starget, int iu) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata); struct ahc_tmode_tstate *tstate; struct ahc_initiator_tinfo *tinfo = ahc_fetch_transinfo(ahc, starget->channel + 'A', shost->this_id, starget->id, &tstate); struct ahc_devinfo devinfo; unsigned int ppr_options = tinfo->goal.ppr_options & ~MSG_EXT_PPR_IU_REQ; unsigned int period = tinfo->goal.period; unsigned long flags; struct ahc_syncrate *syncrate; if (iu) ppr_options |= MSG_EXT_PPR_IU_REQ; ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0, starget->channel + 'A', ROLE_INITIATOR); syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT); ahc_lock(ahc, &flags); ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset, ppr_options, AHC_TRANS_GOAL, FALSE); ahc_unlock(ahc, &flags); } #endif static void ahc_linux_get_signalling(struct Scsi_Host 
*shost) { struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; unsigned long flags; u8 mode; if (!(ahc->features & AHC_ULTRA2)) { /* non-LVD chipset, may not have SBLKCTL reg */ spi_signalling(shost) = ahc->features & AHC_HVD ? SPI_SIGNAL_HVD : SPI_SIGNAL_SE; return; } ahc_lock(ahc, &flags); ahc_pause(ahc); mode = ahc_inb(ahc, SBLKCTL); ahc_unpause(ahc); ahc_unlock(ahc, &flags); if (mode & ENAB40) spi_signalling(shost) = SPI_SIGNAL_LVD; else if (mode & ENAB20) spi_signalling(shost) = SPI_SIGNAL_SE; else spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; } static struct spi_function_template ahc_linux_transport_functions = { .set_offset = ahc_linux_set_offset, .show_offset = 1, .set_period = ahc_linux_set_period, .show_period = 1, .set_width = ahc_linux_set_width, .show_width = 1, .set_dt = ahc_linux_set_dt, .show_dt = 1, #if 0 .set_iu = ahc_linux_set_iu, .show_iu = 1, .set_qas = ahc_linux_set_qas, .show_qas = 1, #endif .get_signalling = ahc_linux_get_signalling, }; static int __init ahc_linux_init(void) { /* * If we've been passed any parameters, process them now. */ if (aic7xxx) aic7xxx_setup(aic7xxx); ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions); if (!ahc_linux_transport_template) return -ENODEV; scsi_transport_reserve_device(ahc_linux_transport_template, sizeof(struct ahc_linux_device)); ahc_linux_pci_init(); ahc_linux_eisa_init(); return 0; } static void ahc_linux_exit(void) { ahc_linux_pci_exit(); ahc_linux_eisa_exit(); spi_release_transport(ahc_linux_transport_template); } module_init(ahc_linux_init); module_exit(ahc_linux_exit);
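/*
 * Worked example of the option string documented in MODULE_PARM_DESC()
 * above (a rough sketch of how aic7xxx_setup() and ahc_parse_brace_option()
 * interpret it, not a full grammar):
 *
 *   options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'
 *
 *   probe_eisa_vl       - toggle EISA/VLB controller probing
 *   tag_info:{{}.{.10}} - outer braces select the controller, inner braces
 *                         the target: controller 0 is left at its default
 *                         and controller 1/target 1 gets a depth of 10 tags
 *   seltime:1           - selection timeout index 1, i.e. 128ms
 *
 * Options are separated by '.' or ','; options given without a ':' argument
 * simply toggle (or, for "verbose", set) their flag.
 */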
linux-master
drivers/scsi/aic7xxx/aic7xxx_osm.c
/* * Linux driver attachment glue for PCI based U320 controllers. * * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm_pci.c#25 $ */ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aic79xx_pci.h" /* Define the macro locally since it's different for different class of chips. */ #define ID(x) \ ID2C(x), \ ID2C(IDIROC(x)) static const struct pci_device_id ahd_linux_pci_id_table[] = { /* aic7901 based controllers */ ID(ID_AHA_29320A), ID(ID_AHA_29320ALP), ID(ID_AHA_29320LPE), /* aic7902 based controllers */ ID(ID_AHA_29320), ID(ID_AHA_29320B), ID(ID_AHA_29320LP), ID(ID_AHA_39320), ID(ID_AHA_39320_B), ID(ID_AHA_39320A), ID(ID_AHA_39320D), ID(ID_AHA_39320D_HP), ID(ID_AHA_39320D_B), ID(ID_AHA_39320D_B_HP), /* Generic chip probes for devices we don't know exactly. 
*/ ID16(ID_AIC7901 & ID_9005_GENERIC_MASK), ID(ID_AIC7901A & ID_DEV_VENDOR_MASK), ID16(ID_AIC7902 & ID_9005_GENERIC_MASK), { 0 } }; MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table); static int __maybe_unused ahd_linux_pci_dev_suspend(struct device *dev) { struct ahd_softc *ahd = dev_get_drvdata(dev); int rc; if ((rc = ahd_suspend(ahd))) return rc; ahd_pci_suspend(ahd); return rc; } static int __maybe_unused ahd_linux_pci_dev_resume(struct device *dev) { struct ahd_softc *ahd = dev_get_drvdata(dev); ahd_pci_resume(ahd); ahd_resume(ahd); return 0; } static void ahd_linux_pci_dev_remove(struct pci_dev *pdev) { struct ahd_softc *ahd = pci_get_drvdata(pdev); u_long s; if (ahd->platform_data && ahd->platform_data->host) scsi_remove_host(ahd->platform_data->host); ahd_lock(ahd, &s); ahd_intr_enable(ahd, FALSE); ahd_unlock(ahd, &s); ahd_free(ahd); } static void ahd_linux_pci_inherit_flags(struct ahd_softc *ahd) { struct pci_dev *pdev = ahd->dev_softc, *master_pdev; unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); master_pdev = pci_get_slot(pdev->bus, master_devfn); if (master_pdev) { struct ahd_softc *master = pci_get_drvdata(master_pdev); if (master) { ahd->flags &= ~AHD_BIOS_ENABLED; ahd->flags |= master->flags & AHD_BIOS_ENABLED; } else printk(KERN_ERR "aic79xx: no multichannel peer found!\n"); pci_dev_put(master_pdev); } } static int ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { char buf[80]; struct ahd_softc *ahd; ahd_dev_softc_t pci; const struct ahd_pci_identity *entry; char *name; int error; struct device *dev = &pdev->dev; pci = pdev; entry = ahd_find_pci_device(pci); if (entry == NULL) return (-ENODEV); /* * Allocate a softc for this card and * set it up for attachment by our * common detect routine. */ sprintf(buf, "ahd_pci:%d:%d:%d", ahd_get_pci_bus(pci), ahd_get_pci_slot(pci), ahd_get_pci_function(pci)); name = kstrdup(buf, GFP_ATOMIC); if (name == NULL) return (-ENOMEM); ahd = ahd_alloc(NULL, name); if (ahd == NULL) return (-ENOMEM); if (pci_enable_device(pdev)) { ahd_free(ahd); return (-ENODEV); } pci_set_master(pdev); if (sizeof(dma_addr_t) > 4) { const u64 required_mask = dma_get_required_mask(dev); if (required_mask > DMA_BIT_MASK(39) && dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) ahd->flags |= AHD_64BIT_ADDRESSING; else if (required_mask > DMA_BIT_MASK(32) && dma_set_mask(dev, DMA_BIT_MASK(39)) == 0) ahd->flags |= AHD_39BIT_ADDRESSING; else dma_set_mask(dev, DMA_BIT_MASK(32)); } else { dma_set_mask(dev, DMA_BIT_MASK(32)); } ahd->dev_softc = pci; error = ahd_pci_config(ahd, entry); if (error != 0) { ahd_free(ahd); return (-error); } /* * Second Function PCI devices need to inherit some * * settings from function 0. 
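 * In practice only the AHD_BIOS_ENABLED flag is inherited;
 * ahd_linux_pci_inherit_flags() above copies it from the function-0 softc.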
*/ if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0) ahd_linux_pci_inherit_flags(ahd); pci_set_drvdata(pdev, ahd); ahd_linux_register_host(ahd, &aic79xx_driver_template); return (0); } static SIMPLE_DEV_PM_OPS(ahd_linux_pci_dev_pm_ops, ahd_linux_pci_dev_suspend, ahd_linux_pci_dev_resume); static struct pci_driver aic79xx_pci_driver = { .name = "aic79xx", .probe = ahd_linux_pci_dev_probe, .driver.pm = &ahd_linux_pci_dev_pm_ops, .remove = ahd_linux_pci_dev_remove, .id_table = ahd_linux_pci_id_table }; int ahd_linux_pci_init(void) { return pci_register_driver(&aic79xx_pci_driver); } void ahd_linux_pci_exit(void) { pci_unregister_driver(&aic79xx_pci_driver); } static int ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base, resource_size_t *base2) { *base = pci_resource_start(ahd->dev_softc, 0); /* * This is really the 3rd bar and should be at index 2, * but the Linux PCI code doesn't know how to "count" 64bit * bars. */ *base2 = pci_resource_start(ahd->dev_softc, 3); if (*base == 0 || *base2 == 0) return (ENOMEM); if (!request_region(*base, 256, "aic79xx")) return (ENOMEM); if (!request_region(*base2, 256, "aic79xx")) { release_region(*base, 256); return (ENOMEM); } return (0); } static int ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, resource_size_t *bus_addr, uint8_t __iomem **maddr) { resource_size_t start; resource_size_t base_page; u_long base_offset; int error = 0; if (aic79xx_allow_memio == 0) return (ENOMEM); if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) != 0) return (ENOMEM); start = pci_resource_start(ahd->dev_softc, 1); base_page = start & PAGE_MASK; base_offset = start - base_page; if (start != 0) { *bus_addr = start; if (!request_mem_region(start, 0x1000, "aic79xx")) error = ENOMEM; if (!error) { *maddr = ioremap(base_page, base_offset + 512); if (*maddr == NULL) { error = ENOMEM; release_mem_region(start, 0x1000); } else *maddr += base_offset; } } else error = ENOMEM; return (error); } int ahd_pci_map_registers(struct ahd_softc *ahd) { uint32_t command; resource_size_t base; uint8_t __iomem *maddr; int error; /* * If its allowed, we prefer memory mapped access. */ command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, 4); command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN); base = 0; maddr = NULL; error = ahd_linux_pci_reserve_mem_region(ahd, &base, &maddr); if (error == 0) { ahd->platform_data->mem_busaddr = base; ahd->tags[0] = BUS_SPACE_MEMIO; ahd->bshs[0].maddr = maddr; ahd->tags[1] = BUS_SPACE_MEMIO; ahd->bshs[1].maddr = maddr + 0x100; ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command | PCIM_CMD_MEMEN, 4); if (ahd_pci_test_register_access(ahd) != 0) { printk("aic79xx: PCI Device %d:%d:%d " "failed memory mapped test. Using PIO.\n", ahd_get_pci_bus(ahd->dev_softc), ahd_get_pci_slot(ahd->dev_softc), ahd_get_pci_function(ahd->dev_softc)); iounmap(maddr); release_mem_region(ahd->platform_data->mem_busaddr, 0x1000); ahd->bshs[0].maddr = NULL; maddr = NULL; } else command |= PCIM_CMD_MEMEN; } else if (bootverbose) { printk("aic79xx: PCI%d:%d:%d MEM region 0x%llx " "unavailable. 
Cannot memory map device.\n", ahd_get_pci_bus(ahd->dev_softc), ahd_get_pci_slot(ahd->dev_softc), ahd_get_pci_function(ahd->dev_softc), (unsigned long long)base); } if (maddr == NULL) { resource_size_t base2; error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2); if (error == 0) { ahd->tags[0] = BUS_SPACE_PIO; ahd->tags[1] = BUS_SPACE_PIO; ahd->bshs[0].ioport = (u_long)base; ahd->bshs[1].ioport = (u_long)base2; command |= PCIM_CMD_PORTEN; } else { printk("aic79xx: PCI%d:%d:%d IO regions 0x%llx and " "0x%llx unavailable. Cannot map device.\n", ahd_get_pci_bus(ahd->dev_softc), ahd_get_pci_slot(ahd->dev_softc), ahd_get_pci_function(ahd->dev_softc), (unsigned long long)base, (unsigned long long)base2); } } ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4); return (error); } int ahd_pci_map_int(struct ahd_softc *ahd) { int error; error = request_irq(ahd->dev_softc->irq, ahd_linux_isr, IRQF_SHARED, "aic79xx", ahd); if (!error) ahd->platform_data->irq = ahd->dev_softc->irq; return (-error); } void ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state) { pci_set_power_state(ahd->dev_softc, new_state); }
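/*
 * Rough summary of the resource setup above (descriptive sketch only):
 *
 *  - DMA addressing (ahd_linux_pci_dev_probe): when the platform's required
 *    DMA mask exceeds 39 bits and a 64-bit mask is accepted, the controller
 *    runs with AHD_64BIT_ADDRESSING; otherwise a 39-bit mask is attempted
 *    (AHD_39BIT_ADDRESSING), falling back to plain 32-bit DMA.
 *
 *  - Register access (ahd_pci_map_registers): memory-mapped I/O is preferred
 *    and verified via ahd_pci_test_register_access(); if that test fails,
 *    the mapping is released and the two I/O port regions are used instead.
 */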
linux-master
drivers/scsi/aic7xxx/aic79xx_osm_pci.c
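The memory-region helper in aic79xx_osm_pci.c above maps only a 512-byte register window, but first rounds the BAR address down to a page boundary and re-adds the offset after ioremap(). The following standalone userspace sketch walks through that alignment arithmetic in isolation; the sample BAR address and the 4 KiB page size are hypothetical stand-ins, not values taken from the driver.

/* Editor's illustrative sketch of the page-alignment math used before
 * ioremap() in ahd_linux_pci_reserve_mem_region(); not part of the kernel
 * sources, and the sample address below is made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_size = 4096;                 /* stand-in for PAGE_SIZE */
	uint64_t page_mask = ~(page_size - 1);     /* stand-in for PAGE_MASK */
	uint64_t start = 0xfebf1200ULL;            /* hypothetical BAR 1 start */
	uint64_t base_page = start & page_mask;    /* page-aligned mapping base */
	uint64_t base_offset = start - base_page;  /* re-added to the cookie after mapping */

	/* The driver maps base_offset + 512 bytes so the 512-byte register
	 * window beginning at 'start' is covered even when 'start' is not
	 * page aligned; the returned pointer is then advanced by base_offset. */
	printf("start       = 0x%llx\n", (unsigned long long)start);
	printf("base_page   = 0x%llx\n", (unsigned long long)base_page);
	printf("base_offset = 0x%llx\n", (unsigned long long)base_offset);
	printf("map length  = %llu bytes\n",
	       (unsigned long long)(base_offset + 512));
	return 0;
}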
/* * Aic7xxx SCSI host adapter firmware assembler symbol table implementation * * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.c#24 $ * * $FreeBSD$ */ #include <sys/types.h> #include "aicdb.h" #include <fcntl.h> #include <inttypes.h> #include <regex.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sysexits.h> #include <ctype.h> #include "aicasm_symbol.h" #include "aicasm.h" static DB *symtable; symbol_t * symbol_create(char *name) { symbol_t *new_symbol; new_symbol = (symbol_t *)malloc(sizeof(symbol_t)); if (new_symbol == NULL) { perror("Unable to create new symbol"); exit(EX_SOFTWARE); } memset(new_symbol, 0, sizeof(*new_symbol)); new_symbol->name = strdup(name); if (new_symbol->name == NULL) stop("Unable to strdup symbol name", EX_SOFTWARE); new_symbol->type = UNINITIALIZED; new_symbol->count = 1; return (new_symbol); } void symbol_delete(symbol_t *symbol) { if (symtable != NULL) { DBT key; key.data = symbol->name; key.size = strlen(symbol->name); symtable->del(symtable, &key, /*flags*/0); } switch(symbol->type) { case SCBLOC: case SRAMLOC: case REGISTER: if (symbol->info.rinfo != NULL) free(symbol->info.rinfo); break; case ALIAS: if (symbol->info.ainfo != NULL) free(symbol->info.ainfo); break; case MASK: case FIELD: case ENUM: case ENUM_ENTRY: if (symbol->info.finfo != NULL) { symlist_free(&symbol->info.finfo->symrefs); free(symbol->info.finfo); } break; case DOWNLOAD_CONST: case CONST: if (symbol->info.cinfo != NULL) free(symbol->info.cinfo); break; case LABEL: if (symbol->info.linfo != NULL) free(symbol->info.linfo); break; case UNINITIALIZED: default: break; } free(symbol->name); free(symbol); } void symtable_open() { symtable = dbopen(/*filename*/NULL, O_CREAT | O_NONBLOCK | O_RDWR, /*mode*/0, DB_HASH, /*openinfo*/NULL); if (symtable == NULL) { perror("Symbol table creation failed"); exit(EX_SOFTWARE); /* NOTREACHED */ } } void symtable_close() { if (symtable != NULL) { DBT key; DBT data; while (symtable->seq(symtable, &key, &data, R_FIRST) == 0) { symbol_t *stored_ptr; memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); symbol_delete(stored_ptr); } symtable->close(symtable); } } /* * The semantics of get is to return an uninitialized symbol entry * if a lookup fails. 
*/ symbol_t * symtable_get(char *name) { symbol_t *stored_ptr; DBT key; DBT data; int retval; key.data = (void *)name; key.size = strlen(name); if ((retval = symtable->get(symtable, &key, &data, /*flags*/0)) != 0) { if (retval == -1) { perror("Symbol table get operation failed"); exit(EX_SOFTWARE); /* NOTREACHED */ } else if (retval == 1) { /* Symbol wasn't found, so create a new one */ symbol_t *new_symbol; new_symbol = symbol_create(name); data.data = &new_symbol; data.size = sizeof(new_symbol); if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) { perror("Symtable put failed"); exit(EX_SOFTWARE); } return (new_symbol); } else { perror("Unexpected return value from db get routine"); exit(EX_SOFTWARE); /* NOTREACHED */ } } memcpy(&stored_ptr, data.data, sizeof(stored_ptr)); stored_ptr->count++; data.data = &stored_ptr; if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) { perror("Symtable put failed"); exit(EX_SOFTWARE); } return (stored_ptr); } symbol_node_t * symlist_search(symlist_t *symlist, char *symname) { symbol_node_t *curnode; curnode = SLIST_FIRST(symlist); while(curnode != NULL) { if (strcmp(symname, curnode->symbol->name) == 0) break; curnode = SLIST_NEXT(curnode, links); } return (curnode); } void symlist_add(symlist_t *symlist, symbol_t *symbol, int how) { symbol_node_t *newnode; newnode = (symbol_node_t *)malloc(sizeof(symbol_node_t)); if (newnode == NULL) { stop("symlist_add: Unable to malloc symbol_node", EX_SOFTWARE); /* NOTREACHED */ } newnode->symbol = symbol; if (how == SYMLIST_SORT) { symbol_node_t *curnode; int field; field = FALSE; switch(symbol->type) { case REGISTER: case SCBLOC: case SRAMLOC: break; case FIELD: case MASK: case ENUM: case ENUM_ENTRY: field = TRUE; break; default: stop("symlist_add: Invalid symbol type for sorting", EX_SOFTWARE); /* NOTREACHED */ } curnode = SLIST_FIRST(symlist); if (curnode == NULL || (field && (curnode->symbol->type > newnode->symbol->type || (curnode->symbol->type == newnode->symbol->type && (curnode->symbol->info.finfo->value > newnode->symbol->info.finfo->value)))) || (!field && (curnode->symbol->info.rinfo->address > newnode->symbol->info.rinfo->address))) { SLIST_INSERT_HEAD(symlist, newnode, links); return; } while (1) { if (SLIST_NEXT(curnode, links) == NULL) { SLIST_INSERT_AFTER(curnode, newnode, links); break; } else { symbol_t *cursymbol; cursymbol = SLIST_NEXT(curnode, links)->symbol; if ((field && (cursymbol->type > symbol->type || (cursymbol->type == symbol->type && (cursymbol->info.finfo->value > symbol->info.finfo->value)))) || (!field && (cursymbol->info.rinfo->address > symbol->info.rinfo->address))) { SLIST_INSERT_AFTER(curnode, newnode, links); break; } } curnode = SLIST_NEXT(curnode, links); } } else { SLIST_INSERT_HEAD(symlist, newnode, links); } } void symlist_free(symlist_t *symlist) { symbol_node_t *node1, *node2; node1 = SLIST_FIRST(symlist); while (node1 != NULL) { node2 = SLIST_NEXT(node1, links); free(node1); node1 = node2; } SLIST_INIT(symlist); } void symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1, symlist_t *symlist_src2) { symbol_node_t *node; *symlist_dest = *symlist_src1; while((node = SLIST_FIRST(symlist_src2)) != NULL) { SLIST_REMOVE_HEAD(symlist_src2, links); SLIST_INSERT_HEAD(symlist_dest, node, links); } /* These are now empty */ SLIST_INIT(symlist_src1); SLIST_INIT(symlist_src2); } void aic_print_file_prologue(FILE *ofile) { if (ofile == NULL) return; fprintf(ofile, "/*\n" " * DO NOT EDIT - This file is automatically generated\n" " * from the following source 
files:\n" " *\n" "%s */\n", versions); } void aic_print_include(FILE *dfile, char *include_file) { if (dfile == NULL) return; fprintf(dfile, "\n#include \"%s\"\n\n", include_file); } void aic_print_reg_dump_types(FILE *ofile) { if (ofile == NULL) return; fprintf(ofile, "typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n" "typedef struct %sreg_parse_entry {\n" " char *name;\n" " uint8_t value;\n" " uint8_t mask;\n" "} %sreg_parse_entry_t;\n" "\n", prefix, prefix, prefix); } static void aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode) { if (dfile == NULL) return; fprintf(dfile, "static const %sreg_parse_entry_t %s_parse_table[] = {\n", prefix, regnode->symbol->name); } static void aic_print_reg_dump_end(FILE *ofile, FILE *dfile, symbol_node_t *regnode, u_int num_entries) { char *lower_name; char *letter; lower_name = strdup(regnode->symbol->name); if (lower_name == NULL) stop("Unable to strdup symbol name", EX_SOFTWARE); for (letter = lower_name; *letter != '\0'; letter++) *letter = tolower(*letter); if (dfile != NULL) { if (num_entries != 0) fprintf(dfile, "\n" "};\n" "\n"); fprintf(dfile, "int\n" "%s%s_print(u_int regvalue, u_int *cur_col, u_int wrap)\n" "{\n" " return (%sprint_register(%s%s, %d, \"%s\",\n" " 0x%02x, regvalue, cur_col, wrap));\n" "}\n" "\n", prefix, lower_name, prefix, num_entries != 0 ? regnode->symbol->name : "NULL", num_entries != 0 ? "_parse_table" : "", num_entries, regnode->symbol->name, regnode->symbol->info.rinfo->address); } fprintf(ofile, "#if AIC_DEBUG_REGISTERS\n" "%sreg_print_t %s%s_print;\n" "#else\n" "#define %s%s_print(regvalue, cur_col, wrap) \\\n" " %sprint_register(NULL, 0, \"%s\", 0x%02x, regvalue, cur_col, wrap)\n" "#endif\n" "\n", prefix, prefix, lower_name, prefix, lower_name, prefix, regnode->symbol->name, regnode->symbol->info.rinfo->address); } static void aic_print_reg_dump_entry(FILE *dfile, symbol_node_t *curnode) { int num_tabs; if (dfile == NULL) return; fprintf(dfile, " { \"%s\",", curnode->symbol->name); num_tabs = 3 - (strlen(curnode->symbol->name) + 5) / 8; while (num_tabs-- > 0) fputc('\t', dfile); fprintf(dfile, "0x%02x, 0x%02x }", curnode->symbol->info.finfo->value, curnode->symbol->info.finfo->mask); } void symtable_dump(FILE *ofile, FILE *dfile) { /* * Sort the registers by address with a simple insertion sort. * Put bitmasks next to the first register that defines them. * Put constants at the end. 
*/ symlist_t registers; symlist_t masks; symlist_t constants; symlist_t download_constants; symlist_t aliases; symlist_t exported_labels; symbol_node_t *curnode; symbol_node_t *regnode; DBT key; DBT data; int flag; int reg_count = 0, reg_used = 0; u_int i; if (symtable == NULL) return; SLIST_INIT(&registers); SLIST_INIT(&masks); SLIST_INIT(&constants); SLIST_INIT(&download_constants); SLIST_INIT(&aliases); SLIST_INIT(&exported_labels); flag = R_FIRST; while (symtable->seq(symtable, &key, &data, flag) == 0) { symbol_t *cursym; memcpy(&cursym, data.data, sizeof(cursym)); switch(cursym->type) { case REGISTER: case SCBLOC: case SRAMLOC: symlist_add(&registers, cursym, SYMLIST_SORT); break; case MASK: case FIELD: case ENUM: case ENUM_ENTRY: symlist_add(&masks, cursym, SYMLIST_SORT); break; case CONST: symlist_add(&constants, cursym, SYMLIST_INSERT_HEAD); break; case DOWNLOAD_CONST: symlist_add(&download_constants, cursym, SYMLIST_INSERT_HEAD); break; case ALIAS: symlist_add(&aliases, cursym, SYMLIST_INSERT_HEAD); break; case LABEL: if (cursym->info.linfo->exported == 0) break; symlist_add(&exported_labels, cursym, SYMLIST_INSERT_HEAD); break; default: break; } flag = R_NEXT; } /* Register dianostic functions/declarations first. */ aic_print_file_prologue(ofile); aic_print_reg_dump_types(ofile); aic_print_file_prologue(dfile); aic_print_include(dfile, stock_include_file); SLIST_FOREACH(curnode, &registers, links) { if (curnode->symbol->dont_generate_debug_code) continue; switch(curnode->symbol->type) { case REGISTER: case SCBLOC: case SRAMLOC: { symlist_t *fields; symbol_node_t *fieldnode; int num_entries; num_entries = 0; reg_count++; if (curnode->symbol->count == 1) break; fields = &curnode->symbol->info.rinfo->fields; SLIST_FOREACH(fieldnode, fields, links) { if (num_entries == 0) aic_print_reg_dump_start(dfile, curnode); else if (dfile != NULL) fputs(",\n", dfile); num_entries++; aic_print_reg_dump_entry(dfile, fieldnode); } aic_print_reg_dump_end(ofile, dfile, curnode, num_entries); reg_used++; } default: break; } } fprintf(stderr, "%s: %d of %d register definitions used\n", appname, reg_used, reg_count); /* Fold in the masks and bits */ while (SLIST_FIRST(&masks) != NULL) { char *regname; curnode = SLIST_FIRST(&masks); SLIST_REMOVE_HEAD(&masks, links); regnode = SLIST_FIRST(&curnode->symbol->info.finfo->symrefs); regname = regnode->symbol->name; regnode = symlist_search(&registers, regname); SLIST_INSERT_AFTER(regnode, curnode, links); } /* Add the aliases */ while (SLIST_FIRST(&aliases) != NULL) { char *regname; curnode = SLIST_FIRST(&aliases); SLIST_REMOVE_HEAD(&aliases, links); regname = curnode->symbol->info.ainfo->parent->name; regnode = symlist_search(&registers, regname); SLIST_INSERT_AFTER(regnode, curnode, links); } /* Output generated #defines. 
*/ while (SLIST_FIRST(&registers) != NULL) { symbol_node_t *curnode; u_int value; char *tab_str; char *tab_str2; curnode = SLIST_FIRST(&registers); SLIST_REMOVE_HEAD(&registers, links); switch(curnode->symbol->type) { case REGISTER: case SCBLOC: case SRAMLOC: fprintf(ofile, "\n"); value = curnode->symbol->info.rinfo->address; tab_str = "\t"; tab_str2 = "\t\t"; break; case ALIAS: { symbol_t *parent; parent = curnode->symbol->info.ainfo->parent; value = parent->info.rinfo->address; tab_str = "\t"; tab_str2 = "\t\t"; break; } case MASK: case FIELD: case ENUM: case ENUM_ENTRY: value = curnode->symbol->info.finfo->value; tab_str = "\t\t"; tab_str2 = "\t"; break; default: value = 0; /* Quiet compiler */ tab_str = NULL; tab_str2 = NULL; stop("symtable_dump: Invalid symbol type " "encountered", EX_SOFTWARE); break; } fprintf(ofile, "#define%s%-16s%s0x%02x\n", tab_str, curnode->symbol->name, tab_str2, value); free(curnode); } fprintf(ofile, "\n\n"); while (SLIST_FIRST(&constants) != NULL) { symbol_node_t *curnode; curnode = SLIST_FIRST(&constants); SLIST_REMOVE_HEAD(&constants, links); fprintf(ofile, "#define\t%-8s\t0x%02x\n", curnode->symbol->name, curnode->symbol->info.cinfo->value); free(curnode); } fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n"); for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) { symbol_node_t *curnode; curnode = SLIST_FIRST(&download_constants); SLIST_REMOVE_HEAD(&download_constants, links); fprintf(ofile, "#define\t%-8s\t0x%02x\n", curnode->symbol->name, curnode->symbol->info.cinfo->value); free(curnode); } fprintf(ofile, "#define\tDOWNLOAD_CONST_COUNT\t0x%02x\n", i); fprintf(ofile, "\n\n/* Exported Labels */\n"); while (SLIST_FIRST(&exported_labels) != NULL) { symbol_node_t *curnode; curnode = SLIST_FIRST(&exported_labels); SLIST_REMOVE_HEAD(&exported_labels, links); fprintf(ofile, "#define\tLABEL_%-8s\t0x%02x\n", curnode->symbol->name, curnode->symbol->info.linfo->address); free(curnode); } }
linux-master
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
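symlist_add() in aicasm_symbol.c above keeps registers ordered by address (and bit fields by value) while inserting into a BSD-style SLIST. Below is a minimal userspace sketch of that same sorted-insertion pattern over a plain singly linked list; the register names and addresses are illustrative only and not taken from any real register map.

/* Editor's sketch of the SYMLIST_SORT insertion path, using a bare 'next'
 * pointer instead of <sys/queue.h>; data below is invented. */
#include <stdio.h>

struct sym_node {
	const char *name;
	unsigned int address;           /* sort key, like rinfo->address */
	struct sym_node *next;
};

static void sorted_insert(struct sym_node **head, struct sym_node *node)
{
	struct sym_node *cur = *head;

	if (cur == NULL || cur->address > node->address) {
		node->next = cur;       /* new head, like SLIST_INSERT_HEAD */
		*head = node;
		return;
	}
	while (cur->next != NULL && cur->next->address <= node->address)
		cur = cur->next;
	node->next = cur->next;         /* like SLIST_INSERT_AFTER */
	cur->next = node;
}

int main(void)
{
	struct sym_node a = { "SCSIID",   0x05, NULL };
	struct sym_node b = { "SCSISEQ0", 0x03, NULL };
	struct sym_node c = { "SSTAT0",   0x0b, NULL };
	struct sym_node *head = NULL, *cur;

	sorted_insert(&head, &a);
	sorted_insert(&head, &b);
	sorted_insert(&head, &c);
	for (cur = head; cur != NULL; cur = cur->next)
		printf("0x%02x %s\n", cur->address, cur->name);
	return 0;
}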
/* * Aic7xxx SCSI host adapter firmware assembler * * Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs. * Copyright (c) 2001, 2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#23 $ * * $FreeBSD$ */ #include <sys/types.h> #include <sys/mman.h> #include <ctype.h> #include <inttypes.h> #include <regex.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sysexits.h> #include <unistd.h> #if linux #include <endian.h> #else #include <machine/endian.h> #endif #include "aicasm.h" #include "aicasm_symbol.h" #include "aicasm_insformat.h" typedef struct patch { STAILQ_ENTRY(patch) links; int patch_func; u_int begin; u_int skip_instr; u_int skip_patch; } patch_t; STAILQ_HEAD(patch_list, patch) patches; static void usage(void); static void back_patch(void); static void output_code(void); static void output_listing(char *ifilename); static void dump_scope(scope_t *scope); static void emit_patch(scope_t *scope, int patch); static int check_patch(patch_t **start_patch, int start_instr, int *skip_addr, int *func_vals); struct path_list search_path; int includes_search_curdir; char *appname; char *stock_include_file; FILE *ofile; char *ofilename; char *regfilename; FILE *regfile; char *listfilename; FILE *listfile; char *regdiagfilename; FILE *regdiagfile; int src_mode; int dst_mode; static STAILQ_HEAD(,instruction) seq_program; struct cs_tailq cs_tailq; struct scope_list scope_stack; symlist_t patch_functions; #if DEBUG extern int yy_flex_debug; extern int mm_flex_debug; extern int yydebug; extern int mmdebug; #endif extern FILE *yyin; extern int yyparse(void); int main(int argc, char *argv[]); int main(int argc, char *argv[]) { extern char *optarg; extern int optind; int ch; int retval; char *inputfilename; scope_t *sentinal; STAILQ_INIT(&patches); SLIST_INIT(&search_path); STAILQ_INIT(&seq_program); TAILQ_INIT(&cs_tailq); SLIST_INIT(&scope_stack); /* Set Sentinal scope node */ sentinal = scope_alloc(); sentinal->type = SCOPE_ROOT; includes_search_curdir = 1; appname = *argv; regfile = NULL; listfile = NULL; #if DEBUG yy_flex_debug = 0; mm_flex_debug = 0; yydebug = 0; mmdebug = 0; #endif while ((ch = getopt(argc, argv, "d:i:l:n:o:p:r:I:")) != -1) { switch(ch) { case 'd': #if DEBUG if (strcmp(optarg, "s") == 0) { yy_flex_debug = 1; mm_flex_debug = 1; } else if (strcmp(optarg, "p") == 0) { yydebug = 1; mmdebug = 1; } else { fprintf(stderr, "%s: -d Requires either an " "'s' or 'p' argument\n", appname); usage(); } #else stop("-d: Assembler not built with debugging " "information", EX_SOFTWARE); #endif break; case 'i': stock_include_file = optarg; break; case 'l': /* Create a program listing */ if ((listfile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } listfilename = optarg; break; case 'n': /* Don't complain about the -nostdinc directrive */ if (strcmp(optarg, "ostdinc")) { fprintf(stderr, "%s: Unknown option -%c%s\n", appname, ch, optarg); usage(); /* NOTREACHED */ } break; case 'o': if ((ofile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } ofilename = optarg; break; case 'p': /* Create Register Diagnostic "printing" Functions */ if ((regdiagfile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } regdiagfilename = optarg; break; case 'r': if ((regfile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } regfilename = optarg; break; case 'I': { path_entry_t include_dir; if (strcmp(optarg, "-") == 0) { if (includes_search_curdir == 0) { fprintf(stderr, "%s: Warning - '-I-' " "specified multiple " "times\n", appname); } includes_search_curdir = 0; for (include_dir = SLIST_FIRST(&search_path); include_dir != 
NULL; include_dir = SLIST_NEXT(include_dir, links)) /* * All entries before a '-I-' only * apply to includes specified with * quotes instead of "<>". */ include_dir->quoted_includes_only = 1; } else { include_dir = (path_entry_t)malloc(sizeof(*include_dir)); if (include_dir == NULL) { perror(optarg); stop(NULL, EX_OSERR); } include_dir->directory = strdup(optarg); if (include_dir->directory == NULL) { perror(optarg); stop(NULL, EX_OSERR); } include_dir->quoted_includes_only = 0; SLIST_INSERT_HEAD(&search_path, include_dir, links); } break; } case '?': default: usage(); /* NOTREACHED */ } } argc -= optind; argv += optind; if (argc != 1) { fprintf(stderr, "%s: No input file specified\n", appname); usage(); /* NOTREACHED */ } if (regdiagfile != NULL && (regfile == NULL || stock_include_file == NULL)) { fprintf(stderr, "%s: The -p option requires the -r and -i options.\n", appname); usage(); /* NOTREACHED */ } symtable_open(); inputfilename = *argv; include_file(*argv, SOURCE_FILE); retval = yyparse(); if (retval == 0) { if (SLIST_FIRST(&scope_stack) == NULL || SLIST_FIRST(&scope_stack)->type != SCOPE_ROOT) { stop("Unterminated conditional expression", EX_DATAERR); /* NOTREACHED */ } /* Process outmost scope */ process_scope(SLIST_FIRST(&scope_stack)); /* * Decend the tree of scopes and insert/emit * patches as appropriate. We perform a depth first * traversal, recursively handling each scope. */ /* start at the root scope */ dump_scope(SLIST_FIRST(&scope_stack)); /* Patch up forward jump addresses */ back_patch(); if (ofile != NULL) output_code(); if (regfile != NULL) symtable_dump(regfile, regdiagfile); if (listfile != NULL) output_listing(inputfilename); } stop(NULL, 0); /* NOTREACHED */ return (0); } static void usage() { (void)fprintf(stderr, "usage: %-16s [-nostdinc] [-I-] [-I directory] [-o output_file]\n" " [-r register_output_file [-p register_diag_file -i includefile]]\n" " [-l program_list_file]\n" " input_file\n", appname); exit(EX_USAGE); } static void back_patch() { struct instruction *cur_instr; for (cur_instr = STAILQ_FIRST(&seq_program); cur_instr != NULL; cur_instr = STAILQ_NEXT(cur_instr, links)) { if (cur_instr->patch_label != NULL) { struct ins_format3 *f3_instr; u_int address; if (cur_instr->patch_label->type != LABEL) { char buf[255]; snprintf(buf, sizeof(buf), "Undefined label %s", cur_instr->patch_label->name); stop(buf, EX_DATAERR); /* NOTREACHED */ } f3_instr = &cur_instr->format.format3; address = f3_instr->address; address += cur_instr->patch_label->info.linfo->address; f3_instr->address = address; } } } static void output_code() { struct instruction *cur_instr; patch_t *cur_patch; critical_section_t *cs; symbol_node_t *cur_node; int instrcount; instrcount = 0; fprintf(ofile, "/*\n" " * DO NOT EDIT - This file is automatically generated\n" " * from the following source files:\n" " *\n" "%s */\n", versions); fprintf(ofile, "static const uint8_t seqprog[] = {\n"); for (cur_instr = STAILQ_FIRST(&seq_program); cur_instr != NULL; cur_instr = STAILQ_NEXT(cur_instr, links)) { fprintf(ofile, "%s\t0x%02x, 0x%02x, 0x%02x, 0x%02x", cur_instr == STAILQ_FIRST(&seq_program) ? 
"" : ",\n", #ifdef __LITTLE_ENDIAN cur_instr->format.bytes[0], cur_instr->format.bytes[1], cur_instr->format.bytes[2], cur_instr->format.bytes[3]); #else cur_instr->format.bytes[3], cur_instr->format.bytes[2], cur_instr->format.bytes[1], cur_instr->format.bytes[0]); #endif instrcount++; } fprintf(ofile, "\n};\n\n"); if (patch_arg_list == NULL) stop("Patch argument list not defined", EX_DATAERR); /* * Output patch information. Patch functions first. */ fprintf(ofile, "typedef int %spatch_func_t (%s);\n", prefix, patch_arg_list); for (cur_node = SLIST_FIRST(&patch_functions); cur_node != NULL; cur_node = SLIST_NEXT(cur_node,links)) { fprintf(ofile, "static %spatch_func_t %spatch%d_func;\n" "\n" "static int\n" "%spatch%d_func(%s)\n" "{\n" " return (%s);\n" "}\n\n", prefix, prefix, cur_node->symbol->info.condinfo->func_num, prefix, cur_node->symbol->info.condinfo->func_num, patch_arg_list, cur_node->symbol->name); } fprintf(ofile, "static const struct patch {\n" " %spatch_func_t *patch_func;\n" " uint32_t begin :10,\n" " skip_instr :10,\n" " skip_patch :12;\n" "} patches[] = {\n", prefix); for (cur_patch = STAILQ_FIRST(&patches); cur_patch != NULL; cur_patch = STAILQ_NEXT(cur_patch,links)) { fprintf(ofile, "%s\t{ %spatch%d_func, %d, %d, %d }", cur_patch == STAILQ_FIRST(&patches) ? "" : ",\n", prefix, cur_patch->patch_func, cur_patch->begin, cur_patch->skip_instr, cur_patch->skip_patch); } fprintf(ofile, "\n};\n\n"); fprintf(ofile, "static const struct cs {\n" " uint16_t begin;\n" " uint16_t end;\n" "} critical_sections[] = {\n"); for (cs = TAILQ_FIRST(&cs_tailq); cs != NULL; cs = TAILQ_NEXT(cs, links)) { fprintf(ofile, "%s\t{ %d, %d }", cs == TAILQ_FIRST(&cs_tailq) ? "" : ",\n", cs->begin_addr, cs->end_addr); } fprintf(ofile, "\n};\n\n"); fprintf(ofile, "#define NUM_CRITICAL_SECTIONS ARRAY_SIZE(critical_sections)\n"); fprintf(stderr, "%s: %d instructions used\n", appname, instrcount); } static void dump_scope(scope_t *scope) { scope_t *cur_scope; /* * Emit the first patch for this scope */ emit_patch(scope, 0); /* * Dump each scope within this one. */ cur_scope = TAILQ_FIRST(&scope->inner_scope); while (cur_scope != NULL) { dump_scope(cur_scope); cur_scope = TAILQ_NEXT(cur_scope, scope_links); } /* * Emit the second, closing, patch for this scope */ emit_patch(scope, 1); } void emit_patch(scope_t *scope, int patch) { patch_info_t *pinfo; patch_t *new_patch; pinfo = &scope->patches[patch]; if (pinfo->skip_instr == 0) /* No-Op patch */ return; new_patch = (patch_t *)malloc(sizeof(*new_patch)); if (new_patch == NULL) stop("Could not malloc patch structure", EX_OSERR); memset(new_patch, 0, sizeof(*new_patch)); if (patch == 0) { new_patch->patch_func = scope->func_num; new_patch->begin = scope->begin_addr; } else { new_patch->patch_func = 0; new_patch->begin = scope->end_addr; } new_patch->skip_instr = pinfo->skip_instr; new_patch->skip_patch = pinfo->skip_patch; STAILQ_INSERT_TAIL(&patches, new_patch, links); } void output_listing(char *ifilename) { char buf[1024]; FILE *ifile; struct instruction *cur_instr; patch_t *cur_patch; symbol_node_t *cur_func; int *func_values; int instrcount; int instrptr; int line; int func_count; int skip_addr; instrcount = 0; instrptr = 0; line = 1; skip_addr = 0; if ((ifile = fopen(ifilename, "r")) == NULL) { perror(ifilename); stop(NULL, EX_DATAERR); } /* * Determine which options to apply to this listing. 
*/ for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions); cur_func != NULL; cur_func = SLIST_NEXT(cur_func, links)) func_count++; func_values = NULL; if (func_count != 0) { func_values = (int *)malloc(func_count * sizeof(int)); if (func_values == NULL) stop("Could not malloc", EX_OSERR); func_values[0] = 0; /* FALSE func */ func_count--; /* * Ask the user to fill in the return values for * the rest of the functions. */ for (cur_func = SLIST_FIRST(&patch_functions); cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL; cur_func = SLIST_NEXT(cur_func, links), func_count--) { int input; fprintf(stdout, "\n(%s)\n", cur_func->symbol->name); fprintf(stdout, "Enter the return value for " "this expression[T/F]:"); while (1) { input = getchar(); input = toupper(input); if (input == 'T') { func_values[func_count] = 1; break; } else if (input == 'F') { func_values[func_count] = 0; break; } } if (isatty(fileno(stdin)) == 0) putchar(input); } fprintf(stdout, "\nThanks!\n"); } /* Now output the listing */ cur_patch = STAILQ_FIRST(&patches); for (cur_instr = STAILQ_FIRST(&seq_program); cur_instr != NULL; cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) { if (check_patch(&cur_patch, instrcount, &skip_addr, func_values) == 0) { /* Don't count this instruction as it is in a patch * that was removed. */ continue; } while (line < cur_instr->srcline) { fgets(buf, sizeof(buf), ifile); fprintf(listfile, " \t%s", buf); line++; } fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr, #ifdef __LITTLE_ENDIAN cur_instr->format.bytes[0], cur_instr->format.bytes[1], cur_instr->format.bytes[2], cur_instr->format.bytes[3]); #else cur_instr->format.bytes[3], cur_instr->format.bytes[2], cur_instr->format.bytes[1], cur_instr->format.bytes[0]); #endif /* * Macro expansions can cause several instructions * to be output for a single source line. Only * advance the line once in these cases. */ if (line == cur_instr->srcline) { fgets(buf, sizeof(buf), ifile); fprintf(listfile, "\t%s", buf); line++; } else { fprintf(listfile, "\n"); } instrptr++; } /* Dump the remainder of the file */ while(fgets(buf, sizeof(buf), ifile) != NULL) fprintf(listfile, " %s", buf); fclose(ifile); } static int check_patch(patch_t **start_patch, int start_instr, int *skip_addr, int *func_vals) { patch_t *cur_patch; cur_patch = *start_patch; while (cur_patch != NULL && start_instr == cur_patch->begin) { if (func_vals[cur_patch->patch_func] == 0) { int skip; /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; for (skip = cur_patch->skip_patch; skip > 0 && cur_patch != NULL; skip--) cur_patch = STAILQ_NEXT(cur_patch, links); } else { /* Accepted this patch. Advance to the next * one and wait for our intruction pointer to * hit this point. */ cur_patch = STAILQ_NEXT(cur_patch, links); } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } /* * Print out error information if appropriate, and clean up before * terminating the program. 
*/ void stop(const char *string, int err_code) { if (string != NULL) { fprintf(stderr, "%s: ", appname); if (yyfilename != NULL) { fprintf(stderr, "Stopped at file %s, line %d - ", yyfilename, yylineno); } fprintf(stderr, "%s\n", string); } if (ofile != NULL) { fclose(ofile); if (err_code != 0) { fprintf(stderr, "%s: Removing %s due to error\n", appname, ofilename); unlink(ofilename); } } if (regfile != NULL) { fclose(regfile); if (err_code != 0) { fprintf(stderr, "%s: Removing %s due to error\n", appname, regfilename); unlink(regfilename); } } if (listfile != NULL) { fclose(listfile); if (err_code != 0) { fprintf(stderr, "%s: Removing %s due to error\n", appname, listfilename); unlink(listfilename); } } symlist_free(&patch_functions); symtable_close(); exit(err_code); } struct instruction * seq_alloc() { struct instruction *new_instr; new_instr = (struct instruction *)malloc(sizeof(struct instruction)); if (new_instr == NULL) stop("Unable to malloc instruction object", EX_SOFTWARE); memset(new_instr, 0, sizeof(*new_instr)); STAILQ_INSERT_TAIL(&seq_program, new_instr, links); new_instr->srcline = yylineno; return new_instr; } critical_section_t * cs_alloc() { critical_section_t *new_cs; new_cs= (critical_section_t *)malloc(sizeof(critical_section_t)); if (new_cs == NULL) stop("Unable to malloc critical_section object", EX_SOFTWARE); memset(new_cs, 0, sizeof(*new_cs)); TAILQ_INSERT_TAIL(&cs_tailq, new_cs, links); return new_cs; } scope_t * scope_alloc() { scope_t *new_scope; new_scope = (scope_t *)malloc(sizeof(scope_t)); if (new_scope == NULL) stop("Unable to malloc scope object", EX_SOFTWARE); memset(new_scope, 0, sizeof(*new_scope)); TAILQ_INIT(&new_scope->inner_scope); if (SLIST_FIRST(&scope_stack) != NULL) { TAILQ_INSERT_TAIL(&SLIST_FIRST(&scope_stack)->inner_scope, new_scope, scope_links); } /* This patch is now the current scope */ SLIST_INSERT_HEAD(&scope_stack, new_scope, scope_stack_links); return new_scope; } void process_scope(scope_t *scope) { /* * We are "leaving" this scope. We should now have * enough information to process the lists of scopes * we encapsulate. */ scope_t *cur_scope; u_int skip_patch_count; u_int skip_instr_count; cur_scope = TAILQ_LAST(&scope->inner_scope, scope_tailq); skip_patch_count = 0; skip_instr_count = 0; while (cur_scope != NULL) { u_int patch0_patch_skip; patch0_patch_skip = 0; switch (cur_scope->type) { case SCOPE_IF: case SCOPE_ELSE_IF: if (skip_instr_count != 0) { /* Create a tail patch */ patch0_patch_skip++; cur_scope->patches[1].skip_patch = skip_patch_count + 1; cur_scope->patches[1].skip_instr = skip_instr_count; } /* Count Head patch */ patch0_patch_skip++; /* Count any patches contained in our inner scope */ patch0_patch_skip += cur_scope->inner_scope_patches; cur_scope->patches[0].skip_patch = patch0_patch_skip; cur_scope->patches[0].skip_instr = cur_scope->end_addr - cur_scope->begin_addr; skip_instr_count += cur_scope->patches[0].skip_instr; skip_patch_count += patch0_patch_skip; if (cur_scope->type == SCOPE_IF) { scope->inner_scope_patches += skip_patch_count; skip_patch_count = 0; skip_instr_count = 0; } break; case SCOPE_ELSE: /* Count any patches contained in our innter scope */ skip_patch_count += cur_scope->inner_scope_patches; skip_instr_count += cur_scope->end_addr - cur_scope->begin_addr; break; case SCOPE_ROOT: stop("Unexpected scope type encountered", EX_SOFTWARE); /* NOTREACHED */ } cur_scope = TAILQ_PREV(cur_scope, scope_tailq, scope_links); } }
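back_patch() earlier in aicasm.c resolves forward jump references once every label's final address is known, by adding the label address into each instruction's assembled address field. The small self-contained sketch below mirrors that fixup pass; the label names, addresses, and the instruction table are invented for illustration and do not correspond to real sequencer firmware.

/* Editor's sketch of the back_patch() fixup pass over a toy program. */
#include <stdio.h>
#include <string.h>

struct label {
	const char *name;
	unsigned int address;
};

struct instr {
	const char *patch_label;        /* NULL when no fixup is needed */
	unsigned int address;           /* relative part emitted at assembly time */
};

static const struct label *find_label(const struct label *labels, int n,
				      const char *name)
{
	int i;

	for (i = 0; i < n; i++)
		if (strcmp(labels[i].name, name) == 0)
			return &labels[i];
	return NULL;
}

int main(void)
{
	struct label labels[] = { { "idle_loop", 0x12 }, { "fetch_cdb", 0x40 } };
	struct instr program[] = {
		{ NULL,        0 },
		{ "fetch_cdb", 2 },     /* jump relative to fetch_cdb */
		{ "idle_loop", 0 },
	};
	int i;

	for (i = 0; i < 3; i++) {
		if (program[i].patch_label != NULL) {
			const struct label *l = find_label(labels, 2,
						program[i].patch_label);

			if (l == NULL) {
				fprintf(stderr, "Undefined label %s\n",
					program[i].patch_label);
				return 1;
			}
			program[i].address += l->address;
		}
		printf("instr %d -> address 0x%02x\n", i, program[i].address);
	}
	return 0;
}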
linux-master
drivers/scsi/aic7xxx/aicasm/aicasm.c
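check_patch() in aicasm.c above walks the emitted patch table to decide, instruction by instruction, which sequencer instructions survive when a patch function evaluates false. The userspace sketch below reproduces that walk over a tiny hand-built table; every value in the table and in func_vals is hypothetical.

/* Editor's sketch of the patch-skipping walk; one disabled patch guards
 * instructions 2..4 of an 8-instruction toy program. */
#include <stdio.h>

struct patch {
	int patch_func;                 /* index into func_vals[] */
	int begin;                      /* first instruction covered by this patch */
	int skip_instr;                 /* instructions dropped when the function is false */
	int skip_patch;                 /* patches dropped along with them */
};

static int check_patch(const struct patch **start_patch,
		       const struct patch *end, int start_instr,
		       int *skip_addr, const int *func_vals)
{
	const struct patch *cur = *start_patch;

	while (cur != end && start_instr == cur->begin) {
		if (func_vals[cur->patch_func] == 0) {
			int skip;

			/* Reject: note where the skipped region ends and step
			 * over the patches bundled with this one. */
			*skip_addr = start_instr + cur->skip_instr;
			for (skip = cur->skip_patch; skip > 0 && cur != end; skip--)
				cur++;
		} else {
			/* Accept and wait for the next begin address. */
			cur++;
		}
	}
	*start_patch = cur;
	return start_instr >= *skip_addr;       /* 0 while still skipping */
}

int main(void)
{
	const struct patch table[] = { { 0, 2, 3, 1 } };
	const int func_vals[] = { 0 };          /* patch function returns false */
	const struct patch *cur = table;
	int skip_addr = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (check_patch(&cur, table + 1, i, &skip_addr, func_vals))
			printf("keep instruction %d\n", i);
		else
			printf("skip instruction %d\n", i);
	}
	return 0;
}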
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * rx.c * * Abstract: Hardware miniport for Drawbridge specific hardware functions. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/time.h> #include <linux/interrupt.h> #include <scsi/scsi_host.h> #include "aacraid.h" static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) { struct aac_dev *dev = dev_id; unsigned long bellbits; u8 intstat = rx_readb(dev, MUnit.OISR); /* * Read mask and invert because drawbridge is reversed. * This allows us to only service interrupts that have * been enabled. * Check to see if this is our interrupt. If it isn't just return */ if (likely(intstat & ~(dev->OIMR))) { bellbits = rx_readl(dev, OutboundDoorbellReg); if (unlikely(bellbits & DoorBellPrintfReady)) { aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); } else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) { rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); } else if (likely(bellbits & DoorBellAdapterNormRespReady)) { rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); aac_response_normal(&dev->queues->queue[HostNormRespQueue]); } else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) { rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); } else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) { rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); } return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) { int isAif, isFastResponse, isSpecial; struct aac_dev *dev = dev_id; u32 Index = rx_readl(dev, MUnit.OutboundQueue); if (unlikely(Index == 0xFFFFFFFFL)) Index = rx_readl(dev, MUnit.OutboundQueue); if (likely(Index != 0xFFFFFFFFL)) { do { isAif = isFastResponse = isSpecial = 0; if (Index & 0x00000002L) { isAif = 1; if (Index == 0xFFFFFFFEL) isSpecial = 1; Index &= ~0x00000002L; } else { if (Index & 0x00000001L) isFastResponse = 1; Index >>= 2; } if (!isSpecial) { if (unlikely(aac_intr_normal(dev, Index, isAif, isFastResponse, NULL))) { rx_writel(dev, MUnit.OutboundQueue, Index); rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); } } Index = rx_readl(dev, MUnit.OutboundQueue); } while (Index != 0xFFFFFFFFL); return IRQ_HANDLED; } return IRQ_NONE; } /** * aac_rx_disable_interrupt - Disable interrupts * @dev: Adapter */ static void aac_rx_disable_interrupt(struct aac_dev *dev) { rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); } /** * aac_rx_enable_interrupt_producer - Enable interrupts * @dev: Adapter */ static void aac_rx_enable_interrupt_producer(struct aac_dev *dev) { rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); } /** * aac_rx_enable_interrupt_message - Enable interrupts * @dev: Adapter */ static void aac_rx_enable_interrupt_message(struct aac_dev *dev) { rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); } /** * rx_sync_cmd - send a command and wait * 
@dev: Adapter * @command: Command to execute * @p1: first parameter * @p2: second parameter * @p3: third parameter * @p4: forth parameter * @p5: fifth parameter * @p6: sixth parameter * @status: adapter status * @r1: first return value * @r2: second return value * @r3: third return value * @r4: forth return value * * This routine will send a synchronous command to the adapter and wait * for its completion. */ static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) { unsigned long start; int ok; /* * Write the command into Mailbox 0 */ writel(command, &dev->IndexRegs->Mailbox[0]); /* * Write the parameters into Mailboxes 1 - 6 */ writel(p1, &dev->IndexRegs->Mailbox[1]); writel(p2, &dev->IndexRegs->Mailbox[2]); writel(p3, &dev->IndexRegs->Mailbox[3]); writel(p4, &dev->IndexRegs->Mailbox[4]); /* * Clear the synch command doorbell to start on a clean slate. */ rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); /* * Disable doorbell interrupts */ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); /* * Force the completion of the mask register write before issuing * the interrupt. */ rx_readb (dev, MUnit.OIMR); /* * Signal that there is a new synch command */ rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0); ok = 0; start = jiffies; /* * Wait up to 30 seconds */ while (time_before(jiffies, start+30*HZ)) { udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ /* * Mon960 will set doorbell0 bit when it has completed the command. */ if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) { /* * Clear the doorbell. */ rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); ok = 1; break; } /* * Yield the processor in case we are slow */ msleep(1); } if (unlikely(ok != 1)) { /* * Restore interrupt mask even though we timed out */ aac_adapter_enable_int(dev); return -ETIMEDOUT; } /* * Pull the synch status from Mailbox 0. */ if (status) *status = readl(&dev->IndexRegs->Mailbox[0]); if (r1) *r1 = readl(&dev->IndexRegs->Mailbox[1]); if (r2) *r2 = readl(&dev->IndexRegs->Mailbox[2]); if (r3) *r3 = readl(&dev->IndexRegs->Mailbox[3]); if (r4) *r4 = readl(&dev->IndexRegs->Mailbox[4]); /* * Clear the synch command doorbell. */ rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); /* * Restore interrupt mask */ aac_adapter_enable_int(dev); return 0; } /** * aac_rx_interrupt_adapter - interrupt adapter * @dev: Adapter * * Send an interrupt to the i960 and breakpoint it. */ static void aac_rx_interrupt_adapter(struct aac_dev *dev) { rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } /** * aac_rx_notify_adapter - send an event to the adapter * @dev: Adapter * @event: Event to send * * Notify the i960 that something it probably cares about has * happened. 
*/ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event) { switch (event) { case AdapNormCmdQue: rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1); break; case HostNormRespNotFull: rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4); break; case AdapNormRespQue: rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2); break; case HostNormCmdNotFull: rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3); break; case HostShutdown: break; case FastIo: rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6); break; case AdapPrintfDone: rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5); break; default: BUG(); break; } } /** * aac_rx_start_adapter - activate adapter * @dev: Adapter * * Start up processing on an i960 based AAC adapter */ static void aac_rx_start_adapter(struct aac_dev *dev) { union aac_init *init; init = dev->init; init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); // We can only use a 32 bit address here rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } /** * aac_rx_check_health * @dev: device to check if healthy * * Will attempt to determine if the specified adapter is alive and * capable of handling requests, returning 0 if alive. */ static int aac_rx_check_health(struct aac_dev *dev) { u32 status = rx_readl(dev, MUnit.OMRx[0]); /* * Check to see if the board failed any self tests. */ if (unlikely(status & SELF_TEST_FAILED)) return -1; /* * Check to see if the board panic'd. */ if (unlikely(status & KERNEL_PANIC)) { char * buffer; struct POSTSTATUS { __le32 Post_Command; __le32 Post_Address; } * post; dma_addr_t paddr, baddr; int ret; if (likely((status & 0xFF000000L) == 0xBC000000L)) return (status >> 16) & 0xFF; buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr, GFP_KERNEL); ret = -2; if (unlikely(buffer == NULL)) return ret; post = dma_alloc_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS), &paddr, GFP_KERNEL); if (unlikely(post == NULL)) { dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr); return ret; } memset(buffer, 0, 512); post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS); post->Post_Address = cpu_to_le32(baddr); rx_writel(dev, MUnit.IMRx[0], paddr); rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS), post, paddr); if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) { ret = (hex_to_bin(buffer[2]) << 4) + hex_to_bin(buffer[3]); } dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr); return ret; } /* * Wait for the adapter to be up and running. */ if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) return -3; /* * Everything is OK */ return 0; } /** * aac_rx_deliver_producer * @fib: fib to issue * * Will send a fib, returning 0 if successful. */ int aac_rx_deliver_producer(struct fib * fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; u32 Index; unsigned long nointr = 0; aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr); atomic_inc(&q->numpending); *(q->headers.producer) = cpu_to_le32(Index + 1); if (!(nointr & aac_config.irq_mod)) aac_adapter_notify(dev, AdapNormCmdQueue); return 0; } /** * aac_rx_deliver_message * @fib: fib to issue * * Will send a fib, returning 0 if successful. 
*/ static int aac_rx_deliver_message(struct fib * fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; u32 Index; u64 addr; volatile void __iomem *device; unsigned long count = 10000000L; /* 50 seconds */ atomic_inc(&q->numpending); for(;;) { Index = rx_readl(dev, MUnit.InboundQueue); if (unlikely(Index == 0xFFFFFFFFL)) Index = rx_readl(dev, MUnit.InboundQueue); if (likely(Index != 0xFFFFFFFFL)) break; if (--count == 0) { atomic_dec(&q->numpending); return -ETIMEDOUT; } udelay(5); } device = dev->base + Index; addr = fib->hw_fib_pa; writel((u32)(addr & 0xffffffff), device); device += sizeof(u32); writel((u32)(addr >> 32), device); device += sizeof(u32); writel(le16_to_cpu(fib->hw_fib_va->header.Size), device); rx_writel(dev, MUnit.InboundQueue, Index); return 0; } /** * aac_rx_ioremap * @dev: adapter * @size: mapping resize request * */ static int aac_rx_ioremap(struct aac_dev * dev, u32 size) { if (!size) { iounmap(dev->regs.rx); return 0; } dev->base = dev->regs.rx = ioremap(dev->base_start, size); if (dev->base == NULL) return -1; dev->IndexRegs = &dev->regs.rx->IndexRegs; return 0; } static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { u32 var = 0; if (!(dev->supplement_adapter_info.supported_options2 & AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { if (bled) printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", dev->name, dev->id, bled); else { bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); if (!bled && (var != 0x00000001) && (var != 0x3803000F)) bled = -EINVAL; } if (bled && (bled != -ETIMEDOUT)) bled = aac_adapter_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); if (bled && (bled != -ETIMEDOUT)) return -EINVAL; } if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */ rx_writel(dev, MUnit.reserved2, 3); msleep(5000); /* Delay 5 seconds */ var = 0x00000001; } if (bled && (var != 0x00000001)) return -EINVAL; ssleep(5); if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) return -ENODEV; if (startup_timeout < 300) startup_timeout = 300; return 0; } /** * aac_rx_select_comm - Select communications method * @dev: Adapter * @comm: communications method */ int aac_rx_select_comm(struct aac_dev *dev, int comm) { switch (comm) { case AAC_COMM_PRODUCER: dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer; dev->a_ops.adapter_intr = aac_rx_intr_producer; dev->a_ops.adapter_deliver = aac_rx_deliver_producer; break; case AAC_COMM_MESSAGE: dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message; dev->a_ops.adapter_intr = aac_rx_intr_message; dev->a_ops.adapter_deliver = aac_rx_deliver_message; break; default: return 1; } return 0; } /** * _aac_rx_init - initialize an i960 based AAC card * @dev: device to configure * * Allocate and set up resources for the i960 based AAC variants. The * device_interface in the commregion will be allocated and linked * to the comm region. */ int _aac_rx_init(struct aac_dev *dev) { unsigned long start; unsigned long status; int restart = 0; int instance = dev->id; const char * name = dev->name; if (aac_adapter_ioremap(dev, dev->base_size)) { printk(KERN_WARNING "%s: unable to map adapter.\n", name); goto error_iounmap; } /* Failure to reset here is an option ... 
*/ dev->a_ops.adapter_sync_cmd = rx_sync_cmd; dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt; dev->OIMR = status = rx_readb (dev, MUnit.OIMR); if (((status & 0x0c) != 0x0c) || dev->init_reset) { dev->init_reset = false; if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) { /* Make sure the Hardware FIFO is empty */ while ((++restart < 512) && (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL)); } } /* * Check to see if the board panic'd while booting. */ status = rx_readl(dev, MUnit.OMRx[0]); if (status & KERNEL_PANIC) { if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev), IOP_HWSOFT_RESET)) goto error_iounmap; ++restart; } /* * Check to see if the board failed any self tests. */ status = rx_readl(dev, MUnit.OMRx[0]); if (status & SELF_TEST_FAILED) { printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); goto error_iounmap; } /* * Check to see if the monitor panic'd while booting. */ if (status & MONITOR_PANIC) { printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); goto error_iounmap; } start = jiffies; /* * Wait for the adapter to be up and running. Wait up to 3 minutes */ while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING)) { if ((restart && (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || time_after(jiffies, start+HZ*startup_timeout)) { printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", dev->name, instance, status); goto error_iounmap; } if (!restart && ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || time_after(jiffies, start + HZ * ((startup_timeout > 60) ? (startup_timeout - 60) : (startup_timeout / 2))))) { if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev), IOP_HWSOFT_RESET))) start = jiffies; ++restart; } msleep(1); } if (restart && aac_commit) aac_commit = 1; /* * Fill in the common function dispatch table. */ dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; dev->a_ops.adapter_notify = aac_rx_notify_adapter; dev->a_ops.adapter_sync_cmd = rx_sync_cmd; dev->a_ops.adapter_check_health = aac_rx_check_health; dev->a_ops.adapter_restart = aac_rx_restart_adapter; dev->a_ops.adapter_start = aac_rx_start_adapter; /* * First clear out all interrupts. Then enable the one's that we * can handle. */ aac_adapter_comm(dev, AAC_COMM_PRODUCER); aac_adapter_disable_int(dev); rx_writel(dev, MUnit.ODR, 0xffffffff); aac_adapter_enable_int(dev); if (aac_init_adapter(dev) == NULL) goto error_iounmap; aac_adapter_comm(dev, dev->comm_interface); dev->sync_mode = 0; /* sync. mode not supported */ dev->msi = aac_msi && !pci_enable_msi(dev->pdev); if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, IRQF_SHARED, "aacraid", dev) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance); goto error_iounmap; } dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; aac_adapter_enable_int(dev); /* * Tell the adapter that all is configured, and it can * start accepting requests */ aac_rx_start_adapter(dev); return 0; error_iounmap: return -1; } int aac_rx_init(struct aac_dev *dev) { /* * Fill in the function dispatch table. */ dev->a_ops.adapter_ioremap = aac_rx_ioremap; dev->a_ops.adapter_comm = aac_rx_select_comm; return _aac_rx_init(dev); }
linux-master
drivers/scsi/aacraid/rx.c
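rx_sync_cmd() in drivers/scsi/aacraid/rx.c above writes the mailbox registers and then polls the outbound doorbell for up to 30 seconds before returning -ETIMEDOUT. The sketch below shows the same bounded-polling shape in plain userspace C, with clock_gettime() standing in for jiffies and a fake flag in place of the hardware doorbell register; it is an illustration of the pattern, not driver code.

/* Editor's sketch of a poll-with-timeout loop in the style of rx_sync_cmd(). */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>
#include <errno.h>

static int fake_doorbell_polls;

/* Stand-in for reading OutboundDoorbellReg: pretend the adapter completes
 * the command on the fifth poll. */
static int doorbell_set(void)
{
	return ++fake_doorbell_polls >= 5;
}

static int sync_cmd_wait(long timeout_ms)
{
	struct timespec start, now, pause;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (doorbell_set())
			return 0;               /* command completed */

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
			     (now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms >= timeout_ms)
			return -ETIMEDOUT;      /* like the 30 second cutoff */

		/* The driver sleeps about 1 ms per iteration (msleep(1));
		 * a short nanosleep() plays the same role here. */
		pause.tv_sec = 0;
		pause.tv_nsec = 1000000L;
		nanosleep(&pause, NULL);
	}
}

int main(void)
{
	int ret = sync_cmd_wait(30000);         /* 30 s budget, as in the driver */

	printf("sync command %s\n", ret == 0 ? "completed" : "timed out");
	return 0;
}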
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * commsup.c * * Abstract: Contain all routines that are required for FSA host/adapter * communication. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/crash_dump.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/bcd.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include "aacraid.h" /** * fib_map_alloc - allocate the fib objects * @dev: Adapter to allocate for * * Allocate and map the shared PCI space for the FIB blocks used to * talk to the Adaptec firmware. */ static int fib_map_alloc(struct aac_dev *dev) { if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE) dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; else dev->max_cmd_size = dev->max_fib_size; if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) { dev->max_cmd_size = AAC_MAX_NATIVE_SIZE; } else { dev->max_cmd_size = dev->max_fib_size; } dprintk((KERN_INFO "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n", &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue, AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev, (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), &dev->hw_fib_pa, GFP_KERNEL); if (dev->hw_fib_va == NULL) return -ENOMEM; return 0; } /** * aac_fib_map_free - free the fib objects * @dev: Adapter to free * * Free the PCI mappings and the memory allocated for FIB blocks * on this adapter. 
*/ void aac_fib_map_free(struct aac_dev *dev) { size_t alloc_size; size_t fib_size; int num_fibs; if(!dev->hw_fib_va || !dev->max_cmd_size) return; num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr); alloc_size = fib_size * num_fibs + ALIGN32 - 1; dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va, dev->hw_fib_pa); dev->hw_fib_va = NULL; dev->hw_fib_pa = 0; } void aac_fib_vector_assign(struct aac_dev *dev) { u32 i = 0; u32 vector = 1; struct fib *fibptr = NULL; for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) { if ((dev->max_msix == 1) || (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1) - dev->vector_cap))) { fibptr->vector_no = 0; } else { fibptr->vector_no = vector; vector++; if (vector == dev->max_msix) vector = 1; } } } /** * aac_fib_setup - setup the fibs * @dev: Adapter to set up * * Allocate the PCI space for the fibs, map it and then initialise the * fib area, the unmapped fib data and also the free list */ int aac_fib_setup(struct aac_dev * dev) { struct fib *fibptr; struct hw_fib *hw_fib; dma_addr_t hw_fib_pa; int i; u32 max_cmds; while (((i = fib_map_alloc(dev)) == -ENOMEM) && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) { max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1; dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB; if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) dev->init->r7.max_io_commands = cpu_to_le32(max_cmds); } if (i<0) return -ENOMEM; memset(dev->hw_fib_va, 0, (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); /* 32 byte alignment for PMC */ hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + (hw_fib_pa - dev->hw_fib_pa)); /* add Xport header */ hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + sizeof(struct aac_fib_xporthdr)); hw_fib_pa += sizeof(struct aac_fib_xporthdr); /* * Initialise the fibs */ for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) { fibptr->flags = 0; fibptr->size = sizeof(struct fib); fibptr->dev = dev; fibptr->hw_fib_va = hw_fib; fibptr->data = (void *) fibptr->hw_fib_va->data; fibptr->next = fibptr+1; /* Forward chain the fibs */ init_completion(&fibptr->event_wait); spin_lock_init(&fibptr->event_lock); hw_fib->header.XferState = cpu_to_le32(0xffffffff); hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); /* ?? 
max_cmd_size */ fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_sgl_pa = hw_fib_pa + offsetof(struct aac_hba_cmd_req, sge[2]); /* * one element is for the ptr to the separate sg list, * second element for 32 byte alignment */ fibptr->hw_error_pa = hw_fib_pa + offsetof(struct aac_native_hba, resp.resp_bytes[0]); hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)); hw_fib_pa = hw_fib_pa + dev->max_cmd_size + sizeof(struct aac_fib_xporthdr); } /* *Assign vector numbers to fibs */ aac_fib_vector_assign(dev); /* * Add the fib chain to the free list */ dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL; /* * Set 8 fibs aside for management tools */ dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue]; return 0; } /** * aac_fib_alloc_tag-allocate a fib using tags * @dev: Adapter to allocate the fib for * @scmd: SCSI command * * Allocate a fib from the adapter fib pool using tags * from the blk layer. */ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) { struct fib *fibptr; u32 blk_tag; int i; blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); i = blk_mq_unique_tag_to_tag(blk_tag); fibptr = &dev->fibs[i]; /* * Null out fields that depend on being zero at the start of * each I/O */ fibptr->hw_fib_va->header.XferState = 0; fibptr->type = FSAFS_NTC_FIB_CONTEXT; fibptr->callback_data = NULL; fibptr->callback = NULL; fibptr->flags = 0; return fibptr; } /** * aac_fib_alloc - allocate a fib * @dev: Adapter to allocate the fib for * * Allocate a fib from the adapter fib pool. If the pool is empty we * return NULL. */ struct fib *aac_fib_alloc(struct aac_dev *dev) { struct fib * fibptr; unsigned long flags; spin_lock_irqsave(&dev->fib_lock, flags); fibptr = dev->free_fib; if(!fibptr){ spin_unlock_irqrestore(&dev->fib_lock, flags); return fibptr; } dev->free_fib = fibptr->next; spin_unlock_irqrestore(&dev->fib_lock, flags); /* * Set the proper node type code and node byte size */ fibptr->type = FSAFS_NTC_FIB_CONTEXT; fibptr->size = sizeof(struct fib); /* * Null out fields that depend on being zero at the start of * each I/O */ fibptr->hw_fib_va->header.XferState = 0; fibptr->flags = 0; fibptr->callback = NULL; fibptr->callback_data = NULL; return fibptr; } /** * aac_fib_free - free a fib * @fibptr: fib to free up * * Frees up a fib and places it on the appropriate queue */ void aac_fib_free(struct fib *fibptr) { unsigned long flags; if (fibptr->done == 2) return; spin_lock_irqsave(&fibptr->dev->fib_lock, flags); if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) aac_config.fib_timeouts++; if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && fibptr->hw_fib_va->header.XferState != 0) { printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", (void*)fibptr, le32_to_cpu(fibptr->hw_fib_va->header.XferState)); } fibptr->next = fibptr->dev->free_fib; fibptr->dev->free_fib = fibptr; spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); } /** * aac_fib_init - initialise a fib * @fibptr: The fib to initialize * * Set up the generic fib fields ready for use */ void aac_fib_init(struct fib *fibptr) { struct hw_fib *hw_fib = fibptr->hw_fib_va; memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr)); hw_fib->header.StructType = FIB_MAGIC; hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size); hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); 
hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size); } /** * fib_dealloc - deallocate a fib * @fibptr: fib to deallocate * * Will deallocate and return to the free pool the FIB pointed to by the * caller. */ static void fib_dealloc(struct fib * fibptr) { struct hw_fib *hw_fib = fibptr->hw_fib_va; hw_fib->header.XferState = 0; } /* * Commuication primitives define and support the queuing method we use to * support host to adapter commuication. All queue accesses happen through * these routines and are the only routines which have a knowledge of the * how these queues are implemented. */ /** * aac_get_entry - get a queue entry * @dev: Adapter * @qid: Queue Number * @entry: Entry return * @index: Index return * @nonotify: notification control * * With a priority the routine returns a queue entry if the queue has free entries. If the queue * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is * returned. */ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify) { struct aac_queue * q; unsigned long idx; /* * All of the queues wrap when they reach the end, so we check * to see if they have reached the end and if they have we just * set the index back to zero. This is a wrap. You could or off * the high bits in all updates but this is a bit faster I think. */ q = &dev->queues->queue[qid]; idx = *index = le32_to_cpu(*(q->headers.producer)); /* Interrupt Moderation, only interrupt for first two entries */ if (idx != le32_to_cpu(*(q->headers.consumer))) { if (--idx == 0) { if (qid == AdapNormCmdQueue) idx = ADAP_NORM_CMD_ENTRIES; else idx = ADAP_NORM_RESP_ENTRIES; } if (idx != le32_to_cpu(*(q->headers.consumer))) *nonotify = 1; } if (qid == AdapNormCmdQueue) { if (*index >= ADAP_NORM_CMD_ENTRIES) *index = 0; /* Wrap to front of the Producer Queue. */ } else { if (*index >= ADAP_NORM_RESP_ENTRIES) *index = 0; /* Wrap to front of the Producer Queue. */ } /* Queue is full */ if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { printk(KERN_WARNING "Queue %d full, %u outstanding.\n", qid, atomic_read(&q->numpending)); return 0; } else { *entry = q->base + *index; return 1; } } /** * aac_queue_get - get the next free QE * @dev: Adapter * @index: Returned index * @qid: Queue number * @hw_fib: Fib to associate with the queue entry * @wait: Wait if queue full * @fibptr: Driver fib object to go with fib * @nonotify: Don't notify the adapter * * Gets the next free QE off the requested priorty adapter command * queue and associates the Fib with the QE. The QE represented by * index is ready to insert on the queue when this routine returns * success. 
*/ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) { struct aac_entry * entry = NULL; int map = 0; if (qid == AdapNormCmdQueue) { /* if no entries wait for some if caller wants to */ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { printk(KERN_ERR "GetEntries failed\n"); } /* * Setup queue entry with a command, status and fib mapped */ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); map = 1; } else { while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { /* if no entries wait for some if caller wants to */ } /* * Setup queue entry with command, status and fib mapped */ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); entry->addr = hw_fib->header.SenderFibAddress; /* Restore adapters pointer to the FIB */ hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */ map = 0; } /* * If MapFib is true than we need to map the Fib and put pointers * in the queue entry. */ if (map) entry->addr = cpu_to_le32(fibptr->hw_fib_pa); return 0; } /* * Define the highest level of host to adapter communication routines. * These routines will support host to adapter FS commuication. These * routines have no knowledge of the commuication method used. This level * sends and receives FIBs. This level has no knowledge of how these FIBs * get passed back and forth. */ /** * aac_fib_send - send a fib to the adapter * @command: Command to send * @fibptr: The fib * @size: Size of fib data area * @priority: Priority of Fib * @wait: Async/sync select * @reply: True if a reply is wanted * @callback: Called with reply * @callback_data: Passed to callback * * Sends the requested FIB to the adapter and optionally will wait for a * response FIB. If the caller does not wish to wait for a response than * an event to wait on must be supplied. This event will be set when a * response FIB is received from the adapter. */ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *callback_data) { struct aac_dev * dev = fibptr->dev; struct hw_fib * hw_fib = fibptr->hw_fib_va; unsigned long flags = 0; unsigned long mflags = 0; unsigned long sflags = 0; if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) return -EBUSY; if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)) return -EINVAL; /* * There are 5 cases with the wait and response requested flags. * The only invalid cases are if the caller requests to wait and * does not request a response and if the caller does not want a * response and the Fib is not allocated from pool. If a response * is not requested the Fib will just be deallocaed by the DPC * routine when the response comes back from the adapter. No * further processing will be done besides deleting the Fib. We * will have a debug mode where the adapter can notify the host * it had a problem and the host can log that fact. 
*/ fibptr->flags = 0; if (wait && !reply) { return -EINVAL; } else if (!wait && reply) { hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); FIB_COUNTER_INCREMENT(aac_config.AsyncSent); } else if (!wait && !reply) { hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected); FIB_COUNTER_INCREMENT(aac_config.NoResponseSent); } else if (wait && reply) { hw_fib->header.XferState |= cpu_to_le32(ResponseExpected); FIB_COUNTER_INCREMENT(aac_config.NormalSent); } /* * Map the fib into 32bits by using the fib number */ hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); /* use the same shifted value for handle to be compatible * with the new native hba command handle */ hw_fib->header.Handle = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); /* * Set FIB state to indicate where it came from and if we want a * response from the adapter. Also load the command from the * caller. * * Map the hw fib pointer as a 32bit value */ hw_fib->header.Command = cpu_to_le16(command); hw_fib->header.XferState |= cpu_to_le32(SentFromHost); /* * Set the size of the Fib we want to send to the adapter */ hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size); if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) { return -EMSGSIZE; } /* * Get a queue entry connect the FIB to it and send an notify * the adapter a command is ready. */ hw_fib->header.XferState |= cpu_to_le32(NormalPriority); /* * Fill in the Callback and CallbackContext if we are not * going to wait. */ if (!wait) { fibptr->callback = callback; fibptr->callback_data = callback_data; fibptr->flags = FIB_CONTEXT_FLAG; } fibptr->done = 0; FIB_COUNTER_INCREMENT(aac_config.FibsSent); dprintk((KERN_DEBUG "Fib contents:.\n")); dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command))); dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command))); dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState))); dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va)); dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); if (!dev->queues) return -EBUSY; if (wait) { spin_lock_irqsave(&dev->manage_lock, mflags); if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { printk(KERN_INFO "No management Fibs Available:%d\n", dev->management_fib_count); spin_unlock_irqrestore(&dev->manage_lock, mflags); return -EBUSY; } dev->management_fib_count++; spin_unlock_irqrestore(&dev->manage_lock, mflags); spin_lock_irqsave(&fibptr->event_lock, flags); } if (dev->sync_mode) { if (wait) spin_unlock_irqrestore(&fibptr->event_lock, flags); spin_lock_irqsave(&dev->sync_lock, sflags); if (dev->sync_fib) { list_add_tail(&fibptr->fiblink, &dev->sync_fib_list); spin_unlock_irqrestore(&dev->sync_lock, sflags); } else { dev->sync_fib = fibptr; spin_unlock_irqrestore(&dev->sync_lock, sflags); aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } if (wait) { fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; if (wait_for_completion_interruptible(&fibptr->event_wait)) { fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT; return -EFAULT; } return 0; } return -EINPROGRESS; } if (aac_adapter_deliver(fibptr) != 0) { printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); if (wait) { spin_unlock_irqrestore(&fibptr->event_lock, flags); spin_lock_irqsave(&dev->manage_lock, mflags); 
dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); } return -EBUSY; } /* * If the caller wanted us to wait for response wait now. */ if (wait) { spin_unlock_irqrestore(&fibptr->event_lock, flags); /* Only set for first known interruptable command */ if (wait < 0) { /* * *VERY* Dangerous to time out a command, the * assumption is made that we have no hope of * functioning because an interrupt routing or other * hardware failure has occurred. */ unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */ while (!try_wait_for_completion(&fibptr->event_wait)) { int blink; if (time_is_before_eq_jiffies(timeout)) { struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; atomic_dec(&q->numpending); if (wait == -1) { printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" "Usually a result of a PCI interrupt routing problem;\n" "update mother board BIOS or consider utilizing one of\n" "the SAFE mode kernel options (acpi, apic etc)\n"); } return -ETIMEDOUT; } if (unlikely(aac_pci_offline(dev))) return -EFAULT; if ((blink = aac_adapter_check_health(dev)) > 0) { if (wait == -1) { printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n" "Usually a result of a serious unrecoverable hardware problem\n", blink); } return -EFAULT; } /* * Allow other processes / CPUS to use core */ schedule(); } } else if (wait_for_completion_interruptible(&fibptr->event_wait)) { /* Do nothing ... satisfy * wait_for_completion_interruptible must_check */ } spin_lock_irqsave(&fibptr->event_lock, flags); if (fibptr->done == 0) { fibptr->done = 2; /* Tell interrupt we aborted */ spin_unlock_irqrestore(&fibptr->event_lock, flags); return -ERESTARTSYS; } spin_unlock_irqrestore(&fibptr->event_lock, flags); BUG_ON(fibptr->done == 0); if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) return -ETIMEDOUT; return 0; } /* * If the user does not want a response than return success otherwise * return pending */ if (reply) return -EINPROGRESS; else return 0; } int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, void *callback_data) { struct aac_dev *dev = fibptr->dev; int wait; unsigned long flags = 0; unsigned long mflags = 0; struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *) fibptr->hw_fib_va; fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); if (callback) { wait = 0; fibptr->callback = callback; fibptr->callback_data = callback_data; } else wait = 1; hbacmd->iu_type = command; if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { /* bit1 of request_id must be 0 */ hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; } else return -EINVAL; if (wait) { spin_lock_irqsave(&dev->manage_lock, mflags); if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { spin_unlock_irqrestore(&dev->manage_lock, mflags); return -EBUSY; } dev->management_fib_count++; spin_unlock_irqrestore(&dev->manage_lock, mflags); spin_lock_irqsave(&fibptr->event_lock, flags); } if (aac_adapter_deliver(fibptr) != 0) { if (wait) { spin_unlock_irqrestore(&fibptr->event_lock, flags); spin_lock_irqsave(&dev->manage_lock, mflags); dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); } return -EBUSY; } FIB_COUNTER_INCREMENT(aac_config.NativeSent); if (wait) { spin_unlock_irqrestore(&fibptr->event_lock, flags); if (unlikely(aac_pci_offline(dev))) return -EFAULT; fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; if (wait_for_completion_interruptible(&fibptr->event_wait)) fibptr->done = 2; 
fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT); spin_lock_irqsave(&fibptr->event_lock, flags); if ((fibptr->done == 0) || (fibptr->done == 2)) { fibptr->done = 2; /* Tell interrupt we aborted */ spin_unlock_irqrestore(&fibptr->event_lock, flags); return -ERESTARTSYS; } spin_unlock_irqrestore(&fibptr->event_lock, flags); WARN_ON(fibptr->done == 0); if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) return -ETIMEDOUT; return 0; } return -EINPROGRESS; } /** * aac_consumer_get - get the top of the queue * @dev: Adapter * @q: Queue * @entry: Return entry * * Will return a pointer to the entry on the top of the queue requested that * we are a consumer of, and return the address of the queue entry. It does * not change the state of the queue. */ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry) { u32 index; int status; if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { status = 0; } else { /* * The consumer index must be wrapped if we have reached * the end of the queue, else we just use the entry * pointed to by the header index */ if (le32_to_cpu(*q->headers.consumer) >= q->entries) index = 0; else index = le32_to_cpu(*q->headers.consumer); *entry = q->base + index; status = 1; } return(status); } /** * aac_consumer_free - free consumer entry * @dev: Adapter * @q: Queue * @qid: Queue ident * * Frees up the current top of the queue we are a consumer of. If the * queue was full notify the producer that the queue is no longer full. */ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) { int wasfull = 0; u32 notify; if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) wasfull = 1; if (le32_to_cpu(*q->headers.consumer) >= q->entries) *q->headers.consumer = cpu_to_le32(1); else le32_add_cpu(q->headers.consumer, 1); if (wasfull) { switch (qid) { case HostNormCmdQueue: notify = HostNormCmdNotFull; break; case HostNormRespQueue: notify = HostNormRespNotFull; break; default: BUG(); return; } aac_adapter_notify(dev, notify); } } /** * aac_fib_adapter_complete - complete adapter issued fib * @fibptr: fib to complete * @size: size of fib * * Will do all necessary work to complete a FIB that was sent from * the adapter. */ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) { struct hw_fib * hw_fib = fibptr->hw_fib_va; struct aac_dev * dev = fibptr->dev; struct aac_queue * q; unsigned long nointr = 0; unsigned long qflags; if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { kfree(hw_fib); return 0; } if (hw_fib->header.XferState == 0) { if (dev->comm_interface == AAC_COMM_MESSAGE) kfree(hw_fib); return 0; } /* * If we plan to do anything check the structure type first. */ if (hw_fib->header.StructType != FIB_MAGIC && hw_fib->header.StructType != FIB_MAGIC2 && hw_fib->header.StructType != FIB_MAGIC2_64) { if (dev->comm_interface == AAC_COMM_MESSAGE) kfree(hw_fib); return -EINVAL; } /* * This block handles the case where the adapter had sent us a * command and we have finished processing the command. We * call completeFib when we are done processing the command * and want to send a response back to the adapter. This will * send the completed cdb to the adapter. 
*/ if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { if (dev->comm_interface == AAC_COMM_MESSAGE) { kfree (hw_fib); } else { u32 index; hw_fib->header.XferState |= cpu_to_le32(HostProcessed); if (size) { size += sizeof(struct aac_fibhdr); if (size > le16_to_cpu(hw_fib->header.SenderSize)) return -EMSGSIZE; hw_fib->header.Size = cpu_to_le16(size); } q = &dev->queues->queue[AdapNormRespQueue]; spin_lock_irqsave(q->lock, qflags); aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr); *(q->headers.producer) = cpu_to_le32(index + 1); spin_unlock_irqrestore(q->lock, qflags); if (!(nointr & (int)aac_config.irq_mod)) aac_adapter_notify(dev, AdapNormRespQueue); } } else { printk(KERN_WARNING "aac_fib_adapter_complete: " "Unknown xferstate detected.\n"); BUG(); } return 0; } /** * aac_fib_complete - fib completion handler * @fibptr: FIB to complete * * Will do all necessary work to complete a FIB. */ int aac_fib_complete(struct fib *fibptr) { struct hw_fib * hw_fib = fibptr->hw_fib_va; if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { fib_dealloc(fibptr); return 0; } /* * Check for a fib which has already been completed or with a * status wait timeout */ if (hw_fib->header.XferState == 0 || fibptr->done == 2) return 0; /* * If we plan to do anything check the structure type first. */ if (hw_fib->header.StructType != FIB_MAGIC && hw_fib->header.StructType != FIB_MAGIC2 && hw_fib->header.StructType != FIB_MAGIC2_64) return -EINVAL; /* * This block completes a cdb which orginated on the host and we * just need to deallocate the cdb or reinit it. At this point the * command is complete that we had sent to the adapter and this * cdb could be reused. */ if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) { fib_dealloc(fibptr); } else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost)) { /* * This handles the case when the host has aborted the I/O * to the adapter because the adapter is not responding */ fib_dealloc(fibptr); } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) { fib_dealloc(fibptr); } else { BUG(); } return 0; } /** * aac_printf - handle printf from firmware * @dev: Adapter * @val: Message info * * Print a message passed to us by the controller firmware on the * Adaptec board */ void aac_printf(struct aac_dev *dev, u32 val) { char *cp = dev->printfbuf; if (dev->printf_enabled) { int length = val & 0xffff; int level = (val >> 16) & 0xffff; /* * The size of the printfbuf is set in port.c * There is no variable or define for it */ if (length > 255) length = 255; if (cp[length] != 0) cp[length] = 0; if (level == LOG_AAC_HIGH_ERROR) printk(KERN_WARNING "%s:%s", dev->name, cp); else printk(KERN_INFO "%s:%s", dev->name, cp); } memset(cp, 0, 256); } static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index) { return le32_to_cpu(((__le32 *)aifcmd->data)[index]); } static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd) { switch (aac_aif_data(aifcmd, 1)) { case AifBuCacheDataLoss: if (aac_aif_data(aifcmd, 2)) dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", aac_aif_data(aifcmd, 2)); else dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); break; case AifBuCacheDataRecover: if (aac_aif_data(aifcmd, 2)) dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", aac_aif_data(aifcmd, 2)); else dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); break; } } #define AIF_SNIFF_TIMEOUT 
(500*HZ) /** * aac_handle_aif - Handle a message from the firmware * @dev: Which adapter this fib is from * @fibptr: Pointer to fibptr from adapter * * This routine handles a driver notify fib from the adapter and * dispatches it to the appropriate routine for handling. */ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) { struct hw_fib * hw_fib = fibptr->hw_fib_va; struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; u32 channel, id, lun, container; struct scsi_device *device; enum { NOTHING, DELETE, ADD, CHANGE } device_config_needed = NOTHING; /* Sniff for container changes */ if (!dev || !dev->fsa_dev) return; container = channel = id = lun = (u32)-1; /* * We have set this up to try and minimize the number of * re-configures that take place. As a result of this when * certain AIF's come in we will set a flag waiting for another * type of AIF before setting the re-config flag. */ switch (le32_to_cpu(aifcmd->command)) { case AifCmdDriverNotify: switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { case AifRawDeviceRemove: container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); if ((container >> 28)) { container = (u32)-1; break; } channel = (container >> 24) & 0xF; if (channel >= dev->maximum_num_channels) { container = (u32)-1; break; } id = container & 0xFFFF; if (id >= dev->maximum_num_physicals) { container = (u32)-1; break; } lun = (container >> 16) & 0xFF; container = (u32)-1; channel = aac_phys_to_logical(channel); device_config_needed = DELETE; break; /* * Morph or Expand complete */ case AifDenMorphComplete: case AifDenVolumeExtendComplete: container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); if (container >= dev->maximum_num_containers) break; /* * Find the scsi_device associated with the SCSI * address. Make sure we have the right array, and if * so set the flag to initiate a new re-config once we * see an AifEnConfigChange AIF come through. */ if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) { device = scsi_device_lookup(dev->scsi_host_ptr, CONTAINER_TO_CHANNEL(container), CONTAINER_TO_ID(container), CONTAINER_TO_LUN(container)); if (device) { dev->fsa_dev[container].config_needed = CHANGE; dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; dev->fsa_dev[container].config_waiting_stamp = jiffies; scsi_device_put(device); } } } /* * If we are waiting on something and this happens to be * that thing then set the re-configure flag. */ if (container != (u32)-1) { if (container >= dev->maximum_num_containers) break; if ((dev->fsa_dev[container].config_waiting_on == le32_to_cpu(*(__le32 *)aifcmd->data)) && time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) dev->fsa_dev[container].config_waiting_on = 0; } else for (container = 0; container < dev->maximum_num_containers; ++container) { if ((dev->fsa_dev[container].config_waiting_on == le32_to_cpu(*(__le32 *)aifcmd->data)) && time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) dev->fsa_dev[container].config_waiting_on = 0; } break; case AifCmdEventNotify: switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { case AifEnBatteryEvent: dev->cache_protected = (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3)); break; /* * Add an Array. 
*/ case AifEnAddContainer: container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); if (container >= dev->maximum_num_containers) break; dev->fsa_dev[container].config_needed = ADD; dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; dev->fsa_dev[container].config_waiting_stamp = jiffies; break; /* * Delete an Array. */ case AifEnDeleteContainer: container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); if (container >= dev->maximum_num_containers) break; dev->fsa_dev[container].config_needed = DELETE; dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; dev->fsa_dev[container].config_waiting_stamp = jiffies; break; /* * Container change detected. If we currently are not * waiting on something else, setup to wait on a Config Change. */ case AifEnContainerChange: container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); if (container >= dev->maximum_num_containers) break; if (dev->fsa_dev[container].config_waiting_on && time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) break; dev->fsa_dev[container].config_needed = CHANGE; dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; dev->fsa_dev[container].config_waiting_stamp = jiffies; break; case AifEnConfigChange: break; case AifEnAddJBOD: case AifEnDeleteJBOD: container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); if ((container >> 28)) { container = (u32)-1; break; } channel = (container >> 24) & 0xF; if (channel >= dev->maximum_num_channels) { container = (u32)-1; break; } id = container & 0xFFFF; if (id >= dev->maximum_num_physicals) { container = (u32)-1; break; } lun = (container >> 16) & 0xFF; container = (u32)-1; channel = aac_phys_to_logical(channel); device_config_needed = (((__le32 *)aifcmd->data)[0] == cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE; if (device_config_needed == ADD) { device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun); if (device) { scsi_remove_device(device); scsi_device_put(device); } } break; case AifEnEnclosureManagement: /* * If in JBOD mode, automatic exposure of new * physical target to be suppressed until configured. */ if (dev->jbod) break; switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) { case EM_DRIVE_INSERTION: case EM_DRIVE_REMOVAL: case EM_SES_DRIVE_INSERTION: case EM_SES_DRIVE_REMOVAL: container = le32_to_cpu( ((__le32 *)aifcmd->data)[2]); if ((container >> 28)) { container = (u32)-1; break; } channel = (container >> 24) & 0xF; if (channel >= dev->maximum_num_channels) { container = (u32)-1; break; } id = container & 0xFFFF; lun = (container >> 16) & 0xFF; container = (u32)-1; if (id >= dev->maximum_num_physicals) { /* legacy dev_t ? */ if ((0x2000 <= id) || lun || channel || ((channel = (id >> 7) & 0x3F) >= dev->maximum_num_channels)) break; lun = (id >> 4) & 7; id &= 0xF; } channel = aac_phys_to_logical(channel); device_config_needed = ((((__le32 *)aifcmd->data)[3] == cpu_to_le32(EM_DRIVE_INSERTION)) || (((__le32 *)aifcmd->data)[3] == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ? ADD : DELETE; break; } break; case AifBuManagerEvent: aac_handle_aif_bu(dev, aifcmd); break; } /* * If we are waiting on something and this happens to be * that thing then set the re-configure flag. 
*/ if (container != (u32)-1) { if (container >= dev->maximum_num_containers) break; if ((dev->fsa_dev[container].config_waiting_on == le32_to_cpu(*(__le32 *)aifcmd->data)) && time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) dev->fsa_dev[container].config_waiting_on = 0; } else for (container = 0; container < dev->maximum_num_containers; ++container) { if ((dev->fsa_dev[container].config_waiting_on == le32_to_cpu(*(__le32 *)aifcmd->data)) && time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) dev->fsa_dev[container].config_waiting_on = 0; } break; case AifCmdJobProgress: /* * These are job progress AIF's. When a Clear is being * done on a container it is initially created then hidden from * the OS. When the clear completes we don't get a config * change so we monitor the job status complete on a clear then * wait for a container change. */ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] || ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) { for (container = 0; container < dev->maximum_num_containers; ++container) { /* * Stomp on all config sequencing for all * containers? */ dev->fsa_dev[container].config_waiting_on = AifEnContainerChange; dev->fsa_dev[container].config_needed = ADD; dev->fsa_dev[container].config_waiting_stamp = jiffies; } } if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && ((__le32 *)aifcmd->data)[6] == 0 && ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) { for (container = 0; container < dev->maximum_num_containers; ++container) { /* * Stomp on all config sequencing for all * containers? */ dev->fsa_dev[container].config_waiting_on = AifEnContainerChange; dev->fsa_dev[container].config_needed = DELETE; dev->fsa_dev[container].config_waiting_stamp = jiffies; } } break; } container = 0; retry_next: if (device_config_needed == NOTHING) { for (; container < dev->maximum_num_containers; ++container) { if ((dev->fsa_dev[container].config_waiting_on == 0) && (dev->fsa_dev[container].config_needed != NOTHING) && time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { device_config_needed = dev->fsa_dev[container].config_needed; dev->fsa_dev[container].config_needed = NOTHING; channel = CONTAINER_TO_CHANNEL(container); id = CONTAINER_TO_ID(container); lun = CONTAINER_TO_LUN(container); break; } } } if (device_config_needed == NOTHING) return; /* * If we decided that a re-configuration needs to be done, * schedule it here on the way out the door, please close the door * behind you. */ /* * Find the scsi_device associated with the SCSI address, * and mark it as changed, invalidating the cache. This deals * with changes to existing device IDs. */ if (!dev || !dev->scsi_host_ptr) return; /* * force reload of disk info via aac_probe_container */ if ((channel == CONTAINER_CHANNEL) && (device_config_needed != NOTHING)) { if (dev->fsa_dev[container].valid == 1) dev->fsa_dev[container].valid = 2; aac_probe_container(dev, container); } device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun); if (device) { switch (device_config_needed) { case DELETE: #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE)) scsi_remove_device(device); #else if (scsi_device_online(device)) { scsi_device_set_state(device, SDEV_OFFLINE); sdev_printk(KERN_INFO, device, "Device offlined - %s\n", (channel == CONTAINER_CHANNEL) ? 
"array deleted" : "enclosure services event"); } #endif break; case ADD: if (!scsi_device_online(device)) { sdev_printk(KERN_INFO, device, "Device online - %s\n", (channel == CONTAINER_CHANNEL) ? "array created" : "enclosure services event"); scsi_device_set_state(device, SDEV_RUNNING); } fallthrough; case CHANGE: if ((channel == CONTAINER_CHANNEL) && (!dev->fsa_dev[container].valid)) { #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE)) scsi_remove_device(device); #else if (!scsi_device_online(device)) break; scsi_device_set_state(device, SDEV_OFFLINE); sdev_printk(KERN_INFO, device, "Device offlined - %s\n", "array failed"); #endif break; } scsi_rescan_device(device); break; default: break; } scsi_device_put(device); device_config_needed = NOTHING; } if (device_config_needed == ADD) scsi_add_device(dev->scsi_host_ptr, channel, id, lun); if (channel == CONTAINER_CHANNEL) { container++; device_config_needed = NOTHING; goto retry_next; } } static void aac_schedule_bus_scan(struct aac_dev *aac) { if (aac->sa_firmware) aac_schedule_safw_scan_worker(aac); else aac_schedule_src_reinit_aif_worker(aac); } static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) { int index, quirks; int retval; struct Scsi_Host *host = aac->scsi_host_ptr; int jafo = 0; int bled; u64 dmamask; int num_of_fibs = 0; /* * Assumptions: * - host is locked, unless called by the aacraid thread. * (a matter of convenience, due to legacy issues surrounding * eh_host_adapter_reset). * - in_reset is asserted, so no new i/o is getting to the * card. * - The card is dead, or will be very shortly ;-/ so no new * commands are completing in the interrupt service. */ aac_adapter_disable_int(aac); if (aac->thread && aac->thread->pid != current->pid) { spin_unlock_irq(host->host_lock); kthread_stop(aac->thread); aac->thread = NULL; jafo = 1; } /* * If a positive health, means in a known DEAD PANIC * state and the adapter could be reset to `try again'. */ bled = forced ? 0 : aac_adapter_check_health(aac); retval = aac_adapter_restart(aac, bled, reset_type); if (retval) goto out; /* * Loop through the fibs, close the synchronous FIBS */ retval = 1; num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; for (index = 0; index < num_of_fibs; index++) { struct fib *fib = &aac->fibs[index]; __le32 XferState = fib->hw_fib_va->header.XferState; bool is_response_expected = false; if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) && (XferState & cpu_to_le32(ResponseExpected))) is_response_expected = true; if (is_response_expected || fib->flags & FIB_CONTEXT_FLAG_WAIT) { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); complete(&fib->event_wait); spin_unlock_irqrestore(&fib->event_lock, flagv); schedule(); retval = 0; } } /* Give some extra time for ioctls to complete. */ if (retval == 0) ssleep(2); index = aac->cardtype; /* * Re-initialize the adapter, first free resources, then carefully * apply the initialization sequence to come back again. Only risk * is a change in Firmware dropping cache, it is assumed the caller * will ensure that i/o is queisced and the card is flushed in that * case. 
*/ aac_free_irq(aac); aac_fib_map_free(aac); dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, aac->comm_phys); aac_adapter_ioremap(aac, 0); aac->comm_addr = NULL; aac->comm_phys = 0; kfree(aac->queues); aac->queues = NULL; kfree(aac->fsa_dev); aac->fsa_dev = NULL; dmamask = DMA_BIT_MASK(32); quirks = aac_get_driver_ident(index)->quirks; if (quirks & AAC_QUIRK_31BIT) retval = dma_set_mask(&aac->pdev->dev, dmamask); else if (!(quirks & AAC_QUIRK_SRC)) retval = dma_set_mask(&aac->pdev->dev, dmamask); else retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask); if (quirks & AAC_QUIRK_31BIT && !retval) { dmamask = DMA_BIT_MASK(31); retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask); } if (retval) goto out; if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) goto out; if (jafo) { aac->thread = kthread_run(aac_command_thread, aac, "%s", aac->name); if (IS_ERR(aac->thread)) { retval = PTR_ERR(aac->thread); aac->thread = NULL; goto out; } } (void)aac_get_adapter_info(aac); if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) { host->sg_tablesize = 34; host->max_sectors = (host->sg_tablesize * 8) + 112; } if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) { host->sg_tablesize = 17; host->max_sectors = (host->sg_tablesize * 8) + 112; } aac_get_config_status(aac, 1); aac_get_containers(aac); /* * This is where the assumption that the Adapter is quiesced * is important. */ scsi_host_complete_all_commands(host, DID_RESET); retval = 0; out: aac->in_reset = 0; /* * Issue bus rescan to catch any configuration that might have * occurred */ if (!retval && !is_kdump_kernel()) { dev_info(&aac->pdev->dev, "Scheduling bus rescan\n"); aac_schedule_bus_scan(aac); } if (jafo) { spin_lock_irq(host->host_lock); } return retval; } int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) { unsigned long flagv = 0; int retval, unblock_retval; struct Scsi_Host *host = aac->scsi_host_ptr; int bled; if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) return -EBUSY; if (aac->in_reset) { spin_unlock_irqrestore(&aac->fib_lock, flagv); return -EBUSY; } aac->in_reset = 1; spin_unlock_irqrestore(&aac->fib_lock, flagv); /* * Wait for all commands to complete to this specific * target (block maximum 60 seconds). Although not necessary, * it does make us a good storage citizen. */ scsi_host_block(host); /* Quiesce build, flush cache, write through mode */ if (forced < 2) aac_send_shutdown(aac); spin_lock_irqsave(host->host_lock, flagv); bled = forced ? 
forced : (aac_check_reset != 0 && aac_check_reset != 1); retval = _aac_reset_adapter(aac, bled, reset_type); spin_unlock_irqrestore(host->host_lock, flagv); unblock_retval = scsi_host_unblock(host, SDEV_RUNNING); if (!retval) retval = unblock_retval; if ((forced < 2) && (retval == -ENODEV)) { /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */ struct fib * fibctx = aac_fib_alloc(aac); if (fibctx) { struct aac_pause *cmd; int status; aac_fib_init(fibctx); cmd = (struct aac_pause *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_ContainerConfig); cmd->type = cpu_to_le32(CT_PAUSE_IO); cmd->timeout = cpu_to_le32(1); cmd->min = cpu_to_le32(1); cmd->noRescan = cpu_to_le32(1); cmd->count = cpu_to_le32(0); status = aac_fib_send(ContainerCommand, fibctx, sizeof(struct aac_pause), FsaNormal, -2 /* Timeout silently */, 1, NULL, NULL); if (status >= 0) aac_fib_complete(fibctx); /* FIB should be freed only after getting * the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); } } return retval; } int aac_check_health(struct aac_dev * aac) { int BlinkLED; unsigned long time_now, flagv = 0; struct list_head * entry; /* Extending the scope of fib_lock slightly to protect aac->in_reset */ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) return 0; if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { spin_unlock_irqrestore(&aac->fib_lock, flagv); return 0; /* OK */ } aac->in_reset = 1; /* Fake up an AIF: * aac_aifcmd.command = AifCmdEventNotify = 1 * aac_aifcmd.seqnum = 0xFFFFFFFF * aac_aifcmd.data[0] = AifEnExpEvent = 23 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 * aac.aifcmd.data[2] = AifHighPriority = 3 * aac.aifcmd.data[3] = BlinkLED */ time_now = jiffies/HZ; entry = aac->fib_list.next; /* * For each Context that is on the * fibctxList, make a copy of the * fib, and then set the event to wake up the * thread that is waiting for it. */ while (entry != &aac->fib_list) { /* * Extract the fibctx */ struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); struct hw_fib * hw_fib; struct fib * fib; /* * Check if the queue is getting * backlogged */ if (fibctx->count > 20) { /* * It's *not* jiffies folks, * but jiffies / HZ, so do not * panic ... */ u32 time_last = fibctx->jiffies; /* * Has it been > 2 minutes * since the last read off * the queue? */ if ((time_now - time_last) > aif_timeout) { entry = entry->next; aac_close_fib_context(aac, fibctx); continue; } } /* * Warning: no sleep allowed while * holding spinlock */ hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); if (fib && hw_fib) { struct aac_aifcmd * aif; fib->hw_fib_va = hw_fib; fib->dev = aac; aac_fib_init(fib); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof (struct fib); fib->data = hw_fib->data; aif = (struct aac_aifcmd *)hw_fib->data; aif->command = cpu_to_le32(AifCmdEventNotify); aif->seqnum = cpu_to_le32(0xFFFFFFFF); ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); /* * Put the FIB onto the * fibctx's fibs */ list_add_tail(&fib->fiblink, &fibctx->fib_list); fibctx->count++; /* * Set the event to wake up the * thread that will waiting. 
*/ complete(&fibctx->completion); } else { printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); kfree(fib); kfree(hw_fib); } entry = entry->next; } spin_unlock_irqrestore(&aac->fib_lock, flagv); if (BlinkLED < 0) { printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", aac->name, BlinkLED); goto out; } printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); out: aac->in_reset = 0; return BlinkLED; } static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) { return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; } static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev, int bus, int target) { if (bus != CONTAINER_CHANNEL) bus = aac_phys_to_logical(bus); return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0); } static int aac_add_safw_device(struct aac_dev *dev, int bus, int target) { if (bus != CONTAINER_CHANNEL) bus = aac_phys_to_logical(bus); return scsi_add_device(dev->scsi_host_ptr, bus, target, 0); } static void aac_put_safw_scsi_device(struct scsi_device *sdev) { if (sdev) scsi_device_put(sdev); } static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target) { struct scsi_device *sdev; sdev = aac_lookup_safw_scsi_device(dev, bus, target); scsi_remove_device(sdev); aac_put_safw_scsi_device(sdev); } static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev, int bus, int target) { return dev->hba_map[bus][target].scan_counter == dev->scan_counter; } static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target) { if (is_safw_raid_volume(dev, bus, target)) return dev->fsa_dev[target].valid; else return aac_is_safw_scan_count_equal(dev, bus, target); } static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target) { int is_exposed = 0; struct scsi_device *sdev; sdev = aac_lookup_safw_scsi_device(dev, bus, target); if (sdev) is_exposed = 1; aac_put_safw_scsi_device(sdev); return is_exposed; } static int aac_update_safw_host_devices(struct aac_dev *dev) { int i; int bus; int target; int is_exposed = 0; int rcode = 0; rcode = aac_setup_safw_adapter(dev); if (unlikely(rcode < 0)) { goto out; } for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) { bus = get_bus_number(i); target = get_target_number(i); is_exposed = aac_is_safw_device_exposed(dev, bus, target); if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed) aac_add_safw_device(dev, bus, target); else if (!aac_is_safw_target_valid(dev, bus, target) && is_exposed) aac_remove_safw_device(dev, bus, target); } out: return rcode; } static int aac_scan_safw_host(struct aac_dev *dev) { int rcode = 0; rcode = aac_update_safw_host_devices(dev); if (rcode) aac_schedule_safw_scan_worker(dev); return rcode; } int aac_scan_host(struct aac_dev *dev) { int rcode = 0; mutex_lock(&dev->scan_mutex); if (dev->sa_firmware) rcode = aac_scan_safw_host(dev); else scsi_scan_host(dev->scsi_host_ptr); mutex_unlock(&dev->scan_mutex); return rcode; } void aac_src_reinit_aif_worker(struct work_struct *work) { struct aac_dev *dev = container_of(to_delayed_work(work), struct aac_dev, src_reinit_aif_worker); wait_event(dev->scsi_host_ptr->host_wait, !scsi_host_in_recovery(dev->scsi_host_ptr)); aac_reinit_aif(dev, dev->cardtype); } /** * aac_handle_sa_aif - Handle a message from the firmware * @dev: Which adapter this fib is from * @fibptr: Pointer to fibptr from adapter * * This routine handles a driver notify fib from the adapter and * dispatches it to the appropriate routine for handling. 
*/ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr) { int i; u32 events = 0; if (fibptr->hbacmd_size & SA_AIF_HOTPLUG) events = SA_AIF_HOTPLUG; else if (fibptr->hbacmd_size & SA_AIF_HARDWARE) events = SA_AIF_HARDWARE; else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE) events = SA_AIF_PDEV_CHANGE; else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE) events = SA_AIF_LDEV_CHANGE; else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE) events = SA_AIF_BPSTAT_CHANGE; else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE) events = SA_AIF_BPCFG_CHANGE; switch (events) { case SA_AIF_HOTPLUG: case SA_AIF_HARDWARE: case SA_AIF_PDEV_CHANGE: case SA_AIF_LDEV_CHANGE: case SA_AIF_BPCFG_CHANGE: aac_scan_host(dev); break; case SA_AIF_BPSTAT_CHANGE: /* currently do nothing */ break; } for (i = 1; i <= 10; ++i) { events = src_readl(dev, MUnit.IDR); if (events & (1<<23)) { pr_warn(" AIF not cleared by firmware - %d/%d)\n", i, 10); ssleep(1); } } } static int get_fib_count(struct aac_dev *dev) { unsigned int num = 0; struct list_head *entry; unsigned long flagv; /* * Warning: no sleep allowed while * holding spinlock. We take the estimate * and pre-allocate a set of fibs outside the * lock. */ num = le32_to_cpu(dev->init->r7.adapter_fibs_size) / sizeof(struct hw_fib); /* some extra */ spin_lock_irqsave(&dev->fib_lock, flagv); entry = dev->fib_list.next; while (entry != &dev->fib_list) { entry = entry->next; ++num; } spin_unlock_irqrestore(&dev->fib_lock, flagv); return num; } static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool, struct fib **fib_pool, unsigned int num) { struct hw_fib **hw_fib_p; struct fib **fib_p; hw_fib_p = hw_fib_pool; fib_p = fib_pool; while (hw_fib_p < &hw_fib_pool[num]) { *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL); if (!(*(hw_fib_p++))) { --hw_fib_p; break; } *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL); if (!(*(fib_p++))) { kfree(*(--hw_fib_p)); break; } } /* * Get the actual number of allocated fibs */ num = hw_fib_p - hw_fib_pool; return num; } static void wakeup_fibctx_threads(struct aac_dev *dev, struct hw_fib **hw_fib_pool, struct fib **fib_pool, struct fib *fib, struct hw_fib *hw_fib, unsigned int num) { unsigned long flagv; struct list_head *entry; struct hw_fib **hw_fib_p; struct fib **fib_p; u32 time_now, time_last; struct hw_fib *hw_newfib; struct fib *newfib; struct aac_fib_context *fibctx; time_now = jiffies/HZ; spin_lock_irqsave(&dev->fib_lock, flagv); entry = dev->fib_list.next; /* * For each Context that is on the * fibctxList, make a copy of the * fib, and then set the event to wake up the * thread that is waiting for it. */ hw_fib_p = hw_fib_pool; fib_p = fib_pool; while (entry != &dev->fib_list) { /* * Extract the fibctx */ fibctx = list_entry(entry, struct aac_fib_context, next); /* * Check if the queue is getting * backlogged */ if (fibctx->count > 20) { /* * It's *not* jiffies folks, * but jiffies / HZ so do not * panic ... */ time_last = fibctx->jiffies; /* * Has it been > 2 minutes * since the last read off * the queue? 
*/ if ((time_now - time_last) > aif_timeout) { entry = entry->next; aac_close_fib_context(dev, fibctx); continue; } } /* * Warning: no sleep allowed while * holding spinlock */ if (hw_fib_p >= &hw_fib_pool[num]) { pr_warn("aifd: didn't allocate NewFib\n"); entry = entry->next; continue; } hw_newfib = *hw_fib_p; *(hw_fib_p++) = NULL; newfib = *fib_p; *(fib_p++) = NULL; /* * Make the copy of the FIB */ memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); memcpy(newfib, fib, sizeof(struct fib)); newfib->hw_fib_va = hw_newfib; /* * Put the FIB onto the * fibctx's fibs */ list_add_tail(&newfib->fiblink, &fibctx->fib_list); fibctx->count++; /* * Set the event to wake up the * thread that is waiting. */ complete(&fibctx->completion); entry = entry->next; } /* * Set the status of this FIB */ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); aac_fib_adapter_complete(fib, sizeof(u32)); spin_unlock_irqrestore(&dev->fib_lock, flagv); } static void aac_process_events(struct aac_dev *dev) { struct hw_fib *hw_fib; struct fib *fib; unsigned long flags; spinlock_t *t_lock; t_lock = dev->queues->queue[HostNormCmdQueue].lock; spin_lock_irqsave(t_lock, flags); while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { struct list_head *entry; struct aac_aifcmd *aifcmd; unsigned int num; struct hw_fib **hw_fib_pool, **hw_fib_p; struct fib **fib_pool, **fib_p; set_current_state(TASK_RUNNING); entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; list_del(entry); t_lock = dev->queues->queue[HostNormCmdQueue].lock; spin_unlock_irqrestore(t_lock, flags); fib = list_entry(entry, struct fib, fiblink); hw_fib = fib->hw_fib_va; if (dev->sa_firmware) { /* Thor AIF */ aac_handle_sa_aif(dev, fib); aac_fib_adapter_complete(fib, (u16)sizeof(u32)); goto free_fib; } /* * We will process the FIB here or pass it to a * worker thread that is TBD. We Really can't * do anything at this point since we don't have * anything defined for this thread to do. */ memset(fib, 0, sizeof(struct fib)); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); fib->hw_fib_va = hw_fib; fib->data = hw_fib->data; fib->dev = dev; /* * We only handle AifRequest fibs from the adapter. */ aifcmd = (struct aac_aifcmd *) hw_fib->data; if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { /* Handle Driver Notify Events */ aac_handle_aif(dev, fib); *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); aac_fib_adapter_complete(fib, (u16)sizeof(u32)); goto free_fib; } /* * The u32 here is important and intended. 
We are using * 32bit wrapping time to fit the adapter field */ /* Sniff events */ if (aifcmd->command == cpu_to_le32(AifCmdEventNotify) || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) { aac_handle_aif(dev, fib); } /* * get number of fibs to process */ num = get_fib_count(dev); if (!num) goto free_fib; hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *), GFP_KERNEL); if (!hw_fib_pool) goto free_fib; fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL); if (!fib_pool) goto free_hw_fib_pool; /* * Fill up fib pointer pools with actual fibs * and hw_fibs */ num = fillup_pools(dev, hw_fib_pool, fib_pool, num); if (!num) goto free_mem; /* * wakeup the thread that is waiting for * the response from fw (ioctl) */ wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool, fib, hw_fib, num); free_mem: /* Free up the remaining resources */ hw_fib_p = hw_fib_pool; fib_p = fib_pool; while (hw_fib_p < &hw_fib_pool[num]) { kfree(*hw_fib_p); kfree(*fib_p); ++fib_p; ++hw_fib_p; } kfree(fib_pool); free_hw_fib_pool: kfree(hw_fib_pool); free_fib: kfree(fib); t_lock = dev->queues->queue[HostNormCmdQueue].lock; spin_lock_irqsave(t_lock, flags); } /* * There are no more AIF's */ t_lock = dev->queues->queue[HostNormCmdQueue].lock; spin_unlock_irqrestore(t_lock, flags); } static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, u32 datasize) { struct aac_srb *srbcmd; struct sgmap64 *sg64; dma_addr_t addr; char *dma_buf; struct fib *fibptr; int ret = -ENOMEM; u32 vbus, vid; fibptr = aac_fib_alloc(dev); if (!fibptr) goto out; dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr, GFP_KERNEL); if (!dma_buf) goto fib_free_out; aac_fib_init(fibptr); vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus); vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target); srbcmd = (struct aac_srb *)fib_data(fibptr); srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); srbcmd->channel = cpu_to_le32(vbus); srbcmd->id = cpu_to_le32(vid); srbcmd->lun = 0; srbcmd->flags = cpu_to_le32(SRB_DataOut); srbcmd->timeout = cpu_to_le32(10); srbcmd->retry_limit = 0; srbcmd->cdb_size = cpu_to_le32(12); srbcmd->count = cpu_to_le32(datasize); memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); srbcmd->cdb[0] = BMIC_OUT; srbcmd->cdb[6] = WRITE_HOST_WELLNESS; memcpy(dma_buf, (char *)wellness_str, datasize); sg64 = (struct sgmap64 *)&srbcmd->sg; sg64->count = cpu_to_le32(1); sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16)); sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); sg64->sg[0].count = cpu_to_le32(datasize); ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb), FsaNormal, 1, 1, NULL, NULL); dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr); /* * Do not set XferState to zero unless * receives a response from F/W */ if (ret >= 0) aac_fib_complete(fibptr); /* * FIB should be freed only after * getting the response from the F/W */ if (ret != -ERESTARTSYS) goto fib_free_out; out: return ret; fib_free_out: aac_fib_free(fibptr); goto out; } static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now) { struct tm cur_tm; char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; u32 datasize = sizeof(wellness_str); time64_t local_time; int ret = -ENODEV; if (!dev->sa_firmware) goto out; local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60)); time64_to_tm(local_time, 0, &cur_tm); cur_tm.tm_mon += 1; cur_tm.tm_year += 1900; wellness_str[8] = bin2bcd(cur_tm.tm_hour); wellness_str[9] = bin2bcd(cur_tm.tm_min); 
wellness_str[10] = bin2bcd(cur_tm.tm_sec); wellness_str[12] = bin2bcd(cur_tm.tm_mon); wellness_str[13] = bin2bcd(cur_tm.tm_mday); wellness_str[14] = bin2bcd(cur_tm.tm_year / 100); wellness_str[15] = bin2bcd(cur_tm.tm_year % 100); ret = aac_send_wellness_command(dev, wellness_str, datasize); out: return ret; } static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now) { int ret = -ENOMEM; struct fib *fibptr; __le32 *info; fibptr = aac_fib_alloc(dev); if (!fibptr) goto out; aac_fib_init(fibptr); info = (__le32 *)fib_data(fibptr); *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */ ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, 1, 1, NULL, NULL); /* * Do not set XferState to zero unless * receives a response from F/W */ if (ret >= 0) aac_fib_complete(fibptr); /* * FIB should be freed only after * getting the response from the F/W */ if (ret != -ERESTARTSYS) aac_fib_free(fibptr); out: return ret; } /** * aac_command_thread - command processing thread * @data: Adapter to monitor * * Waits on the commandready event in it's queue. When the event gets set * it will pull FIBs off it's queue. It will continue to pull FIBs off * until the queue is empty. When the queue is empty it will wait for * more FIBs. */ int aac_command_thread(void *data) { struct aac_dev *dev = data; DECLARE_WAITQUEUE(wait, current); unsigned long next_jiffies = jiffies + HZ; unsigned long next_check_jiffies = next_jiffies; long difference = HZ; /* * We can only have one thread per adapter for AIF's. */ if (dev->aif_thread) return -EINVAL; /* * Let the DPC know it has a place to send the AIF's to. */ dev->aif_thread = 1; add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); set_current_state(TASK_INTERRUPTIBLE); dprintk ((KERN_INFO "aac_command_thread start\n")); while (1) { aac_process_events(dev); /* * Background activity */ if ((time_before(next_check_jiffies,next_jiffies)) && ((difference = next_check_jiffies - jiffies) <= 0)) { next_check_jiffies = next_jiffies; if (aac_adapter_check_health(dev) == 0) { difference = ((long)(unsigned)check_interval) * HZ; next_check_jiffies = jiffies + difference; } else if (!dev->queues) break; } if (!time_before(next_check_jiffies,next_jiffies) && ((difference = next_jiffies - jiffies) <= 0)) { struct timespec64 now; int ret; /* Don't even try to talk to adapter if its sick */ ret = aac_adapter_check_health(dev); if (ret || !dev->queues) break; next_check_jiffies = jiffies + ((long)(unsigned)check_interval) * HZ; ktime_get_real_ts64(&now); /* Synchronize our watches */ if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) && (now.tv_nsec > (NSEC_PER_SEC / HZ))) difference = HZ + HZ / 2 - now.tv_nsec / (NSEC_PER_SEC / HZ); else { if (now.tv_nsec > NSEC_PER_SEC / 2) ++now.tv_sec; if (dev->sa_firmware) ret = aac_send_safw_hostttime(dev, &now); else ret = aac_send_hosttime(dev, &now); difference = (long)(unsigned)update_interval*HZ; } next_jiffies = jiffies + difference; if (time_before(next_check_jiffies,next_jiffies)) difference = next_check_jiffies - jiffies; } if (difference <= 0) difference = 1; set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; /* * we probably want usleep_range() here instead of the * jiffies computation */ schedule_timeout(difference); if (kthread_should_stop()) break; } if (dev->queues) remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); dev->aif_thread = 0; return 0; } int aac_acquire_irq(struct aac_dev *dev) { int i; int j; int ret = 0; if (!dev->sync_mode && 
dev->msi_enabled && dev->max_msix > 1) { for (i = 0; i < dev->max_msix; i++) { dev->aac_msix[i].vector_no = i; dev->aac_msix[i].dev = dev; if (request_irq(pci_irq_vector(dev->pdev, i), dev->a_ops.adapter_intr, 0, "aacraid", &(dev->aac_msix[i]))) { printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", dev->name, dev->id, i); for (j = 0 ; j < i ; j++) free_irq(pci_irq_vector(dev->pdev, j), &(dev->aac_msix[j])); pci_disable_msix(dev->pdev); ret = -1; } } } else { dev->aac_msix[0].vector_no = 0; dev->aac_msix[0].dev = dev; if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); printk(KERN_ERR "%s%d: Interrupt unavailable.\n", dev->name, dev->id); ret = -1; } } return ret; } void aac_free_irq(struct aac_dev *dev) { int i; if (aac_is_src(dev)) { if (dev->max_msix > 1) { for (i = 0; i < dev->max_msix; i++) free_irq(pci_irq_vector(dev->pdev, i), &(dev->aac_msix[i])); } else { free_irq(dev->pdev->irq, &(dev->aac_msix[0])); } } else { free_irq(dev->pdev->irq, dev); } if (dev->msi) pci_disable_msi(dev->pdev); else if (dev->max_msix > 1) pci_disable_msix(dev->pdev); }
linux-master
drivers/scsi/aacraid/commsup.c
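The following is a minimal, self-contained userspace sketch (not driver code) of the lock-protected free-list pattern that aac_fib_setup(), aac_fib_alloc() and aac_fib_free() in commsup.c above implement around dev->free_fib: the fibs are forward-chained once at setup, allocation pops the list head under a lock, and freeing pushes the entry back onto the head. The struct demo_fib type, the pthread mutex and the pool size are stand-ins invented for this example; the real driver uses struct fib, a spinlock and DMA-coherent hardware FIB memory.

/*
 * Illustrative sketch only: a userspace analogue of the aacraid free-fib
 * list.  All names here are inventions for the example.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_fib {
	struct demo_fib *next;   /* forward chain, like fib->next            */
	int in_use;              /* stands in for the driver's state fields  */
};

static struct demo_fib pool[8];
static struct demo_fib *free_list;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Chain every entry onto the free list once, mirroring aac_fib_setup(). */
static void demo_fib_setup(void)
{
	size_t i;

	for (i = 0; i + 1 < sizeof(pool) / sizeof(pool[0]); i++)
		pool[i].next = &pool[i + 1];
	pool[i].next = NULL;
	free_list = &pool[0];
}

/* Pop the head under the lock; NULL means the pool is exhausted. */
static struct demo_fib *demo_fib_alloc(void)
{
	struct demo_fib *f;

	pthread_mutex_lock(&pool_lock);
	f = free_list;
	if (f)
		free_list = f->next;
	pthread_mutex_unlock(&pool_lock);
	if (f)
		f->in_use = 1;
	return f;
}

/* Push the entry back onto the head of the free list. */
static void demo_fib_free(struct demo_fib *f)
{
	f->in_use = 0;
	pthread_mutex_lock(&pool_lock);
	f->next = free_list;
	free_list = f;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct demo_fib *a, *b;

	demo_fib_setup();
	a = demo_fib_alloc();
	b = demo_fib_alloc();
	printf("allocated %p and %p\n", (void *)a, (void *)b);
	demo_fib_free(b);
	demo_fib_free(a);
	return 0;
}

Pushing and popping only at the head keeps both operations O(1) under the lock, which is why the driver can afford to take the list lock in its I/O submission path.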
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. ([email protected])
 *               2016-2017 Microsemi Corp. ([email protected])
 *
 * Module Name:
 *  rkt.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 */

#include <linux/blkdev.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

#define AAC_NUM_IO_FIB_RKT	(246 - AAC_NUM_MGT_FIB)

/**
 *	aac_rkt_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
{
	int retval;

	retval = aac_rx_select_comm(dev, comm);
	if (comm == AAC_COMM_MESSAGE) {
		/*
		 * FIB Setup has already been done, but we can minimize the
		 * damage by at least ensuring the OS never issues more
		 * commands than we can handle. The Rocket adapters currently
		 * can only handle 246 commands and 8 AIFs at the same time,
		 * and in fact do notify us accordingly if we negotiate the
		 * FIB size. The problem that causes us to add this check is
		 * to ensure that we do not overdo it with the adapter when a
		 * hard coded FIB override is being utilized. This special
		 * case warrants this half baked, but convenient, check here.
		 */
		if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
			dev->init->r7.max_io_commands =
				cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
			dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
		}
	}
	return retval;
}

/**
 *	aac_rkt_ioremap
 *	@dev: device to ioremap
 *	@size: mapping resize request
 *
 */
static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rkt);
		return 0;
	}
	dev->base = dev->regs.rkt = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &dev->regs.rkt->IndexRegs;
	return 0;
}

/**
 *	aac_rkt_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.
 */
int aac_rkt_init(struct aac_dev *dev)
{
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
	dev->a_ops.adapter_comm = aac_rkt_select_comm;

	return _aac_rx_init(dev);
}
linux-master
drivers/scsi/aacraid/rkt.c
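aac_rkt_select_comm() above caps the SCSI host's can_queue at AAC_NUM_IO_FIB_RKT because Rocket firmware can only hold 246 outstanding FIBs, some of which are reserved for management traffic. The arithmetic is restated below as a standalone sketch; AAC_NUM_MGT_FIB lives in aacraid.h and is assumed to be 8 here, and clamp_can_queue() is an illustrative helper, not a driver function.

#include <stdio.h>

/* AAC_NUM_MGT_FIB is defined in aacraid.h; 8 is assumed for illustration. */
#define AAC_NUM_MGT_FIB		8
#define AAC_NUM_IO_FIB_RKT	(246 - AAC_NUM_MGT_FIB)

/* Mirrors the clamp in aac_rkt_select_comm(): I/O commands are limited to
 * the 246 total FIBs minus those reserved for management. */
static int clamp_can_queue(int can_queue, int *max_io_commands)
{
	if (can_queue > AAC_NUM_IO_FIB_RKT) {
		*max_io_commands = AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB;
		can_queue = AAC_NUM_IO_FIB_RKT;
	}
	return can_queue;
}

int main(void)
{
	int max_io = 0;
	int q = clamp_can_queue(512, &max_io);

	printf("can_queue=%d max_io_commands=%d\n", q, max_io); /* 238, 246 */
	return 0;
}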
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * linit.c * * Abstract: Linux Driver entry module for Adaptec RAID Array Controller */ #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/blk-mq-pci.h> #include <linux/completion.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/msdos_partition.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <scsi/scsi_eh.h> #include "aacraid.h" #define AAC_DRIVER_VERSION "1.2.1" #ifndef AAC_DRIVER_BRANCH #define AAC_DRIVER_BRANCH "" #endif #define AAC_DRIVERNAME "aacraid" #ifdef AAC_DRIVER_BUILD #define _str(x) #x #define str(x) _str(x) #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH #else #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH #endif MODULE_AUTHOR("Red Hat Inc and Adaptec"); MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " "Adaptec Advanced Raid Products, " "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(AAC_DRIVER_FULL_VERSION); static DEFINE_MUTEX(aac_mutex); static LIST_HEAD(aac_devices); static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED; char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; /* * Because of the way Linux names scsi devices, the order in this table has * become important. Check for on-board Raid first, add-in cards second. * * Note: The last field is used to index into aac_drivers below. 
*/ static const struct pci_device_id aac_pci_tbl[] = { { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */ { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */ { 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */ { 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */ { 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */ { 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */ { 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */ { 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */ { 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */ { 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */ { 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */ { 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */ { 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */ { 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */ { 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */ { 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */ { 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */ { 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */ { 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */ { 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */ { 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */ { 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */ { 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */ { 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */ { 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */ { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */ { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */ { 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */ { 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */ { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */ { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */ { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */ { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */ { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */ { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */ { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */ { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */ { 0x9005, 
0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */ { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */ { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */ { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */ { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */ { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */ { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */ { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */ { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */ { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */ { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/ { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */ { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */ { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */ { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */ { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */ { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */ { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */ { 0,} }; MODULE_DEVICE_TABLE(pci, aac_pci_tbl); /* * dmb - For now we add the number of channels to this structure. * In the future we should add a fib that reports the number of channels * for the card. 
At that time we can remove the channels from here */ static struct aac_driver_ident aac_drivers[] = { { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */ { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */ { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */ { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */ { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */ { aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* 
ICP5445AU (Hurricane44) */ { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */ { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */ { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */ { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */ { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */ { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */ { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */ { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/ { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */ { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */ { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */ { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ }; /** * aac_queuecommand - queue a SCSI command * @shost: Scsi host to queue command on * @cmd: SCSI command to queue * * Queues a 
command for execution by the associated Host Adapter. * * TODO: unify with aac_scsi_cmd(). */ static int aac_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL; return aac_scsi_cmd(cmd) ? FAILED : 0; } /** * aac_info - Returns the host adapter name * @shost: Scsi host to report on * * Returns a static string describing the device in question */ static const char *aac_info(struct Scsi_Host *shost) { struct aac_dev *dev = (struct aac_dev *)shost->hostdata; return aac_drivers[dev->cardtype].name; } /** * aac_get_driver_ident * @devtype: index into lookup table * * Returns a pointer to the entry in the driver lookup table. */ struct aac_driver_ident* aac_get_driver_ident(int devtype) { return &aac_drivers[devtype]; } /** * aac_biosparm - return BIOS parameters for disk * @sdev: The scsi device corresponding to the disk * @bdev: the block device corresponding to the disk * @capacity: the sector capacity of the disk * @geom: geometry block to fill in * * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk. * The default disk geometry is 64 heads, 32 sectors, and the appropriate * number of cylinders so as not to exceed drive capacity. In order for * disks equal to or larger than 1 GB to be addressable by the BIOS * without exceeding the BIOS limitation of 1024 cylinders, Extended * Translation should be enabled. With Extended Translation enabled, * drives between 1 GB inclusive and 2 GB exclusive are given a disk * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive * are given a disk geometry of 255 heads and 63 sectors. However, if * the BIOS detects that the Extended Translation setting does not match * the geometry in the partition table, then the translation inferred * from the partition table will be used by the BIOS, and a warning may * be displayed. */ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *geom) { struct diskparm *param = (struct diskparm *)geom; unsigned char *buf; dprintk((KERN_DEBUG "aac_biosparm.\n")); /* * Assuming extended translation is enabled - #REVISIT# */ if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */ if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */ param->heads = 255; param->sectors = 63; } else { param->heads = 128; param->sectors = 32; } } else { param->heads = 64; param->sectors = 32; } param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); /* * Read the first 1024 bytes from the disk device, if the boot * sector partition table is valid, search for a partition table * entry whose end_head matches one of the standard geometry * translations ( 64/32, 128/32, 255/63 ). 
*/ buf = scsi_bios_ptable(bdev); if (!buf) return 0; if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) { struct msdos_partition *first = (struct msdos_partition *)buf; struct msdos_partition *entry = first; int saved_cylinders = param->cylinders; int num; unsigned char end_head, end_sec; for(num = 0; num < 4; num++) { end_head = entry->end_head; end_sec = entry->end_sector & 0x3f; if(end_head == 63) { param->heads = 64; param->sectors = 32; break; } else if(end_head == 127) { param->heads = 128; param->sectors = 32; break; } else if(end_head == 254) { param->heads = 255; param->sectors = 63; break; } entry++; } if (num == 4) { end_head = first->end_head; end_sec = first->end_sector & 0x3f; } param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); if (num < 4 && end_sec == param->sectors) { if (param->cylinders != saved_cylinders) { dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n", param->heads, param->sectors, num)); } } else if (end_head > 0 || end_sec > 0) { dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n", end_head + 1, end_sec, num)); dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n", param->heads, param->sectors)); } } kfree(buf); return 0; } /** * aac_slave_configure - compute queue depths * @sdev: SCSI device we are considering * * Selects queue depths for each target device based on the host adapter's * total capacity and the queue depth supported by the target device. * A queue depth of one automatically disables tagged queueing. */ static int aac_slave_configure(struct scsi_device *sdev) { struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; int chn, tid; unsigned int depth = 0; unsigned int set_timeout = 0; int timeout = 0; bool set_qd_dev_type = false; u8 devtype = 0; chn = aac_logical_to_phys(sdev_channel(sdev)); tid = sdev_id(sdev); if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) { devtype = aac->hba_map[chn][tid].devtype; if (devtype == AAC_DEVTYPE_NATIVE_RAW) { depth = aac->hba_map[chn][tid].qd_limit; set_timeout = 1; goto common_config; } if (devtype == AAC_DEVTYPE_ARC_RAW) { set_qd_dev_type = true; set_timeout = 1; goto common_config; } } if (aac->jbod && (sdev->type == TYPE_DISK)) sdev->removable = 1; if (sdev->type == TYPE_DISK && sdev_channel(sdev) != CONTAINER_CHANNEL && (!aac->jbod || sdev->inq_periph_qual) && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) { if (expose_physicals == 0) return -ENXIO; if (expose_physicals < 0) sdev->no_uld_attach = 1; } if (sdev->tagged_supported && sdev->type == TYPE_DISK && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) && !sdev->no_uld_attach) { struct scsi_device * dev; struct Scsi_Host *host = sdev->host; unsigned num_lsu = 0; unsigned num_one = 0; unsigned cid; set_timeout = 1; for (cid = 0; cid < aac->maximum_num_containers; ++cid) if (aac->fsa_dev[cid].valid) ++num_lsu; __shost_for_each_device(dev, host) { if (dev->tagged_supported && dev->type == TYPE_DISK && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) && !dev->no_uld_attach) { if ((sdev_channel(dev) != CONTAINER_CHANNEL) || !aac->fsa_dev[sdev_id(dev)].valid) { ++num_lsu; } } else { ++num_one; } } if (num_lsu == 0) ++num_lsu; depth = (host->can_queue - num_one) / num_lsu; if (sdev_channel(sdev) != NATIVE_CHANNEL) goto common_config; set_qd_dev_type = true; } common_config: /* * Check if SATA drive */ if (set_qd_dev_type) { if (strncmp(sdev->vendor, "ATA", 3) == 0) depth = 32; else depth = 64; } /* * Firmware 
has an individual device recovery time typically * of 35 seconds, give us a margin. Thor devices can take longer in * error recovery, hence different value. */ if (set_timeout) { timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT; blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); } if (depth > 256) depth = 256; else if (depth < 1) depth = 1; scsi_change_queue_depth(sdev, depth); sdev->tagged_supported = 1; return 0; } static void aac_map_queues(struct Scsi_Host *shost) { struct aac_dev *aac = (struct aac_dev *)shost->hostdata; blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], aac->pdev, 0); aac->use_map_queue = true; } /** * aac_change_queue_depth - alter queue depths * @sdev: SCSI device we are considering * @depth: desired queue depth * * Alters queue depths for target device based on the host adapter's * total capacity and the queue depth supported by the target device. */ static int aac_change_queue_depth(struct scsi_device *sdev, int depth) { struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); int chn, tid, is_native_device = 0; chn = aac_logical_to_phys(sdev_channel(sdev)); tid = sdev_id(sdev); if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) is_native_device = 1; if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && (sdev_channel(sdev) == CONTAINER_CHANNEL)) { struct scsi_device * dev; struct Scsi_Host *host = sdev->host; unsigned num = 0; __shost_for_each_device(dev, host) { if (dev->tagged_supported && (dev->type == TYPE_DISK) && (sdev_channel(dev) == CONTAINER_CHANNEL)) ++num; ++num; } if (num >= host->can_queue) num = host->can_queue - 1; if (depth > (host->can_queue - num)) depth = host->can_queue - num; if (depth > 256) depth = 256; else if (depth < 2) depth = 2; return scsi_change_queue_depth(sdev, depth); } else if (is_native_device) { scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit); } else { scsi_change_queue_depth(sdev, 1); } return sdev->queue_depth; } static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); if (sdev_channel(sdev) != CONTAINER_CHANNEL) return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach ? "Hidden\n" : ((aac->jbod && (sdev->type == TYPE_DISK)) ? 
"JBOD\n" : "")); return snprintf(buf, PAGE_SIZE, "%s\n", get_container_type(aac->fsa_dev[sdev_id(sdev)].type)); } static struct device_attribute aac_raid_level_attr = { .attr = { .name = "level", .mode = S_IRUGO, }, .show = aac_show_raid_level }; static ssize_t aac_show_unique_id(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); unsigned char sn[16]; memset(sn, 0, sizeof(sn)); if (sdev_channel(sdev) == CONTAINER_CHANNEL) memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn)); return snprintf(buf, 16 * 2 + 2, "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", sn[0], sn[1], sn[2], sn[3], sn[4], sn[5], sn[6], sn[7], sn[8], sn[9], sn[10], sn[11], sn[12], sn[13], sn[14], sn[15]); } static struct device_attribute aac_unique_id_attr = { .attr = { .name = "unique_id", .mode = 0444, }, .show = aac_show_unique_id }; static struct attribute *aac_dev_attrs[] = { &aac_raid_level_attr.attr, &aac_unique_id_attr.attr, NULL, }; ATTRIBUTE_GROUPS(aac_dev); static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) { int retval; struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; if (!capable(CAP_SYS_RAWIO)) return -EPERM; retval = aac_adapter_check_health(dev); if (retval) return -EBUSY; return aac_do_ioctl(dev, cmd, arg); } struct fib_count_data { int mlcnt; int llcnt; int ehcnt; int fwcnt; int krlcnt; }; static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data) { struct fib_count_data *fib_count = data; switch (aac_priv(scmnd)->owner) { case AAC_OWNER_FIRMWARE: fib_count->fwcnt++; break; case AAC_OWNER_ERROR_HANDLER: fib_count->ehcnt++; break; case AAC_OWNER_LOWLEVEL: fib_count->llcnt++; break; case AAC_OWNER_MIDLEVEL: fib_count->mlcnt++; break; default: fib_count->krlcnt++; break; } return true; } /* Called during SCSI EH, so we don't need to block requests */ static int get_num_of_incomplete_fibs(struct aac_dev *aac) { struct Scsi_Host *shost = aac->scsi_host_ptr; struct device *ctrl_dev; struct fib_count_data fcnt = { }; scsi_host_busy_iter(shost, fib_count_iter, &fcnt); ctrl_dev = &aac->pdev->dev; dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt); dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt); dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt); dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt); dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt); return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt; } static int aac_eh_abort(struct scsi_cmnd* cmd) { struct aac_cmd_priv *cmd_priv = aac_priv(cmd); struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; int count, found; u32 bus, cid; int ret = FAILED; if (aac_adapter_check_health(aac)) return ret; bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { struct fib *fib; struct aac_hba_tm_req *tmf; int status; u64 address; pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n", AAC_DRIVERNAME, host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun); found = 0; for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { fib = &aac->fibs[count]; if (*(u8 *)fib->hw_fib_va != 0 && (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && (fib->callback_data == cmd)) { found = 1; break; } } if (!found) return ret; /* start a 
HBA_TMF_ABORT_TASK TMF request */ fib = aac_fib_alloc(aac); if (!fib) return ret; tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; memset(tmf, 0, sizeof(*tmf)); tmf->tmf = HBA_TMF_ABORT_TASK; tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus; tmf->lun[1] = cmd->device->lun; address = (u64)fib->hw_error_pa; tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); fib->hbacmd_size = sizeof(*tmf); cmd_priv->sent_command = 0; status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, (fib_callback) aac_hba_callback, (void *) cmd); if (status != -EINPROGRESS) { aac_fib_complete(fib); aac_fib_free(fib); return ret; } /* Wait up to 15 secs for completion */ for (count = 0; count < 15; ++count) { if (cmd_priv->sent_command) { ret = SUCCESS; break; } msleep(1000); } if (ret != SUCCESS) pr_err("%s: Host adapter abort request timed out\n", AAC_DRIVERNAME); } else { pr_err( "%s: Host adapter abort request.\n" "%s: Outstanding commands on (%d,%d,%d,%d):\n", AAC_DRIVERNAME, AAC_DRIVERNAME, host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun); switch (cmd->cmnd[0]) { case SERVICE_ACTION_IN_16: if (!(aac->raw_io_interface) || !(aac->raw_io_64) || ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; fallthrough; case INQUIRY: case READ_CAPACITY: /* * Mark associated FIB to not complete, * eh handler does this */ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { struct fib *fib = &aac->fibs[count]; if (fib->hw_fib_va->header.XferState && (fib->flags & FIB_CONTEXT_FLAG) && (fib->callback_data == cmd)) { fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; cmd_priv->owner = AAC_OWNER_ERROR_HANDLER; ret = SUCCESS; } } break; case TEST_UNIT_READY: /* * Mark associated FIB to not complete, * eh handler does this */ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { struct scsi_cmnd *command; struct fib *fib = &aac->fibs[count]; command = fib->callback_data; if ((fib->hw_fib_va->header.XferState & cpu_to_le32 (Async | NoResponseExpected)) && (fib->flags & FIB_CONTEXT_FLAG) && ((command)) && (command->device == cmd->device)) { fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; aac_priv(command)->owner = AAC_OWNER_ERROR_HANDLER; if (command == cmd) ret = SUCCESS; } } break; } } return ret; } static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info, struct fib *fib, u64 tmf_lun) { struct aac_hba_tm_req *tmf; u64 address; /* start a HBA_TMF_LUN_RESET TMF request */ tmf = (struct aac_hba_tm_req *)fib->hw_fib_va; memset(tmf, 0, sizeof(*tmf)); tmf->tmf = HBA_TMF_LUN_RESET; tmf->it_nexus = info->rmw_nexus; int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun); address = (u64)fib->hw_error_pa; tmf->error_ptr_hi = cpu_to_le32 ((u32)(address >> 32)); tmf->error_ptr_lo = cpu_to_le32 ((u32)(address & 0xffffffff)); tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); fib->hbacmd_size = sizeof(*tmf); return HBA_IU_TYPE_SCSI_TM_REQ; } static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info, struct fib *fib) { struct aac_hba_reset_req *rst; u64 address; /* already tried, start a hard reset now */ rst = (struct aac_hba_reset_req *)fib->hw_fib_va; memset(rst, 0, sizeof(*rst)); rst->it_nexus = info->rmw_nexus; address = (u64)fib->hw_error_pa; rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); fib->hbacmd_size = sizeof(*rst); return HBA_IU_TYPE_SATA_REQ; } 
static void aac_tmf_callback(void *context, struct fib *fibptr) { struct aac_hba_resp *err = &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; struct aac_hba_map_info *info = context; int res; switch (err->service_response) { case HBA_RESP_SVCRES_TMF_REJECTED: res = -1; break; case HBA_RESP_SVCRES_TMF_LUN_INVALID: res = 0; break; case HBA_RESP_SVCRES_TMF_COMPLETE: case HBA_RESP_SVCRES_TMF_SUCCEEDED: res = 0; break; default: res = -2; break; } aac_fib_complete(fibptr); info->reset_state = res; } /* * aac_eh_dev_reset - Device reset command handling * @scsi_cmd: SCSI command block causing the reset * */ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) { struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; struct aac_hba_map_info *info; int count; u32 bus, cid; struct fib *fib; int ret = FAILED; int status; u8 command; bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) return FAILED; info = &aac->hba_map[bus][cid]; if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && !(info->reset_state > 0))) return FAILED; pr_err("%s: Host device reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); if (!fib) return ret; /* start a HBA_TMF_LUN_RESET TMF request */ command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun); info->reset_state = 1; status = aac_hba_send(command, fib, (fib_callback) aac_tmf_callback, (void *) info); if (status != -EINPROGRESS) { info->reset_state = 0; aac_fib_complete(fib); aac_fib_free(fib); return ret; } /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state == 0) { ret = info->reset_state == 0 ? SUCCESS : FAILED; break; } msleep(1000); } return ret; } /* * aac_eh_target_reset - Target reset command handling * @scsi_cmd: SCSI command block causing the reset * */ static int aac_eh_target_reset(struct scsi_cmnd *cmd) { struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; struct aac_hba_map_info *info; int count; u32 bus, cid; int ret = FAILED; struct fib *fib; int status; u8 command; bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) return FAILED; info = &aac->hba_map[bus][cid]; if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && !(info->reset_state > 0))) return FAILED; pr_err("%s: Host target reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); if (!fib) return ret; /* already tried, start a hard reset now */ command = aac_eh_tmf_hard_reset_fib(info, fib); info->reset_state = 2; status = aac_hba_send(command, fib, (fib_callback) aac_tmf_callback, (void *) info); if (status != -EINPROGRESS) { info->reset_state = 0; aac_fib_complete(fib); aac_fib_free(fib); return ret; } /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state <= 0) { ret = info->reset_state == 0 ? SUCCESS : FAILED; break; } msleep(1000); } return ret; } /* * aac_eh_bus_reset - Bus reset command handling * @scsi_cmd: SCSI command block causing the reset * */ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) { struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; int count; u32 cmd_bus; int status = 0; cmd_bus = aac_logical_to_phys(scmd_channel(cmd)); /* Mark the assoc. 
FIB to not complete, eh handler does this */ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { struct fib *fib = &aac->fibs[count]; if (fib->hw_fib_va->header.XferState && (fib->flags & FIB_CONTEXT_FLAG) && (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) { struct aac_hba_map_info *info; u32 bus, cid; cmd = (struct scsi_cmnd *)fib->callback_data; bus = aac_logical_to_phys(scmd_channel(cmd)); if (bus != cmd_bus) continue; cid = scmd_id(cmd); info = &aac->hba_map[bus][cid]; if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || info->devtype != AAC_DEVTYPE_NATIVE_RAW) { fib->flags |= FIB_CONTEXT_FLAG_EH_RESET; aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER; } } } pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME); /* * Check the health of the controller */ status = aac_adapter_check_health(aac); if (status) dev_err(&aac->pdev->dev, "Adapter health - %d\n", status); count = get_num_of_incomplete_fibs(aac); return (count == 0) ? SUCCESS : FAILED; } /* * aac_eh_host_reset - Host reset command handling * @scsi_cmd: SCSI command block causing the reset * */ static int aac_eh_host_reset(struct scsi_cmnd *cmd) { struct scsi_device * dev = cmd->device; struct Scsi_Host * host = dev->host; struct aac_dev * aac = (struct aac_dev *)host->hostdata; int ret = FAILED; __le32 supported_options2 = 0; bool is_mu_reset; bool is_ignore_reset; bool is_doorbell_reset; /* * Check if reset is supported by the firmware */ supported_options2 = aac->supplement_adapter_info.supported_options2; is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET; is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET; is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET; /* * This adapter needs a blind reset, only do so for * Adapters that support a register, instead of a commanded, * reset. */ if ((is_mu_reset || is_doorbell_reset) && aac_check_reset && (aac_check_reset != -1 || !is_ignore_reset)) { /* Bypass wait for command quiesce */ if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0) ret = SUCCESS; } /* * Reset EH state */ if (ret == SUCCESS) { int bus, cid; struct aac_hba_map_info *info; for (bus = 0; bus < AAC_MAX_BUSES; bus++) { for (cid = 0; cid < AAC_MAX_TARGETS; cid++) { info = &aac->hba_map[bus][cid]; if (info->devtype == AAC_DEVTYPE_NATIVE_RAW) info->reset_state = 0; } } } return ret; } /** * aac_cfg_open - open a configuration file * @inode: inode being opened * @file: file handle attached * * Called when the configuration device is opened. Does the needed * set up on the handle and then returns * * Bugs: This needs extending to check a given adapter is present * so we can support hot plugging, and to ref count adapters. */ static int aac_cfg_open(struct inode *inode, struct file *file) { struct aac_dev *aac; unsigned minor_number = iminor(inode); int err = -ENODEV; mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */ list_for_each_entry(aac, &aac_devices, entry) { if (aac->id == minor_number) { file->private_data = aac; err = 0; break; } } mutex_unlock(&aac_mutex); return err; } /** * aac_cfg_ioctl - AAC configuration request * @file: file handle * @cmd: ioctl command code * @arg: argument * * Handles a configuration ioctl. Currently this involves wrapping it * up and feeding it into the nasty windowsalike glue layer. 
* * Bugs: Needs locking against parallel ioctls lower down * Bugs: Needs to handle hot plugging */ static long aac_cfg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct aac_dev *aac = (struct aac_dev *)file->private_data; if (!capable(CAP_SYS_RAWIO)) return -EPERM; return aac_do_ioctl(aac, cmd, (void __user *)arg); } static ssize_t aac_show_model(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len; if (dev->supplement_adapter_info.adapter_type_text[0]) { char *cp = dev->supplement_adapter_info.adapter_type_text; while (*cp && *cp != ' ') ++cp; while (*cp == ' ') ++cp; len = snprintf(buf, PAGE_SIZE, "%s\n", cp); } else len = snprintf(buf, PAGE_SIZE, "%s\n", aac_drivers[dev->cardtype].model); return len; } static ssize_t aac_show_vendor(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; struct aac_supplement_adapter_info *sup_adap_info; int len; sup_adap_info = &dev->supplement_adapter_info; if (sup_adap_info->adapter_type_text[0]) { char *cp = sup_adap_info->adapter_type_text; while (*cp && *cp != ' ') ++cp; len = snprintf(buf, PAGE_SIZE, "%.*s\n", (int)(cp - (char *)sup_adap_info->adapter_type_text), sup_adap_info->adapter_type_text); } else len = snprintf(buf, PAGE_SIZE, "%s\n", aac_drivers[dev->cardtype].vname); return len; } static ssize_t aac_show_flags(struct device *cdev, struct device_attribute *attr, char *buf) { int len = 0; struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata; if (nblank(dprintk(x))) len = snprintf(buf, PAGE_SIZE, "dprintk\n"); #ifdef AAC_DETAILED_STATUS_INFO len += scnprintf(buf + len, PAGE_SIZE - len, "AAC_DETAILED_STATUS_INFO\n"); #endif if (dev->raw_io_interface && dev->raw_io_64) len += scnprintf(buf + len, PAGE_SIZE - len, "SAI_READ_CAPACITY_16\n"); if (dev->jbod) len += scnprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n"); if (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_POWER_MANAGEMENT) len += scnprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_POWER_MANAGEMENT\n"); if (dev->msi) len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n"); return len; } static ssize_t aac_show_kernel_version(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len, tmp; tmp = le32_to_cpu(dev->adapter_info.kernelrev); len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, le32_to_cpu(dev->adapter_info.kernelbuild)); return len; } static ssize_t aac_show_monitor_version(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len, tmp; tmp = le32_to_cpu(dev->adapter_info.monitorrev); len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, le32_to_cpu(dev->adapter_info.monitorbuild)); return len; } static ssize_t aac_show_bios_version(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len, tmp; tmp = le32_to_cpu(dev->adapter_info.biosrev); len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n", tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff, le32_to_cpu(dev->adapter_info.biosbuild)); return len; } static ssize_t aac_show_driver_version(struct device *device, struct device_attribute *attr, char *buf) { return 
snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version); } static ssize_t aac_show_serial_number(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len = 0; if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) len = snprintf(buf, 16, "%06X\n", le32_to_cpu(dev->adapter_info.serial[0])); if (len && !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[ sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len], buf, len-1)) len = snprintf(buf, 16, "%.*s\n", (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no), dev->supplement_adapter_info.mfg_pcba_serial_no); return min(len, 16); } static ssize_t aac_show_max_channel(struct device *device, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", class_to_shost(device)->max_channel); } static ssize_t aac_show_max_id(struct device *device, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", class_to_shost(device)->max_id); } static ssize_t aac_store_reset_adapter(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { int retval = -EACCES; if (!capable(CAP_SYS_ADMIN)) return retval; retval = aac_reset_adapter(shost_priv(class_to_shost(device)), buf[0] == '!', IOP_HWSOFT_RESET); if (retval >= 0) retval = count; return retval; } static ssize_t aac_show_reset_adapter(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; int len, tmp; tmp = aac_adapter_check_health(dev); if ((tmp == 0) && dev->in_reset) tmp = -EBUSY; len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp); return len; } static struct device_attribute aac_model = { .attr = { .name = "model", .mode = S_IRUGO, }, .show = aac_show_model, }; static struct device_attribute aac_vendor = { .attr = { .name = "vendor", .mode = S_IRUGO, }, .show = aac_show_vendor, }; static struct device_attribute aac_flags = { .attr = { .name = "flags", .mode = S_IRUGO, }, .show = aac_show_flags, }; static struct device_attribute aac_kernel_version = { .attr = { .name = "hba_kernel_version", .mode = S_IRUGO, }, .show = aac_show_kernel_version, }; static struct device_attribute aac_monitor_version = { .attr = { .name = "hba_monitor_version", .mode = S_IRUGO, }, .show = aac_show_monitor_version, }; static struct device_attribute aac_bios_version = { .attr = { .name = "hba_bios_version", .mode = S_IRUGO, }, .show = aac_show_bios_version, }; static struct device_attribute aac_lld_version = { .attr = { .name = "driver_version", .mode = 0444, }, .show = aac_show_driver_version, }; static struct device_attribute aac_serial_number = { .attr = { .name = "serial_number", .mode = S_IRUGO, }, .show = aac_show_serial_number, }; static struct device_attribute aac_max_channel = { .attr = { .name = "max_channel", .mode = S_IRUGO, }, .show = aac_show_max_channel, }; static struct device_attribute aac_max_id = { .attr = { .name = "max_id", .mode = S_IRUGO, }, .show = aac_show_max_id, }; static struct device_attribute aac_reset = { .attr = { .name = "reset_host", .mode = S_IWUSR|S_IRUGO, }, .store = aac_store_reset_adapter, .show = aac_show_reset_adapter, }; static struct attribute *aac_host_attrs[] = { &aac_model.attr, &aac_vendor.attr, &aac_flags.attr, &aac_kernel_version.attr, &aac_monitor_version.attr, &aac_bios_version.attr, &aac_lld_version.attr, &aac_serial_number.attr, &aac_max_channel.attr, &aac_max_id.attr, &aac_reset.attr, NULL }; 
ATTRIBUTE_GROUPS(aac_host); ssize_t aac_get_serial_number(struct device *device, char *buf) { return aac_show_serial_number(device, &aac_serial_number, buf); } static const struct file_operations aac_cfg_fops = { .owner = THIS_MODULE, .unlocked_ioctl = aac_cfg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = aac_cfg_ioctl, #endif .open = aac_cfg_open, .llseek = noop_llseek, }; static const struct scsi_host_template aac_driver_template = { .module = THIS_MODULE, .name = "AAC", .proc_name = AAC_DRIVERNAME, .info = aac_info, .ioctl = aac_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = aac_ioctl, #endif .queuecommand = aac_queuecommand, .bios_param = aac_biosparm, .shost_groups = aac_host_groups, .slave_configure = aac_slave_configure, .map_queues = aac_map_queues, .change_queue_depth = aac_change_queue_depth, .sdev_groups = aac_dev_groups, .eh_abort_handler = aac_eh_abort, .eh_device_reset_handler = aac_eh_dev_reset, .eh_target_reset_handler = aac_eh_target_reset, .eh_bus_reset_handler = aac_eh_bus_reset, .eh_host_reset_handler = aac_eh_host_reset, .can_queue = AAC_NUM_IO_FIB, .this_id = MAXIMUM_NUM_CONTAINERS, .sg_tablesize = 16, .max_sectors = 128, #if (AAC_NUM_IO_FIB > 256) .cmd_per_lun = 256, #else .cmd_per_lun = AAC_NUM_IO_FIB, #endif .emulated = 1, .no_write_same = 1, .cmd_size = sizeof(struct aac_cmd_priv), }; static void __aac_shutdown(struct aac_dev * aac) { int i; mutex_lock(&aac->ioctl_mutex); aac->adapter_shutdown = 1; mutex_unlock(&aac->ioctl_mutex); if (aac->aif_thread) { int i; /* Clear out events first */ for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) { struct fib *fib = &aac->fibs[i]; if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) complete(&fib->event_wait); } kthread_stop(aac->thread); aac->thread = NULL; } aac_send_shutdown(aac); aac_adapter_disable_int(aac); if (aac_is_src(aac)) { if (aac->max_msix > 1) { for (i = 0; i < aac->max_msix; i++) { free_irq(pci_irq_vector(aac->pdev, i), &(aac->aac_msix[i])); } } else { free_irq(aac->pdev->irq, &(aac->aac_msix[0])); } } else { free_irq(aac->pdev->irq, aac); } if (aac->msi) pci_disable_msi(aac->pdev); else if (aac->max_msix > 1) pci_disable_msix(aac->pdev); } static void aac_init_char(void) { aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops); if (aac_cfg_major < 0) { pr_err("aacraid: unable to register \"aac\" device.\n"); } } void aac_reinit_aif(struct aac_dev *aac, unsigned int index) { /* * Firmware may send a AIF messages very early and the Driver may have * ignored as it is not fully ready to process the messages. Send * AIF to firmware so that if there are any unprocessed events they * can be processed now. */ if (aac_drivers[index].quirks & AAC_QUIRK_SRC) aac_intr_normal(aac, 0, 2, 0, NULL); } static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned index = id->driver_data; struct Scsi_Host *shost; struct aac_dev *aac; struct list_head *insert = &aac_devices; int error; int unique_id = 0; u64 dmamask; int mask_bits = 0; extern int aac_sync_mode; /* * Only series 7 needs freset. 
*/ if (pdev->device == PMC_DEVICE_S7) pdev->needs_freset = 1; list_for_each_entry(aac, &aac_devices, entry) { if (aac->id > unique_id) break; insert = &aac->entry; unique_id++; } pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); error = pci_enable_device(pdev); if (error) goto out; if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (error) { dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed"); goto out_disable_pdev; } } /* * If the quirk31 bit is set, the adapter needs adapter * to driver communication memory to be allocated below 2gig */ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) { dmamask = DMA_BIT_MASK(31); mask_bits = 31; } else { dmamask = DMA_BIT_MASK(32); mask_bits = 32; } error = dma_set_coherent_mask(&pdev->dev, dmamask); if (error) { dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n" , mask_bits); goto out_disable_pdev; } pci_set_master(pdev); shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); if (!shost) { error = -ENOMEM; goto out_disable_pdev; } shost->irq = pdev->irq; shost->unique_id = unique_id; shost->max_cmd_len = 16; if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT) aac_init_char(); aac = (struct aac_dev *)shost->hostdata; aac->base_start = pci_resource_start(pdev, 0); aac->scsi_host_ptr = shost; aac->pdev = pdev; aac->name = aac_driver_template.name; aac->id = shost->unique_id; aac->cardtype = index; INIT_LIST_HEAD(&aac->entry); if (aac_reset_devices || reset_devices) aac->init_reset = true; aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, sizeof(struct fib), GFP_KERNEL); if (!aac->fibs) { error = -ENOMEM; goto out_free_host; } spin_lock_init(&aac->fib_lock); mutex_init(&aac->ioctl_mutex); mutex_init(&aac->scan_mutex); INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker); INIT_DELAYED_WORK(&aac->src_reinit_aif_worker, aac_src_reinit_aif_worker); /* * Map in the registers from the adapter. */ aac->base_size = AAC_MIN_FOOTPRINT_SIZE; if ((*aac_drivers[index].init)(aac)) { error = -ENODEV; goto out_unmap; } if (aac->sync_mode) { if (aac_sync_mode) printk(KERN_INFO "%s%d: Sync. mode enforced " "by driver parameter. This will cause " "a significant performance decrease!\n", aac->name, aac->id); else printk(KERN_INFO "%s%d: Async. mode not supported " "by current driver, sync. mode enforced." "\nPlease update driver to get full performance.\n", aac->name, aac->id); } /* * Start any kernel threads needed */ aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME); if (IS_ERR(aac->thread)) { printk(KERN_ERR "aacraid: Unable to create command thread.\n"); error = PTR_ERR(aac->thread); aac->thread = NULL; goto out_deinit; } aac->maximum_num_channels = aac_drivers[index].channels; error = aac_get_adapter_info(aac); if (error < 0) goto out_deinit; /* * Lets override negotiations and drop the maximum SG limit to 34 */ if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && (shost->sg_tablesize > 34)) { shost->sg_tablesize = 34; shost->max_sectors = (shost->sg_tablesize * 8) + 112; } if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) && (shost->sg_tablesize > 17)) { shost->sg_tablesize = 17; shost->max_sectors = (shost->sg_tablesize * 8) + 112; } if (aac->adapter_info.options & AAC_OPT_NEW_COMM) shost->max_segment_size = shost->max_sectors << 9; else shost->max_segment_size = 65536; /* * Firmware printf works only with older firmware. 
*/ if (aac_drivers[index].quirks & AAC_QUIRK_34SG) aac->printf_enabled = 1; else aac->printf_enabled = 0; /* * max channel will be the physical channels plus 1 virtual channel * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) * physical channels are address by their actual physical number+1 */ if (aac->nondasd_support || expose_physicals || aac->jbod) shost->max_channel = aac->maximum_num_channels; else shost->max_channel = 0; aac_get_config_status(aac, 0); aac_get_containers(aac); list_add(&aac->entry, insert); shost->max_id = aac->maximum_num_containers; if (shost->max_id < aac->maximum_num_physicals) shost->max_id = aac->maximum_num_physicals; if (shost->max_id < MAXIMUM_NUM_CONTAINERS) shost->max_id = MAXIMUM_NUM_CONTAINERS; else shost->this_id = shost->max_id; if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC) aac_intr_normal(aac, 0, 2, 0, NULL); /* * dmb - we may need to move the setting of these parms somewhere else once * we get a fib that can report the actual numbers */ shost->max_lun = AAC_MAX_LUN; pci_set_drvdata(pdev, shost); shost->nr_hw_queues = aac->max_msix; shost->host_tagset = 1; error = scsi_add_host(shost, &pdev->dev); if (error) goto out_deinit; aac_scan_host(aac); pci_save_state(pdev); return 0; out_deinit: __aac_shutdown(aac); out_unmap: aac_fib_map_free(aac); if (aac->comm_addr) dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, aac->comm_phys); kfree(aac->queues); aac_adapter_ioremap(aac, 0); kfree(aac->fibs); kfree(aac->fsa_dev); out_free_host: scsi_host_put(shost); out_disable_pdev: pci_disable_device(pdev); out: return error; } static void aac_release_resources(struct aac_dev *aac) { aac_adapter_disable_int(aac); aac_free_irq(aac); } static int aac_acquire_resources(struct aac_dev *dev) { unsigned long status; /* * First clear out all interrupts. Then enable the one's that we * can handle. */ while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING) || status == 0xffffffff) msleep(20); aac_adapter_disable_int(dev); aac_adapter_enable_int(dev); if (aac_is_src(dev)) aac_define_int_mode(dev); if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_ENABLE_MSIX); if (aac_acquire_irq(dev)) goto error_iounmap; aac_adapter_enable_int(dev); /*max msix may change after EEH * Re-assign vectors to fibs */ aac_fib_vector_assign(dev); if (!dev->sync_mode) { /* After EEH recovery or suspend resume, max_msix count * may change, therefore updating in init as well. 
*/ dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix); aac_adapter_start(dev); } return 0; error_iounmap: return -1; } static int __maybe_unused aac_suspend(struct device *dev) { struct Scsi_Host *shost = dev_get_drvdata(dev); struct aac_dev *aac = (struct aac_dev *)shost->hostdata; scsi_host_block(shost); aac_cancel_rescan_worker(aac); aac_send_shutdown(aac); aac_release_resources(aac); return 0; } static int __maybe_unused aac_resume(struct device *dev) { struct Scsi_Host *shost = dev_get_drvdata(dev); struct aac_dev *aac = (struct aac_dev *)shost->hostdata; if (aac_acquire_resources(aac)) goto fail_device; /* * reset this flag to unblock ioctl() as it was set at * aac_send_shutdown() to block ioctls from upperlayer */ aac->adapter_shutdown = 0; scsi_host_unblock(shost, SDEV_RUNNING); return 0; fail_device: printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id); scsi_host_put(shost); return -ENODEV; } static void aac_shutdown(struct pci_dev *dev) { struct Scsi_Host *shost = pci_get_drvdata(dev); scsi_host_block(shost); __aac_shutdown((struct aac_dev *)shost->hostdata); } static void aac_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct aac_dev *aac = (struct aac_dev *)shost->hostdata; aac_cancel_rescan_worker(aac); aac->use_map_queue = false; scsi_remove_host(shost); __aac_shutdown(aac); aac_fib_map_free(aac); dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, aac->comm_phys); kfree(aac->queues); aac_adapter_ioremap(aac, 0); kfree(aac->fibs); kfree(aac->fsa_dev); list_del(&aac->entry); scsi_host_put(shost); pci_disable_device(pdev); if (list_empty(&aac_devices)) { unregister_chrdev(aac_cfg_major, "aac"); aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT; } } static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct aac_dev *aac = shost_priv(shost); dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error); switch (error) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: aac->handle_pci_error = 1; scsi_host_block(shost); aac_cancel_rescan_worker(aac); scsi_host_complete_all_commands(shost, DID_NO_CONNECT); aac_release_resources(aac); aac_adapter_ioremap(aac, 0); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: aac->handle_pci_error = 1; scsi_host_complete_all_commands(shost, DID_NO_CONNECT); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev) { dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n"); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev) { dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n"); pci_restore_state(pdev); if (pci_enable_device(pdev)) { dev_warn(&pdev->dev, "aacraid: failed to enable slave\n"); goto fail_device; } pci_set_master(pdev); if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, "pci_enable_device_mem failed\n"); goto fail_device; } return PCI_ERS_RESULT_RECOVERED; fail_device: dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n"); return PCI_ERS_RESULT_DISCONNECT; } static void aac_pci_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct aac_dev *aac = (struct aac_dev *)shost_priv(shost); if (aac_adapter_ioremap(aac, aac->base_size)) { dev_err(&pdev->dev, "aacraid: ioremap failed\n"); /* remap failed, go back ... 
*/ aac->comm_interface = AAC_COMM_PRODUCER; if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) { dev_warn(&pdev->dev, "aacraid: unable to map adapter.\n"); return; } } msleep(10000); aac_acquire_resources(aac); /* * reset this flag to unblock ioctl() as it was set * at aac_send_shutdown() to block ioctls from upperlayer */ aac->adapter_shutdown = 0; aac->handle_pci_error = 0; scsi_host_unblock(shost, SDEV_RUNNING); aac_scan_host(aac); pci_save_state(pdev); dev_err(&pdev->dev, "aacraid: PCI error - resume\n"); } static struct pci_error_handlers aac_pci_err_handler = { .error_detected = aac_pci_error_detected, .mmio_enabled = aac_pci_mmio_enabled, .slot_reset = aac_pci_slot_reset, .resume = aac_pci_resume, }; static SIMPLE_DEV_PM_OPS(aac_pm_ops, aac_suspend, aac_resume); static struct pci_driver aac_pci_driver = { .name = AAC_DRIVERNAME, .id_table = aac_pci_tbl, .probe = aac_probe_one, .remove = aac_remove_one, .driver.pm = &aac_pm_ops, .shutdown = aac_shutdown, .err_handler = &aac_pci_err_handler, }; static int __init aac_init(void) { int error; printk(KERN_INFO "Adaptec %s driver %s\n", AAC_DRIVERNAME, aac_driver_version); error = pci_register_driver(&aac_pci_driver); if (error < 0) return error; aac_init_char(); return 0; } static void __exit aac_exit(void) { if (aac_cfg_major > -1) unregister_chrdev(aac_cfg_major, "aac"); pci_unregister_driver(&aac_pci_driver); } module_init(aac_init); module_exit(aac_exit);
linux-master
drivers/scsi/aacraid/linit.c
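/*
 * Illustrative sketch, not part of the aacraid sources: the linit.c row above
 * ends with the standard way a PCI SCSI driver publishes its suspend/resume
 * hooks and PCI error-recovery callbacks, as aacraid does with aac_pm_ops,
 * aac_pci_err_handler and aac_pci_driver.  The skeleton below shows only that
 * wiring pattern; the foo_* names, the placeholder device ID and the empty
 * callback bodies are hypothetical stand-ins, not aacraid code.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the adapter, cancel workers, release interrupts, etc. */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* re-acquire resources and unblock the SCSI host */
	return 0;
}

/* Emits a dev_pm_ops with .suspend/.resume populated only under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	/* permanent failure: disconnect; otherwise ask the core for a slot reset */
	return (state == pci_channel_io_perm_failure) ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	pci_restore_state(pdev);
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_error_resume(struct pci_dev *pdev)
{
	/* remap BARs, restart I/O and rescan the host, as aac_pci_resume() does */
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_error_resume,
};

static const struct pci_device_id foo_pci_tbl[] = {
	{ PCI_DEVICE(0x9005, 0x028b) },	/* placeholder vendor/device ID for the sketch */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

static int foo_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* real drivers map BARs, allocate a Scsi_Host and call scsi_add_host() here */
	return pci_enable_device(pdev);
}

static void foo_remove_one(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_pci_tbl,
	.probe		= foo_probe_one,
	.remove		= foo_remove_one,
	.driver.pm	= &foo_pm_ops,
	.err_handler	= &foo_err_handler,
};

/* Equivalent to an explicit init/exit pair (like aac_init/aac_exit) when no extra setup is needed. */
module_pci_driver(foo_pci_driver);
MODULE_LICENSE("GPL");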
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * aachba.c * * Abstract: Contains Interfaces to manage IOs. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/blkdev.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "aacraid.h" /* values for inqd_pdt: Peripheral device type in plain English */ #define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */ #define INQD_PDT_PROC 0x03 /* Processor device */ #define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */ #define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */ #define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */ #define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */ #define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */ #define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */ /* * Sense codes */ #define SENCODE_NO_SENSE 0x00 #define SENCODE_END_OF_DATA 0x00 #define SENCODE_BECOMING_READY 0x04 #define SENCODE_INIT_CMD_REQUIRED 0x04 #define SENCODE_UNRECOVERED_READ_ERROR 0x11 #define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A #define SENCODE_INVALID_COMMAND 0x20 #define SENCODE_LBA_OUT_OF_RANGE 0x21 #define SENCODE_INVALID_CDB_FIELD 0x24 #define SENCODE_LUN_NOT_SUPPORTED 0x25 #define SENCODE_INVALID_PARAM_FIELD 0x26 #define SENCODE_PARAM_NOT_SUPPORTED 0x26 #define SENCODE_PARAM_VALUE_INVALID 0x26 #define SENCODE_RESET_OCCURRED 0x29 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E #define SENCODE_INQUIRY_DATA_CHANGED 0x3F #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39 #define SENCODE_DIAGNOSTIC_FAILURE 0x40 #define SENCODE_INTERNAL_TARGET_FAILURE 0x44 #define SENCODE_INVALID_MESSAGE_ERROR 0x49 #define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c #define SENCODE_OVERLAPPED_COMMAND 0x4E /* * Additional sense codes */ #define ASENCODE_NO_SENSE 0x00 #define ASENCODE_END_OF_DATA 0x05 #define ASENCODE_BECOMING_READY 0x01 #define ASENCODE_INIT_CMD_REQUIRED 0x02 #define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00 #define ASENCODE_INVALID_COMMAND 0x00 #define ASENCODE_LBA_OUT_OF_RANGE 0x00 #define ASENCODE_INVALID_CDB_FIELD 0x00 #define ASENCODE_LUN_NOT_SUPPORTED 0x00 #define ASENCODE_INVALID_PARAM_FIELD 0x00 #define ASENCODE_PARAM_NOT_SUPPORTED 0x01 #define ASENCODE_PARAM_VALUE_INVALID 0x02 #define ASENCODE_RESET_OCCURRED 0x00 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00 #define ASENCODE_INQUIRY_DATA_CHANGED 0x03 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00 #define ASENCODE_DIAGNOSTIC_FAILURE 0x80 #define ASENCODE_INTERNAL_TARGET_FAILURE 0x00 #define ASENCODE_INVALID_MESSAGE_ERROR 0x00 #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00 #define ASENCODE_OVERLAPPED_COMMAND 0x00 #define BYTE0(x) (unsigned char)(x) #define BYTE1(x) (unsigned char)((x) >> 8) #define BYTE2(x) (unsigned char)((x) >> 16) #define BYTE3(x) (unsigned char)((x) >> 24) /* MODE_SENSE data format */ typedef struct { struct { u8 data_length; u8 med_type; u8 dev_par; u8 bd_length; } __attribute__((packed)) hd; struct { u8 dens_code; u8 block_count[3]; u8 
reserved; u8 block_length[3]; } __attribute__((packed)) bd; u8 mpc_buf[3]; } __attribute__((packed)) aac_modep_data; /* MODE_SENSE_10 data format */ typedef struct { struct { u8 data_length[2]; u8 med_type; u8 dev_par; u8 rsrvd[2]; u8 bd_length[2]; } __attribute__((packed)) hd; struct { u8 dens_code; u8 block_count[3]; u8 reserved; u8 block_length[3]; } __attribute__((packed)) bd; u8 mpc_buf[3]; } __attribute__((packed)) aac_modep10_data; /*------------------------------------------------------------------------------ * S T R U C T S / T Y P E D E F S *----------------------------------------------------------------------------*/ /* SCSI inquiry data */ struct inquiry_data { u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */ u8 inqd_dtq; /* RMB | Device Type Qualifier */ u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */ u8 inqd_rdf; /* AENC | TrmIOP | Response data format */ u8 inqd_len; /* Additional length (n-4) */ u8 inqd_pad1[2];/* Reserved - must be zero */ u8 inqd_pad2; /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ u8 inqd_vid[8]; /* Vendor ID */ u8 inqd_pid[16];/* Product ID */ u8 inqd_prl[4]; /* Product Revision Level */ }; /* Added for VPD 0x83 */ struct tvpd_id_descriptor_type_1 { u8 codeset:4; /* VPD_CODE_SET */ u8 reserved:4; u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */ u8 reserved2:4; u8 reserved3; u8 identifierlength; u8 venid[8]; u8 productid[16]; u8 serialnumber[8]; /* SN in ASCII */ }; struct tvpd_id_descriptor_type_2 { u8 codeset:4; /* VPD_CODE_SET */ u8 reserved:4; u8 identifiertype:4; /* VPD_IDENTIFIER_TYPE */ u8 reserved2:4; u8 reserved3; u8 identifierlength; struct teu64id { u32 Serial; /* The serial number supposed to be 40 bits, * bit we only support 32, so make the last byte zero. */ u8 reserved; u8 venid[3]; } eu64id; }; struct tvpd_id_descriptor_type_3 { u8 codeset : 4; /* VPD_CODE_SET */ u8 reserved : 4; u8 identifiertype : 4; /* VPD_IDENTIFIER_TYPE */ u8 reserved2 : 4; u8 reserved3; u8 identifierlength; u8 Identifier[16]; }; struct tvpd_page83 { u8 DeviceType:5; u8 DeviceTypeQualifier:3; u8 PageCode; u8 reserved; u8 PageLength; struct tvpd_id_descriptor_type_1 type1; struct tvpd_id_descriptor_type_2 type2; struct tvpd_id_descriptor_type_3 type3; }; /* * M O D U L E G L O B A L S */ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap); static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg); static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg); static long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max); static long aac_build_sghba(struct scsi_cmnd *scsicmd, struct aac_hba_cmd_req *hbacmd, int sg_max, u64 sg_address); static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new); static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd); static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); static int aac_send_hba_fib(struct scsi_cmnd *scsicmd); #ifdef AAC_DETAILED_STATUS_INFO static char *aac_get_status_string(u32 status); #endif /* * Non dasd selection is handled entirely in aachba now */ static int nondasd = -1; static int aac_cache = 2; /* WCE=0 to avoid performance problems */ static int dacmode = -1; int aac_msi; int aac_commit = -1; int startup_timeout = 180; int aif_timeout = 120; int aac_sync_mode; /* Only Sync. 
transfer - disabled */ static int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */ module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode" " 0=off, 1=on"); module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list" " 0=off, 1=on"); module_param(nondasd, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." " 0=off, 1=on"); module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n" "\tbit 0 - Disable FUA in WRITE SCSI commands\n" "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n" "\tbit 2 - Disable only if Battery is protecting Cache"); module_param(dacmode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC." " 0=off, 1=on"); module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the" " adapter for foreign arrays.\n" "This is typically needed in systems that do not have a BIOS." " 0=off, 1=on"); module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(msi, "IRQ handling." " 0=PIC(default), 1=MSI, 2=MSI-X)"); module_param(startup_timeout, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" " adapter to have its kernel up and\n" "running. This is typically adjusted for large systems that do not" " have a BIOS."); module_param(aif_timeout, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" " applications to pick up AIFs before\n" "deregistering them. This is typically adjusted for heavily burdened" " systems."); int aac_fib_dump; module_param(aac_fib_dump, int, 0644); MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on"); int numacb = -1; module_param(numacb, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" " blocks (FIB) allocated. Valid values are 512 and down. Default is" " to use suggestion from Firmware."); static int acbsize = -1; module_param(acbsize, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)" " size. Valid values are 512, 2048, 4096 and 8192. Default is to use" " suggestion from Firmware."); int update_interval = 30 * 60; module_param(update_interval, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" " updates issued to adapter."); int check_interval = 60; module_param(check_interval, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" " checks."); int aac_check_reset = 1; module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the" " adapter. a value of -1 forces the reset to adapters programmed to" " ignore it."); int expose_physicals = -1; module_param(expose_physicals, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays." 
" -1=protect 0=off, 1=on"); int aac_reset_devices; module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); static int aac_wwn = 1; module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n" "\t0 - Disable\n" "\t1 - Array Meta Data Signature (default)\n" "\t2 - Adapter Serial Number"); static inline int aac_valid_context(struct scsi_cmnd *scsicmd, struct fib *fibptr) { struct scsi_device *device; if (unlikely(!scsicmd)) { dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n")); aac_fib_complete(fibptr); return 0; } aac_priv(scsicmd)->owner = AAC_OWNER_MIDLEVEL; device = scsicmd->device; if (unlikely(!device)) { dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n")); aac_fib_complete(fibptr); return 0; } return 1; } /** * aac_get_config_status - check the adapter configuration * @dev: aac driver data * @commit_flag: force sending CT_COMMIT_CONFIG * * Query config status, and commit the configuration if needed. */ int aac_get_config_status(struct aac_dev *dev, int commit_flag) { int status = 0; struct fib * fibptr; if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; aac_fib_init(fibptr); { struct aac_get_config_status *dinfo; dinfo = (struct aac_get_config_status *) fib_data(fibptr); dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS); dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); } status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_get_config_status), FsaNormal, 1, 1, NULL, NULL); if (status < 0) { printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n"); } else { struct aac_get_config_status_resp *reply = (struct aac_get_config_status_resp *) fib_data(fibptr); dprintk((KERN_WARNING "aac_get_config_status: response=%d status=%d action=%d\n", le32_to_cpu(reply->response), le32_to_cpu(reply->status), le32_to_cpu(reply->data.action))); if ((le32_to_cpu(reply->response) != ST_OK) || (le32_to_cpu(reply->status) != CT_OK) || (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) { printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n"); status = -EINVAL; } } /* Do not set XferState to zero unless receives a response from F/W */ if (status >= 0) aac_fib_complete(fibptr); /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ if (status >= 0) { if ((aac_commit == 1) || commit_flag) { struct aac_commit_config * dinfo; aac_fib_init(fibptr); dinfo = (struct aac_commit_config *) fib_data(fibptr); dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_commit_config), FsaNormal, 1, 1, NULL, NULL); /* Do not set XferState to zero unless * receives a response from F/W */ if (status >= 0) aac_fib_complete(fibptr); } else if (aac_commit == 0) { printk(KERN_WARNING "aac_get_config_status: Foreign device configurations are being ignored\n"); } } /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibptr); return status; } static void aac_expose_phy_device(struct scsi_cmnd *scsicmd) { char inq_data; scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data)); if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) { inq_data &= 0xdf; scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); } } /** * aac_get_containers - list 
containers * @dev: aac driver data * * Make a list of all containers on this controller */ int aac_get_containers(struct aac_dev *dev) { struct fsa_dev_info *fsa_dev_ptr; u32 index; int status = 0; struct fib * fibptr; struct aac_get_container_count *dinfo; struct aac_get_container_count_resp *dresp; int maximum_num_containers = MAXIMUM_NUM_CONTAINERS; if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; aac_fib_init(fibptr); dinfo = (struct aac_get_container_count *) fib_data(fibptr); dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); status = aac_fib_send(ContainerCommand, fibptr, sizeof (struct aac_get_container_count), FsaNormal, 1, 1, NULL, NULL); if (status >= 0) { dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_SUPPORTED_240_VOLUMES) { maximum_num_containers = le32_to_cpu(dresp->MaxSimpleVolumes); } aac_fib_complete(fibptr); } /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibptr); if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) maximum_num_containers = MAXIMUM_NUM_CONTAINERS; if (dev->fsa_dev == NULL || dev->maximum_num_containers != maximum_num_containers) { fsa_dev_ptr = dev->fsa_dev; dev->fsa_dev = kcalloc(maximum_num_containers, sizeof(*fsa_dev_ptr), GFP_KERNEL); kfree(fsa_dev_ptr); fsa_dev_ptr = NULL; if (!dev->fsa_dev) return -ENOMEM; dev->maximum_num_containers = maximum_num_containers; } for (index = 0; index < dev->maximum_num_containers; index++) { dev->fsa_dev[index].devname[0] = '\0'; dev->fsa_dev[index].valid = 0; status = aac_probe_container(dev, index); if (status < 0) { printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n"); break; } } return status; } static void aac_scsi_done(struct scsi_cmnd *scmd) { if (scmd->device->request_queue) { /* SCSI command has been submitted by the SCSI mid-layer. */ scsi_done(scmd); } else { /* SCSI command has been submitted by aac_probe_container(). */ aac_probe_container_scsi_done(scmd); } } static void get_container_name_callback(void *context, struct fib * fibptr) { struct aac_get_name_resp * get_name_reply; struct scsi_cmnd * scsicmd; scsicmd = (struct scsi_cmnd *) context; if (!aac_valid_context(scsicmd, fibptr)) return; dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies)); BUG_ON(fibptr == NULL); get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr); /* Failure is irrelevant, using default value instead */ if ((le32_to_cpu(get_name_reply->status) == CT_OK) && (get_name_reply->data[0] != '\0')) { char *sp = get_name_reply->data; int data_size = sizeof_field(struct aac_get_name_resp, data); sp[data_size - 1] = '\0'; while (*sp == ' ') ++sp; if (*sp) { struct inquiry_data inq; char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)]; int count = sizeof(d); char *dp = d; do { *dp++ = (*sp) ? *sp++ : ' '; } while (--count > 0); scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq)); memcpy(inq.inqd_pid, d, sizeof(d)); scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq)); } } scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; aac_fib_complete(fibptr); aac_scsi_done(scsicmd); } /* * aac_get_container_name - get container name, none blocking. 
*/ static int aac_get_container_name(struct scsi_cmnd * scsicmd) { int status; int data_size; struct aac_get_name *dinfo; struct fib * cmd_fibcontext; struct aac_dev * dev; dev = (struct aac_dev *)scsicmd->device->host->hostdata; data_size = sizeof_field(struct aac_get_name_resp, data); cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); aac_fib_init(cmd_fibcontext); dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_READ_NAME); dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); dinfo->count = cpu_to_le32(data_size - 1); status = aac_fib_send(ContainerCommand, cmd_fibcontext, sizeof(struct aac_get_name_resp), FsaNormal, 0, 1, (fib_callback)get_container_name_callback, (void *) scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); return -1; } static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd) { struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev; if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1)) return aac_scsi_cmd(scsicmd); scsicmd->result = DID_NO_CONNECT << 16; aac_scsi_done(scsicmd); return 0; } static void _aac_probe_container2(void * context, struct fib * fibptr) { struct fsa_dev_info *fsa_dev_ptr; int (*callback)(struct scsi_cmnd *); struct scsi_cmnd *scsicmd = context; struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd); int i; if (!aac_valid_context(scsicmd, fibptr)) return; cmd_priv->status = 0; fsa_dev_ptr = fibptr->dev->fsa_dev; if (fsa_dev_ptr) { struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr); __le32 sup_options2; fsa_dev_ptr += scmd_id(scsicmd); sup_options2 = fibptr->dev->supplement_adapter_info.supported_options2; if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) { dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200; fsa_dev_ptr->block_size = 0x200; } else { fsa_dev_ptr->block_size = le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size); } for (i = 0; i < 16; i++) fsa_dev_ptr->identifier[i] = dresp->mnt[0].fileinfo.bdevinfo .identifier[i]; fsa_dev_ptr->valid = 1; /* sense_key holds the current state of the spin-up */ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY)) fsa_dev_ptr->sense_data.sense_key = NOT_READY; else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY) fsa_dev_ptr->sense_data.sense_key = NO_SENSE; fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol); fsa_dev_ptr->size = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32); fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0); } if ((fsa_dev_ptr->valid & 1) == 0) fsa_dev_ptr->valid = 0; cmd_priv->status = le32_to_cpu(dresp->count); } aac_fib_complete(fibptr); aac_fib_free(fibptr); callback = cmd_priv->callback; cmd_priv->callback = NULL; (*callback)(scsicmd); return; } static void _aac_probe_container1(void * context, struct fib * fibptr) { struct scsi_cmnd * scsicmd; struct aac_mount * dresp; struct aac_query_mount *dinfo; int status; dresp = (struct aac_mount *) fib_data(fibptr); if (!aac_supports_2T(fibptr->dev)) { dresp->mnt[0].capacityhigh = 0; if ((le32_to_cpu(dresp->status) == ST_OK) && 
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { _aac_probe_container2(context, fibptr); return; } } scsicmd = (struct scsi_cmnd *) context; if (!aac_valid_context(scsicmd, fibptr)) return; aac_fib_init(fibptr); dinfo = (struct aac_query_mount *)fib_data(fibptr); if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE) dinfo->command = cpu_to_le32(VM_NameServeAllBlk); else dinfo->command = cpu_to_le32(VM_NameServe64); dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; status = aac_fib_send(ContainerCommand, fibptr, sizeof(struct aac_query_mount), FsaNormal, 0, 1, _aac_probe_container2, (void *) scsicmd); /* * Check that the command queued to the controller */ if (status < 0 && status != -EINPROGRESS) { /* Inherit results from VM_NameServe, if any */ dresp->status = cpu_to_le32(ST_OK); _aac_probe_container2(context, fibptr); } } static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *)) { struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd); struct fib * fibptr; int status = -ENOMEM; if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) { struct aac_query_mount *dinfo; aac_fib_init(fibptr); dinfo = (struct aac_query_mount *)fib_data(fibptr); if (fibptr->dev->supplement_adapter_info.supported_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE) dinfo->command = cpu_to_le32(VM_NameServeAllBlk); else dinfo->command = cpu_to_le32(VM_NameServe); dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); cmd_priv->callback = callback; cmd_priv->owner = AAC_OWNER_FIRMWARE; status = aac_fib_send(ContainerCommand, fibptr, sizeof(struct aac_query_mount), FsaNormal, 0, 1, _aac_probe_container1, (void *) scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; if (status < 0) { cmd_priv->callback = NULL; aac_fib_complete(fibptr); aac_fib_free(fibptr); } } if (status < 0) { struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev; if (fsa_dev_ptr) { fsa_dev_ptr += scmd_id(scsicmd); if ((fsa_dev_ptr->valid & 1) == 0) { fsa_dev_ptr->valid = 0; return (*callback)(scsicmd); } } } return status; } /** * aac_probe_container_callback1 - query a logical volume * @scsicmd: the scsi command block * * Queries the controller about the given volume. The volume information * is updated in the struct fsa_dev_info structure rather than returned. 
*/ static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd) { scsicmd->device = NULL; return 0; } static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd) { aac_probe_container_callback1(scsi_cmnd); } int aac_probe_container(struct aac_dev *dev, int cid) { struct aac_cmd_priv *cmd_priv; struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(*cmd_priv), GFP_KERNEL); struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL); int status; if (!scsicmd || !scsidev) { kfree(scsicmd); kfree(scsidev); return -ENOMEM; } scsicmd->device = scsidev; scsidev->sdev_state = 0; scsidev->id = cid; scsidev->host = dev->scsi_host_ptr; if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0) while (scsicmd->device == scsidev) schedule(); kfree(scsidev); cmd_priv = aac_priv(scsicmd); status = cmd_priv->status; kfree(scsicmd); return status; } /* Local Structure to set SCSI inquiry data strings */ struct scsi_inq { char vid[8]; /* Vendor ID */ char pid[16]; /* Product ID */ char prl[4]; /* Product Revision Level */ }; /** * inqstrcpy - string merge * @a: string to copy from * @b: string to copy to * * Copy a String from one location to another * without copying \0 */ static void inqstrcpy(char *a, char *b) { while (*a != (char)0) *b++ = *a++; } static char *container_types[] = { "None", "Volume", "Mirror", "Stripe", "RAID5", "SSRW", "SSRO", "Morph", "Legacy", "RAID4", "RAID10", "RAID00", "V-MIRRORS", "PSEUDO R4", "RAID50", "RAID5D", "RAID5D0", "RAID1E", "RAID6", "RAID60", "Unknown" }; char * get_container_type(unsigned tindex) { if (tindex >= ARRAY_SIZE(container_types)) tindex = ARRAY_SIZE(container_types) - 1; return container_types[tindex]; } /* Function: setinqstr * * Arguments: [1] pointer to void [1] int * * Purpose: Sets SCSI inquiry data strings for vendor, product * and revision level. Allows strings to be set in platform dependent * files instead of in OS dependent driver source. 
*/ static void setinqstr(struct aac_dev *dev, void *data, int tindex) { struct scsi_inq *str; struct aac_supplement_adapter_info *sup_adap_info; sup_adap_info = &dev->supplement_adapter_info; str = (struct scsi_inq *)(data); /* cast data to scsi inq block */ memset(str, ' ', sizeof(*str)); if (sup_adap_info->adapter_type_text[0]) { int c; char *cp; char *cname = kmemdup(sup_adap_info->adapter_type_text, sizeof(sup_adap_info->adapter_type_text), GFP_ATOMIC); if (!cname) return; cp = cname; if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) inqstrcpy("SMC", str->vid); else { c = sizeof(str->vid); while (*cp && *cp != ' ' && --c) ++cp; c = *cp; *cp = '\0'; inqstrcpy(cname, str->vid); *cp = c; while (*cp && *cp != ' ') ++cp; } while (*cp == ' ') ++cp; /* last six chars reserved for vol type */ if (strlen(cp) > sizeof(str->pid)) cp[sizeof(str->pid)] = '\0'; inqstrcpy (cp, str->pid); kfree(cname); } else { struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); inqstrcpy (mp->vname, str->vid); /* last six chars reserved for vol type */ inqstrcpy (mp->model, str->pid); } if (tindex < ARRAY_SIZE(container_types)){ char *findit = str->pid; for ( ; *findit != ' '; findit++); /* walk till we find a space */ /* RAID is superfluous in the context of a RAID device */ if (memcmp(findit-4, "RAID", 4) == 0) *(findit -= 4) = ' '; if (((findit - str->pid) + strlen(container_types[tindex])) < (sizeof(str->pid) + sizeof(str->prl))) inqstrcpy (container_types[tindex], findit + 1); } inqstrcpy ("V1.0", str->prl); } static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data, struct aac_dev *dev, struct scsi_cmnd *scsicmd) { int container; vpdpage83data->type3.codeset = 1; vpdpage83data->type3.identifiertype = 3; vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3) - 4; for (container = 0; container < dev->maximum_num_containers; container++) { if (scmd_id(scsicmd) == container) { memcpy(vpdpage83data->type3.Identifier, dev->fsa_dev[container].identifier, 16); break; } } } static void get_container_serial_callback(void *context, struct fib * fibptr) { struct aac_get_serial_resp * get_serial_reply; struct scsi_cmnd * scsicmd; BUG_ON(fibptr == NULL); scsicmd = (struct scsi_cmnd *) context; if (!aac_valid_context(scsicmd, fibptr)) return; get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr); /* Failure is irrelevant, using default value instead */ if (le32_to_cpu(get_serial_reply->status) == CT_OK) { /*Check to see if it's for VPD 0x83 or 0x80 */ if (scsicmd->cmnd[2] == 0x83) { /* vpd page 0x83 - Device Identification Page */ struct aac_dev *dev; int i; struct tvpd_page83 vpdpage83data; dev = (struct aac_dev *)scsicmd->device->host->hostdata; memset(((u8 *)&vpdpage83data), 0, sizeof(vpdpage83data)); /* DIRECT_ACCESS_DEVIC */ vpdpage83data.DeviceType = 0; /* DEVICE_CONNECTED */ vpdpage83data.DeviceTypeQualifier = 0; /* VPD_DEVICE_IDENTIFIERS */ vpdpage83data.PageCode = 0x83; vpdpage83data.reserved = 0; vpdpage83data.PageLength = sizeof(vpdpage83data.type1) + sizeof(vpdpage83data.type2); /* VPD 83 Type 3 is not supported for ARC */ if (dev->sa_firmware) vpdpage83data.PageLength += sizeof(vpdpage83data.type3); /* T10 Vendor Identifier Field Format */ /* VpdcodesetAscii */ vpdpage83data.type1.codeset = 2; /* VpdIdentifierTypeVendorId */ vpdpage83data.type1.identifiertype = 1; vpdpage83data.type1.identifierlength = sizeof(vpdpage83data.type1) - 4; /* "ADAPTEC " for adaptec */ memcpy(vpdpage83data.type1.venid, "ADAPTEC ", sizeof(vpdpage83data.type1.venid)); 
memcpy(vpdpage83data.type1.productid, "ARRAY ", sizeof( vpdpage83data.type1.productid)); /* Convert to ascii based serial number. * The LSB is the end. */ for (i = 0; i < 8; i++) { u8 temp = (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF); if (temp > 0x9) { vpdpage83data.type1.serialnumber[i] = 'A' + (temp - 0xA); } else { vpdpage83data.type1.serialnumber[i] = '0' + temp; } } /* VpdCodeSetBinary */ vpdpage83data.type2.codeset = 1; /* VpdidentifiertypeEUI64 */ vpdpage83data.type2.identifiertype = 2; vpdpage83data.type2.identifierlength = sizeof(vpdpage83data.type2) - 4; vpdpage83data.type2.eu64id.venid[0] = 0xD0; vpdpage83data.type2.eu64id.venid[1] = 0; vpdpage83data.type2.eu64id.venid[2] = 0; vpdpage83data.type2.eu64id.Serial = get_serial_reply->uid; vpdpage83data.type2.eu64id.reserved = 0; /* * VpdIdentifierTypeFCPHName * VPD 0x83 Type 3 not supported for ARC */ if (dev->sa_firmware) { build_vpd83_type3(&vpdpage83data, dev, scsicmd); } /* Move the inquiry data to the response buffer. */ scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data, sizeof(vpdpage83data)); } else { /* It must be for VPD 0x80 */ char sp[13]; /* EVPD bit set */ sp[0] = INQD_PDT_DA; sp[1] = scsicmd->cmnd[2]; sp[2] = 0; sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", le32_to_cpu(get_serial_reply->uid)); scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp)); } } scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; aac_fib_complete(fibptr); aac_scsi_done(scsicmd); } /* * aac_get_container_serial - get container serial, none blocking. */ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) { int status; struct aac_get_serial *dinfo; struct fib * cmd_fibcontext; struct aac_dev * dev; dev = (struct aac_dev *)scsicmd->device->host->hostdata; cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); aac_fib_init(cmd_fibcontext); dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext); dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID); dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; status = aac_fib_send(ContainerCommand, cmd_fibcontext, sizeof(struct aac_get_serial_resp), FsaNormal, 0, 1, (fib_callback) get_container_serial_callback, (void *) scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); return -1; } /* Function: setinqserial * * Arguments: [1] pointer to void [1] int * * Purpose: Sets SCSI Unit Serial number. * This is a fake. We should read a proper * serial number from the container. <SuSE>But * without docs it's quite hard to do it :-) * So this will have to do in the meantime.</SuSE> */ static int setinqserial(struct aac_dev *dev, void *data, int cid) { /* * This breaks array migration. 
*/ return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X", le32_to_cpu(dev->adapter_info.serial[0]), cid); } static inline void set_sense(struct sense_data *sense_data, u8 sense_key, u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer) { u8 *sense_buf = (u8 *)sense_data; /* Sense data valid, err code 70h */ sense_buf[0] = 0x70; /* No info field */ sense_buf[1] = 0; /* Segment number, always zero */ sense_buf[2] = sense_key; /* Sense key */ sense_buf[12] = sense_code; /* Additional sense code */ sense_buf[13] = a_sense_code; /* Additional sense code qualifier */ if (sense_key == ILLEGAL_REQUEST) { sense_buf[7] = 10; /* Additional sense length */ sense_buf[15] = bit_pointer; /* Illegal parameter is in the parameter block */ if (sense_code == SENCODE_INVALID_CDB_FIELD) sense_buf[15] |= 0xc0;/* Std sense key specific field */ /* Illegal parameter is in the CDB block */ sense_buf[16] = field_pointer >> 8; /* MSB */ sense_buf[17] = field_pointer; /* LSB */ } else sense_buf[7] = 6; /* Additional sense length */ } static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) { if (lba & 0xffffffff00000000LL) { int cid = scmd_id(cmd); dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); aac_scsi_done(cmd); return 1; } return 0; } static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) { return 0; } static void io_callback(void *context, struct fib * fibptr); static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) { struct aac_dev *dev = fib->dev; u16 fibsize, command; long ret; aac_fib_init(fib); if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && !dev->sync_mode) { struct aac_raw_io2 *readcmd2; readcmd2 = (struct aac_raw_io2 *) fib_data(fib); memset(readcmd2, 0, sizeof(struct aac_raw_io2)); readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); readcmd2->byteCount = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size); readcmd2->cid = cpu_to_le16(scmd_id(cmd)); readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ); ret = aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize); if (ret < 0) return ret; command = ContainerRawIo2; fibsize = struct_size(readcmd2, sge, le32_to_cpu(readcmd2->sgeCnt)); } else { struct aac_raw_io *readcmd; readcmd = (struct aac_raw_io *) fib_data(fib); readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); readcmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size); readcmd->cid = cpu_to_le16(scmd_id(cmd)); readcmd->flags = cpu_to_le16(RIO_TYPE_READ); readcmd->bpTotal = 0; readcmd->bpComplete = 0; ret = aac_build_sgraw(cmd, &readcmd->sg); if (ret < 0) return ret; command = ContainerRawIo; fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw)); } BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(command, fib, fibsize, FsaNormal, 0, 1, (fib_callback) io_callback, (void *) cmd); } static int 
aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) { u16 fibsize; struct aac_read64 *readcmd; long ret; aac_fib_init(fib); readcmd = (struct aac_read64 *) fib_data(fib); readcmd->command = cpu_to_le32(VM_CtHostRead64); readcmd->cid = cpu_to_le16(scmd_id(cmd)); readcmd->sector_count = cpu_to_le16(count); readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->pad = 0; readcmd->flags = 0; ret = aac_build_sg64(cmd, &readcmd->sg); if (ret < 0) return ret; fibsize = sizeof(struct aac_read64) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(ContainerCommand64, fib, fibsize, FsaNormal, 0, 1, (fib_callback) io_callback, (void *) cmd); } static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) { u16 fibsize; struct aac_read *readcmd; struct aac_dev *dev = fib->dev; long ret; aac_fib_init(fib); readcmd = (struct aac_read *) fib_data(fib); readcmd->command = cpu_to_le32(VM_CtBlockRead); readcmd->cid = cpu_to_le32(scmd_id(cmd)); readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size); ret = aac_build_sg(cmd, &readcmd->sg); if (ret < 0) return ret; fibsize = sizeof(struct aac_read) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(ContainerCommand, fib, fibsize, FsaNormal, 0, 1, (fib_callback) io_callback, (void *) cmd); } static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) { struct aac_dev *dev = fib->dev; u16 fibsize, command; long ret; aac_fib_init(fib); if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && !dev->sync_mode) { struct aac_raw_io2 *writecmd2; writecmd2 = (struct aac_raw_io2 *) fib_data(fib); memset(writecmd2, 0, sizeof(struct aac_raw_io2)); writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); writecmd2->byteCount = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size); writecmd2->cid = cpu_to_le16(scmd_id(cmd)); writecmd2->flags = (fua && ((aac_cache & 5) != 1) && (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) : cpu_to_le16(RIO2_IO_TYPE_WRITE); ret = aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize); if (ret < 0) return ret; command = ContainerRawIo2; fibsize = struct_size(writecmd2, sge, le32_to_cpu(writecmd2->sgeCnt)); } else { struct aac_raw_io *writecmd; writecmd = (struct aac_raw_io *) fib_data(fib); writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); writecmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size); writecmd->cid = cpu_to_le16(scmd_id(cmd)); writecmd->flags = (fua && ((aac_cache & 5) != 1) && (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? 
cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) : cpu_to_le16(RIO_TYPE_WRITE); writecmd->bpTotal = 0; writecmd->bpComplete = 0; ret = aac_build_sgraw(cmd, &writecmd->sg); if (ret < 0) return ret; command = ContainerRawIo; fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw)); } BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(command, fib, fibsize, FsaNormal, 0, 1, (fib_callback) io_callback, (void *) cmd); } static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) { u16 fibsize; struct aac_write64 *writecmd; long ret; aac_fib_init(fib); writecmd = (struct aac_write64 *) fib_data(fib); writecmd->command = cpu_to_le32(VM_CtHostWrite64); writecmd->cid = cpu_to_le16(scmd_id(cmd)); writecmd->sector_count = cpu_to_le16(count); writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->pad = 0; writecmd->flags = 0; ret = aac_build_sg64(cmd, &writecmd->sg); if (ret < 0) return ret; fibsize = sizeof(struct aac_write64) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(ContainerCommand64, fib, fibsize, FsaNormal, 0, 1, (fib_callback) io_callback, (void *) cmd); } static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua) { u16 fibsize; struct aac_write *writecmd; struct aac_dev *dev = fib->dev; long ret; aac_fib_init(fib); writecmd = (struct aac_write *) fib_data(fib); writecmd->command = cpu_to_le32(VM_CtBlockWrite); writecmd->cid = cpu_to_le32(scmd_id(cmd)); writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size); writecmd->sg.count = cpu_to_le32(1); /* ->stable is not used - it did mean which type of write */ ret = aac_build_sg(cmd, &writecmd->sg); if (ret < 0) return ret; fibsize = sizeof(struct aac_write) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(ContainerCommand, fib, fibsize, FsaNormal, 0, 1, (fib_callback) io_callback, (void *) cmd); } static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd) { struct aac_srb * srbcmd; u32 flag; u32 timeout; struct aac_dev *dev = fib->dev; aac_fib_init(fib); switch(cmd->sc_data_direction){ case DMA_TO_DEVICE: flag = SRB_DataOut; break; case DMA_BIDIRECTIONAL: flag = SRB_DataIn | SRB_DataOut; break; case DMA_FROM_DEVICE: flag = SRB_DataIn; break; case DMA_NONE: default: /* shuts up some versions of gcc */ flag = SRB_NoDataXfer; break; } srbcmd = (struct aac_srb*) fib_data(fib); srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd))); srbcmd->id = cpu_to_le32(scmd_id(cmd)); srbcmd->lun = cpu_to_le32(cmd->device->lun); srbcmd->flags = cpu_to_le32(flag); timeout = scsi_cmd_to_rq(cmd)->timeout / HZ; if (timeout == 0) timeout = (dev->sa_firmware ? 
AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT); srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds srbcmd->retry_limit = 0; /* Obsolete parameter */ srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len); return srbcmd; } static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib, struct scsi_cmnd *cmd) { struct aac_hba_cmd_req *hbacmd; struct aac_dev *dev; int bus, target; u64 address; dev = (struct aac_dev *)cmd->device->host->hostdata; hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va; memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */ /* iu_type is a parameter of aac_hba_send */ switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: hbacmd->byte1 = 2; break; case DMA_FROM_DEVICE: case DMA_BIDIRECTIONAL: hbacmd->byte1 = 1; break; case DMA_NONE: default: break; } hbacmd->lun[1] = cpu_to_le32(cmd->device->lun); bus = aac_logical_to_phys(scmd_channel(cmd)); target = scmd_id(cmd); hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus; /* we fill in reply_qid later in aac_src_deliver_message */ /* we fill in iu_type, request_id later in aac_hba_send */ /* we fill in emb_data_desc_count later in aac_build_sghba */ memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len); hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd)); address = (u64)fib->hw_error_pa; hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); return hbacmd; } static void aac_srb_callback(void *context, struct fib * fibptr); static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd) { u16 fibsize; struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); long ret; ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg); if (ret < 0) return ret; srbcmd->count = cpu_to_le32(scsi_bufflen(cmd)); memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); /* * Build Scatter/Gather list */ fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + ((le32_to_cpu(srbcmd->sg.count) & 0xff) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(ScsiPortCommand64, fib, fibsize, FsaNormal, 0, 1, (fib_callback) aac_srb_callback, (void *) cmd); } static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd) { u16 fibsize; struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); long ret; ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg); if (ret < 0) return ret; srbcmd->count = cpu_to_le32(scsi_bufflen(cmd)); memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); /* * Build Scatter/Gather list */ fibsize = sizeof (struct aac_srb) + (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); /* * Now send the Fib to the adapter */ return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1, (fib_callback) aac_srb_callback, (void *) cmd); } static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd) { if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac && (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) return FAILED; return aac_scsi_32(fib, cmd); } static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd) { struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd); struct aac_dev *dev; long ret; dev = (struct aac_dev *)cmd->device->host->hostdata; ret = aac_build_sghba(cmd, hbacmd, dev->scsi_host_ptr->sg_tablesize, 
(u64)fib->hw_sgl_pa); if (ret < 0) return ret; /* * Now send the HBA command to the adapter */ fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) * sizeof(struct aac_hba_sgl); return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib, (fib_callback) aac_hba_callback, (void *) cmd); } static int aac_send_safw_bmic_cmd(struct aac_dev *dev, struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len) { struct fib *fibptr; dma_addr_t addr; int rcode; int fibsize; struct aac_srb *srb; struct aac_srb_reply *srb_reply; struct sgmap64 *sg64; u32 vbus; u32 vid; if (!dev->sa_firmware) return 0; /* allocate FIB */ fibptr = aac_fib_alloc(dev); if (!fibptr) return -ENOMEM; aac_fib_init(fibptr); fibptr->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + sizeof(struct sgentry64); /* allocate DMA buffer for response */ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len, DMA_BIDIRECTIONAL); if (dma_mapping_error(&dev->pdev->dev, addr)) { rcode = -ENOMEM; goto fib_error; } srb = fib_data(fibptr); memcpy(srb, &srbu->srb, sizeof(struct aac_srb)); vbus = (u32)le16_to_cpu( dev->supplement_adapter_info.virt_device_bus); vid = (u32)le16_to_cpu( dev->supplement_adapter_info.virt_device_target); /* set the common request fields */ srb->channel = cpu_to_le32(vbus); srb->id = cpu_to_le32(vid); srb->lun = 0; srb->function = cpu_to_le32(SRBF_ExecuteScsi); srb->timeout = 0; srb->retry_limit = 0; srb->cdb_size = cpu_to_le32(16); srb->count = cpu_to_le32(xfer_len); sg64 = (struct sgmap64 *)&srb->sg; sg64->count = cpu_to_le32(1); sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr)); sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr)); sg64->sg[0].count = cpu_to_le32(xfer_len); /* * Copy the updated data for other dumping or other usage if needed */ memcpy(&srbu->srb, srb, sizeof(struct aac_srb)); /* issue request to the controller */ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL); if (rcode == -ERESTARTSYS) rcode = -ERESTART; if (unlikely(rcode < 0)) goto bmic_error; srb_reply = (struct aac_srb_reply *)fib_data(fibptr); memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply)); bmic_error: dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL); fib_error: aac_fib_complete(fibptr); aac_fib_free(fibptr); return rcode; } static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target) { struct aac_ciss_identify_pd *identify_resp; if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW) return; identify_resp = dev->hba_map[bus][target].safw_identify_resp; if (identify_resp == NULL) { dev->hba_map[bus][target].qd_limit = 32; return; } if (identify_resp->current_queue_depth_limit <= 0 || identify_resp->current_queue_depth_limit > 255) dev->hba_map[bus][target].qd_limit = 32; else dev->hba_map[bus][target].qd_limit = identify_resp->current_queue_depth_limit; } static int aac_issue_safw_bmic_identify(struct aac_dev *dev, struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target) { int rcode = -ENOMEM; int datasize; struct aac_srb_unit srbu; struct aac_srb *srbcmd; struct aac_ciss_identify_pd *identify_reply; datasize = sizeof(struct aac_ciss_identify_pd); identify_reply = kmalloc(datasize, GFP_KERNEL); if (!identify_reply) goto out; memset(&srbu, 0, sizeof(struct aac_srb_unit)); srbcmd = &srbu.srb; srbcmd->flags = cpu_to_le32(SRB_DataIn); srbcmd->cdb[0] = 0x26; srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF); srbcmd->cdb[6] = 
CISS_IDENTIFY_PHYSICAL_DEVICE; rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize); if (unlikely(rcode < 0)) goto mem_free_all; *identify_resp = identify_reply; out: return rcode; mem_free_all: kfree(identify_reply); goto out; } static inline void aac_free_safw_ciss_luns(struct aac_dev *dev) { kfree(dev->safw_phys_luns); dev->safw_phys_luns = NULL; } /** * aac_get_safw_ciss_luns() - Process topology change * @dev: aac_dev structure * * Execute a CISS REPORT PHYS LUNS and process the results into * the current hba_map. */ static int aac_get_safw_ciss_luns(struct aac_dev *dev) { int rcode = -ENOMEM; int datasize; struct aac_srb *srbcmd; struct aac_srb_unit srbu; struct aac_ciss_phys_luns_resp *phys_luns; datasize = sizeof(struct aac_ciss_phys_luns_resp) + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun); phys_luns = kmalloc(datasize, GFP_KERNEL); if (phys_luns == NULL) goto out; memset(&srbu, 0, sizeof(struct aac_srb_unit)); srbcmd = &srbu.srb; srbcmd->flags = cpu_to_le32(SRB_DataIn); srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS; srbcmd->cdb[1] = 2; /* extended reporting */ srbcmd->cdb[8] = (u8)(datasize >> 8); srbcmd->cdb[9] = (u8)(datasize); rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize); if (unlikely(rcode < 0)) goto mem_free_all; if (phys_luns->resp_flag != 2) { rcode = -ENOMSG; goto mem_free_all; } dev->safw_phys_luns = phys_luns; out: return rcode; mem_free_all: kfree(phys_luns); goto out; } static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev) { return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24; } static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun) { return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f; } static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun) { return dev->safw_phys_luns->lun[lun].level2[0]; } static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun) { return dev->safw_phys_luns->lun[lun].bus >> 6; } static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun) { return dev->safw_phys_luns->lun[lun].node_ident[9]; } static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun) { return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]); } static inline void aac_free_safw_identify_resp(struct aac_dev *dev, int bus, int target) { kfree(dev->hba_map[bus][target].safw_identify_resp); dev->hba_map[bus][target].safw_identify_resp = NULL; } static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev, int lun_count) { int luns; int i; u32 bus; u32 target; luns = aac_get_safw_phys_lun_count(dev); if (luns < lun_count) lun_count = luns; else if (lun_count < 0) lun_count = luns; for (i = 0; i < lun_count; i++) { bus = aac_get_safw_phys_bus(dev, i); target = aac_get_safw_phys_target(dev, i); aac_free_safw_identify_resp(dev, bus, target); } } static int aac_get_safw_attr_all_targets(struct aac_dev *dev) { int i; int rcode = 0; u32 lun_count; u32 bus; u32 target; struct aac_ciss_identify_pd *identify_resp = NULL; lun_count = aac_get_safw_phys_lun_count(dev); for (i = 0; i < lun_count; ++i) { bus = aac_get_safw_phys_bus(dev, i); target = aac_get_safw_phys_target(dev, i); rcode = aac_issue_safw_bmic_identify(dev, &identify_resp, bus, target); if (unlikely(rcode < 0)) goto free_identify_resp; dev->hba_map[bus][target].safw_identify_resp = identify_resp; } out: return rcode; free_identify_resp: aac_free_safw_all_identify_resp(dev, i); goto out; } /** * aac_set_safw_attr_all_targets- update current hba map with data from FW * @dev: 
aac_dev structure * * Update our hba map with the information gathered from the FW */ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) { /* ok and extended reporting */ u32 lun_count, nexus; u32 i, bus, target; u8 expose_flag, attribs; lun_count = aac_get_safw_phys_lun_count(dev); dev->scan_counter++; for (i = 0; i < lun_count; ++i) { bus = aac_get_safw_phys_bus(dev, i); target = aac_get_safw_phys_target(dev, i); expose_flag = aac_get_safw_phys_expose_flag(dev, i); attribs = aac_get_safw_phys_attribs(dev, i); nexus = aac_get_safw_phys_nexus(dev, i); if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS) continue; if (expose_flag != 0) { dev->hba_map[bus][target].devtype = AAC_DEVTYPE_RAID_MEMBER; continue; } if (nexus != 0 && (attribs & 8)) { dev->hba_map[bus][target].devtype = AAC_DEVTYPE_NATIVE_RAW; dev->hba_map[bus][target].rmw_nexus = nexus; } else dev->hba_map[bus][target].devtype = AAC_DEVTYPE_ARC_RAW; dev->hba_map[bus][target].scan_counter = dev->scan_counter; aac_set_safw_target_qd(dev, bus, target); } } static int aac_setup_safw_targets(struct aac_dev *dev) { int rcode = 0; rcode = aac_get_containers(dev); if (unlikely(rcode < 0)) goto out; rcode = aac_get_safw_ciss_luns(dev); if (unlikely(rcode < 0)) goto out; rcode = aac_get_safw_attr_all_targets(dev); if (unlikely(rcode < 0)) goto free_ciss_luns; aac_set_safw_attr_all_targets(dev); aac_free_safw_all_identify_resp(dev, -1); free_ciss_luns: aac_free_safw_ciss_luns(dev); out: return rcode; } int aac_setup_safw_adapter(struct aac_dev *dev) { return aac_setup_safw_targets(dev); } int aac_get_adapter_info(struct aac_dev* dev) { struct fib* fibptr; int rcode; u32 tmp, bus, target; struct aac_adapter_info *info; struct aac_bus_info *command; struct aac_bus_info_response *bus_info; if (!(fibptr = aac_fib_alloc(dev))) return -ENOMEM; aac_fib_init(fibptr); info = (struct aac_adapter_info *) fib_data(fibptr); memset(info,0,sizeof(*info)); rcode = aac_fib_send(RequestAdapterInfo, fibptr, sizeof(*info), FsaNormal, -1, 1, /* First `interrupt' command uses special wait */ NULL, NULL); if (rcode < 0) { /* FIB should be freed only after * getting the response from the F/W */ if (rcode != -ERESTARTSYS) { aac_fib_complete(fibptr); aac_fib_free(fibptr); } return rcode; } memcpy(&dev->adapter_info, info, sizeof(*info)); dev->supplement_adapter_info.virt_device_bus = 0xffff; if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { struct aac_supplement_adapter_info * sinfo; aac_fib_init(fibptr); sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr); memset(sinfo,0,sizeof(*sinfo)); rcode = aac_fib_send(RequestSupplementAdapterInfo, fibptr, sizeof(*sinfo), FsaNormal, 1, 1, NULL, NULL); if (rcode >= 0) memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); if (rcode == -ERESTARTSYS) { fibptr = aac_fib_alloc(dev); if (!fibptr) return -ENOMEM; } } /* reset all previous mapped devices (i.e. for init. 
after IOP_RESET) */ for (bus = 0; bus < AAC_MAX_BUSES; bus++) { for (target = 0; target < AAC_MAX_TARGETS; target++) { dev->hba_map[bus][target].devtype = 0; dev->hba_map[bus][target].qd_limit = 0; } } /* * GetBusInfo */ aac_fib_init(fibptr); bus_info = (struct aac_bus_info_response *) fib_data(fibptr); memset(bus_info, 0, sizeof(*bus_info)); command = (struct aac_bus_info *)bus_info; command->Command = cpu_to_le32(VM_Ioctl); command->ObjType = cpu_to_le32(FT_DRIVE); command->MethodId = cpu_to_le32(1); command->CtlCmd = cpu_to_le32(GetBusInfo); rcode = aac_fib_send(ContainerCommand, fibptr, sizeof (*bus_info), FsaNormal, 1, 1, NULL, NULL); /* reasoned default */ dev->maximum_num_physicals = 16; if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) { dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus); dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); } if (!dev->in_reset) { char buffer[16]; tmp = le32_to_cpu(dev->adapter_info.kernelrev); printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", dev->name, dev->id, tmp>>24, (tmp>>16)&0xff, tmp&0xff, le32_to_cpu(dev->adapter_info.kernelbuild), (int)sizeof(dev->supplement_adapter_info.build_date), dev->supplement_adapter_info.build_date); tmp = le32_to_cpu(dev->adapter_info.monitorrev); printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", dev->name, dev->id, tmp>>24,(tmp>>16)&0xff,tmp&0xff, le32_to_cpu(dev->adapter_info.monitorbuild)); tmp = le32_to_cpu(dev->adapter_info.biosrev); printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", dev->name, dev->id, tmp>>24,(tmp>>16)&0xff,tmp&0xff, le32_to_cpu(dev->adapter_info.biosbuild)); buffer[0] = '\0'; if (aac_get_serial_number( shost_to_class(dev->scsi_host_ptr), buffer)) printk(KERN_INFO "%s%d: serial %s", dev->name, dev->id, buffer); if (dev->supplement_adapter_info.vpd_info.tsid[0]) { printk(KERN_INFO "%s%d: TSID %.*s\n", dev->name, dev->id, (int)sizeof(dev->supplement_adapter_info .vpd_info.tsid), dev->supplement_adapter_info.vpd_info.tsid); } if (!aac_check_reset || ((aac_check_reset == 1) && (dev->supplement_adapter_info.supported_options2 & AAC_OPTION_IGNORE_RESET))) { printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", dev->name, dev->id); } } dev->cache_protected = 0; dev->jbod = ((dev->supplement_adapter_info.feature_bits & AAC_FEATURE_JBOD) != 0); dev->nondasd_support = 0; dev->raid_scsi_mode = 0; if(dev->adapter_info.options & AAC_OPT_NONDASD) dev->nondasd_support = 1; /* * If the firmware supports ROMB RAID/SCSI mode and we are currently * in RAID/SCSI mode, set the flag. For now if in this mode we will * force nondasd support on. If we decide to allow the non-dasd flag * additional changes changes will have to be made to support * RAID/SCSI. the function aac_scsi_cmd in this module will have to be * changed to support the new dev->raid_scsi_mode flag instead of * leaching off of the dev->nondasd_support flag. Also in linit.c the * function aac_detect will have to be modified where it sets up the * max number of channels based on the aac->nondasd_support flag only. 
*/ if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) && (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) { dev->nondasd_support = 1; dev->raid_scsi_mode = 1; } if (dev->raid_scsi_mode != 0) printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n", dev->name, dev->id); if (nondasd != -1) dev->nondasd_support = (nondasd!=0); if (dev->nondasd_support && !dev->in_reset) printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32)) dev->needs_dac = 1; dev->dac_support = 0; if ((sizeof(dma_addr_t) > 4) && dev->needs_dac && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) { if (!dev->in_reset) printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id); dev->dac_support = 1; } if(dacmode != -1) { dev->dac_support = (dacmode!=0); } /* avoid problems with AAC_QUIRK_SCSI_32 controllers */ if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)) { dev->nondasd_support = 0; dev->jbod = 0; expose_physicals = 0; } if (dev->dac_support) { if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) { if (!dev->in_reset) dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n"); } else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) { dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n"); dev->dac_support = 0; } else { dev_info(&dev->pdev->dev, "No suitable DMA available\n"); rcode = -ENOMEM; } } /* * Deal with configuring for the individualized limits of each packet * interface. */ dev->a_ops.adapter_scsi = (dev->dac_support) ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32) ? aac_scsi_32_64 : aac_scsi_64) : aac_scsi_32; if (dev->raw_io_interface) { dev->a_ops.adapter_bounds = (dev->raw_io_64) ? aac_bounds_64 : aac_bounds_32; dev->a_ops.adapter_read = aac_read_raw_io; dev->a_ops.adapter_write = aac_write_raw_io; } else { dev->a_ops.adapter_bounds = aac_bounds_32; dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct sgentry); if (dev->dac_support) { dev->a_ops.adapter_read = aac_read_block64; dev->a_ops.adapter_write = aac_write_block64; /* * 38 scatter gather elements */ dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - sizeof(struct aac_write64) + sizeof(struct sgentry64)) / sizeof(struct sgentry64); } else { dev->a_ops.adapter_read = aac_read_block; dev->a_ops.adapter_write = aac_write_block; } dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { /* * Worst case size that could cause sg overflow when * we break up SG elements that are larger than 64KB. * Would be nice if we could tell the SCSI layer what * the maximum SG element size can be. Worst case is * (sg_tablesize-1) 4KB elements with one 64KB * element. 
* 32bit -> 468 or 238KB 64bit -> 424 or 212KB */ dev->scsi_host_ptr->max_sectors = (dev->scsi_host_ptr->sg_tablesize * 8) + 112; } } if (!dev->sync_mode && dev->sa_firmware && dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE) dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize = HBA_MAX_SG_SEPARATE; /* FIB should be freed only after getting the response from the F/W */ if (rcode != -ERESTARTSYS) { aac_fib_complete(fibptr); aac_fib_free(fibptr); } return rcode; } static void io_callback(void *context, struct fib * fibptr) { struct aac_dev *dev; struct aac_read_reply *readreply; struct scsi_cmnd *scsicmd; u32 cid; scsicmd = (struct scsi_cmnd *) context; if (!aac_valid_context(scsicmd, fibptr)) return; dev = fibptr->dev; cid = scmd_id(scsicmd); if (nblank(dprintk(x))) { u64 lba; switch (scsicmd->cmnd[0]) { case WRITE_6: case READ_6: lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; break; case WRITE_16: case READ_16: lba = ((u64)scsicmd->cmnd[2] << 56) | ((u64)scsicmd->cmnd[3] << 48) | ((u64)scsicmd->cmnd[4] << 40) | ((u64)scsicmd->cmnd[5] << 32) | ((u64)scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; break; case WRITE_12: case READ_12: lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; break; default: lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; break; } printk(KERN_DEBUG "io_callback[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies); } BUG_ON(fibptr == NULL); scsi_dma_unmap(scsicmd); readreply = (struct aac_read_reply *)fib_data(fibptr); switch (le32_to_cpu(readreply->status)) { case ST_OK: scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE; break; case ST_NOT_READY: scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY, SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); break; case ST_MEDERR: scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR, SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); break; default: #ifdef AAC_DETAILED_STATUS_INFO printk(KERN_WARNING "io_callback: io failed, status = %d\n", le32_to_cpu(readreply->status)); #endif scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); break; } aac_fib_complete(fibptr); aac_scsi_done(scsicmd); } static int aac_read(struct scsi_cmnd * scsicmd) { u64 lba; u32 count; int status; struct aac_dev *dev; struct fib * cmd_fibcontext; int cid; dev = (struct aac_dev *)scsicmd->device->host->hostdata; /* * Get block address and transfer length */ switch (scsicmd->cmnd[0]) { case READ_6: dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd))); lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; count = scsicmd->cmnd[4]; 
if (count == 0) count = 256; break; case READ_16: dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd))); lba = ((u64)scsicmd->cmnd[2] << 56) | ((u64)scsicmd->cmnd[3] << 48) | ((u64)scsicmd->cmnd[4] << 40) | ((u64)scsicmd->cmnd[5] << 32) | ((u64)scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; break; case READ_12: dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd))); lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; break; default: dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd))); lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; break; } if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) { cid = scmd_id(scsicmd); dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); aac_scsi_done(scsicmd); return 0; } dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies)); if (aac_adapter_bounds(dev,scsicmd,lba)) return 0; /* * Alocate and initialize a Fib */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); /* * For some reason, the Fib didn't queue, return QUEUE_FULL */ scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL; aac_scsi_done(scsicmd); aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); return 0; } static int aac_write(struct scsi_cmnd * scsicmd) { u64 lba; u32 count; int fua; int status; struct aac_dev *dev; struct fib * cmd_fibcontext; int cid; dev = (struct aac_dev *)scsicmd->device->host->hostdata; /* * Get block address and transfer length */ if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */ { lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; count = scsicmd->cmnd[4]; if (count == 0) count = 256; fua = 0; } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd))); lba = ((u64)scsicmd->cmnd[2] << 56) | ((u64)scsicmd->cmnd[3] << 48) | ((u64)scsicmd->cmnd[4] << 40) | ((u64)scsicmd->cmnd[5] << 32) | ((u64)scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; fua = scsicmd->cmnd[1] & 0x8; } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd))); lba = ((u64)scsicmd->cmnd[2] << 24) | 
(scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; fua = scsicmd->cmnd[1] & 0x8; } else { dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd))); lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; fua = scsicmd->cmnd[1] & 0x8; } if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) { cid = scmd_id(scsicmd); dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); aac_scsi_done(scsicmd); return 0; } dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies)); if (aac_adapter_bounds(dev,scsicmd,lba)) return 0; /* * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); /* * For some reason, the Fib didn't queue, return QUEUE_FULL */ scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL; aac_scsi_done(scsicmd); aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); return 0; } static void synchronize_callback(void *context, struct fib *fibptr) { struct aac_synchronize_reply *synchronizereply; struct scsi_cmnd *cmd = context; if (!aac_valid_context(cmd, fibptr)) return; dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies)); BUG_ON(fibptr == NULL); synchronizereply = fib_data(fibptr); if (le32_to_cpu(synchronizereply->status) == CT_OK) cmd->result = DID_OK << 16 | SAM_STAT_GOOD; else { struct scsi_device *sdev = cmd->device; struct aac_dev *dev = fibptr->dev; u32 cid = sdev_id(sdev); printk(KERN_WARNING "synchronize_callback: synchronize failed, status = %d\n", le32_to_cpu(synchronizereply->status)); cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); } aac_fib_complete(fibptr); aac_fib_free(fibptr); aac_scsi_done(cmd); } static int aac_synchronize(struct scsi_cmnd *scsicmd) { int status; struct fib *cmd_fibcontext; struct aac_synchronize *synchronizecmd; struct scsi_device *sdev = scsicmd->device; struct aac_dev *aac; aac = (struct aac_dev *)sdev->host->hostdata; if (aac->in_reset) return SCSI_MLQUEUE_HOST_BUSY; /* * Allocate and initialize a Fib */ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); aac_fib_init(cmd_fibcontext); synchronizecmd = fib_data(cmd_fibcontext); synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE); synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); synchronizecmd->count = cpu_to_le32(sizeof(((struct 
aac_synchronize_reply *)NULL)->data)); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; /* * Now send the Fib to the adapter */ status = aac_fib_send(ContainerCommand, cmd_fibcontext, sizeof(struct aac_synchronize), FsaNormal, 0, 1, (fib_callback)synchronize_callback, (void *)scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; printk(KERN_WARNING "aac_synchronize: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); return SCSI_MLQUEUE_HOST_BUSY; } static void aac_start_stop_callback(void *context, struct fib *fibptr) { struct scsi_cmnd *scsicmd = context; if (!aac_valid_context(scsicmd, fibptr)) return; BUG_ON(fibptr == NULL); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; aac_fib_complete(fibptr); aac_fib_free(fibptr); aac_scsi_done(scsicmd); } static int aac_start_stop(struct scsi_cmnd *scsicmd) { int status; struct fib *cmd_fibcontext; struct aac_power_management *pmcmd; struct scsi_device *sdev = scsicmd->device; struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; if (!(aac->supplement_adapter_info.supported_options2 & AAC_OPTION_POWER_MANAGEMENT)) { scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; aac_scsi_done(scsicmd); return 0; } if (aac->in_reset) return SCSI_MLQUEUE_HOST_BUSY; /* * Allocate and initialize a Fib */ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); aac_fib_init(cmd_fibcontext); pmcmd = fib_data(cmd_fibcontext); pmcmd->command = cpu_to_le32(VM_ContainerConfig); pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT); /* Eject bit ignored, not relevant */ pmcmd->sub = (scsicmd->cmnd[4] & 1) ? cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT); pmcmd->cid = cpu_to_le32(sdev_id(sdev)); pmcmd->parm = (scsicmd->cmnd[1] & 1) ? cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; /* * Now send the Fib to the adapter */ status = aac_fib_send(ContainerCommand, cmd_fibcontext, sizeof(struct aac_power_management), FsaNormal, 0, 1, (fib_callback)aac_start_stop_callback, (void *)scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); return SCSI_MLQUEUE_HOST_BUSY; } /** * aac_scsi_cmd() - Process SCSI command * @scsicmd: SCSI command block * * Emulate a SCSI command and queue the required request for the * aacraid firmware. */ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) { u32 cid, bus; struct Scsi_Host *host = scsicmd->device->host; struct aac_dev *dev = (struct aac_dev *)host->hostdata; struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; if (fsa_dev_ptr == NULL) return -1; /* * If the bus, id or lun is out of range, return fail * Test does not apply to ID 16, the pseudo id for the controller * itself. 
*/ cid = scmd_id(scsicmd); if (cid != host->this_id) { if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) { if((cid >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)) { scsicmd->result = DID_NO_CONNECT << 16; goto scsi_done_ret; } /* * If the target container doesn't exist, it may have * been newly created */ if (((fsa_dev_ptr[cid].valid & 1) == 0) || (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY)) { switch (scsicmd->cmnd[0]) { case SERVICE_ACTION_IN_16: if (!(dev->raw_io_interface) || !(dev->raw_io_64) || ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; fallthrough; case INQUIRY: case READ_CAPACITY: case TEST_UNIT_READY: if (dev->in_reset) return -1; return _aac_probe_container(scsicmd, aac_probe_container_callback2); default: break; } } } else { /* check for physical non-dasd devices */ bus = aac_logical_to_phys(scmd_channel(scsicmd)); if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { if (dev->in_reset) return -1; return aac_send_hba_fib(scsicmd); } else if (dev->nondasd_support || expose_physicals || dev->jbod) { if (dev->in_reset) return -1; return aac_send_srb_fib(scsicmd); } else { scsicmd->result = DID_NO_CONNECT << 16; goto scsi_done_ret; } } } /* * else Command for the controller itself */ else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */ (scsicmd->cmnd[0] != TEST_UNIT_READY)) { dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0])); scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, ASENCODE_INVALID_COMMAND, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); goto scsi_done_ret; } switch (scsicmd->cmnd[0]) { case READ_6: case READ_10: case READ_12: case READ_16: if (dev->in_reset) return -1; return aac_read(scsicmd); case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: if (dev->in_reset) return -1; return aac_write(scsicmd); case SYNCHRONIZE_CACHE: if (((aac_cache & 6) == 6) && dev->cache_protected) { scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; } /* Issue FIB to tell Firmware to flush it's cache */ if ((aac_cache & 6) != 2) return aac_synchronize(scsicmd); fallthrough; case INQUIRY: { struct inquiry_data inq_data; dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid)); memset(&inq_data, 0, sizeof (struct inquiry_data)); if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) { char *arr = (char *)&inq_data; /* EVPD bit set */ arr[0] = (scmd_id(scsicmd) == host->this_id) ? 
INQD_PDT_PROC : INQD_PDT_DA; if (scsicmd->cmnd[2] == 0) { /* supported vital product data pages */ arr[3] = 3; arr[4] = 0x0; arr[5] = 0x80; arr[6] = 0x83; arr[1] = scsicmd->cmnd[2]; scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; } else if (scsicmd->cmnd[2] == 0x80) { /* unit serial number page */ arr[3] = setinqserial(dev, &arr[4], scmd_id(scsicmd)); arr[1] = scsicmd->cmnd[2]; scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); if (aac_wwn != 2) return aac_get_container_serial( scsicmd); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; } else if (scsicmd->cmnd[2] == 0x83) { /* vpd page 0x83 - Device Identification Page */ char *sno = (char *)&inq_data; sno[3] = setinqserial(dev, &sno[4], scmd_id(scsicmd)); if (aac_wwn != 2) return aac_get_container_serial( scsicmd); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; } else { /* vpd page not implemented */ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD, ASENCODE_NO_SENSE, 7, 2); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); } break; } inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ inq_data.inqd_len = 31; /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */ /* * Set the Vendor, Product, and Revision Level * see: <vendor>.c i.e. aac.c */ if (cid == host->this_id) { setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types)); inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; } if (dev->in_reset) return -1; setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); return aac_get_container_name(scsicmd); } case SERVICE_ACTION_IN_16: if (!(dev->raw_io_interface) || !(dev->raw_io_64) || ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; { u64 capacity; char cp[13]; unsigned int alloc_len; dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n")); capacity = fsa_dev_ptr[cid].size - 1; cp[0] = (capacity >> 56) & 0xff; cp[1] = (capacity >> 48) & 0xff; cp[2] = (capacity >> 40) & 0xff; cp[3] = (capacity >> 32) & 0xff; cp[4] = (capacity >> 24) & 0xff; cp[5] = (capacity >> 16) & 0xff; cp[6] = (capacity >> 8) & 0xff; cp[7] = (capacity >> 0) & 0xff; cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff; cp[12] = 0; alloc_len = ((scsicmd->cmnd[10] << 24) + (scsicmd->cmnd[11] << 16) + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]); alloc_len = min_t(size_t, alloc_len, sizeof(cp)); scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len); if (alloc_len < scsi_bufflen(scsicmd)) scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) - alloc_len); /* Do not cache partition table for arrays */ scsicmd->device->removable = 1; scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; } case READ_CAPACITY: { u32 capacity; char cp[8]; dprintk((KERN_DEBUG "READ CAPACITY 
command.\n")); if (fsa_dev_ptr[cid].size <= 0x100000000ULL) capacity = fsa_dev_ptr[cid].size - 1; else capacity = (u32)-1; cp[0] = (capacity >> 24) & 0xff; cp[1] = (capacity >> 16) & 0xff; cp[2] = (capacity >> 8) & 0xff; cp[3] = (capacity >> 0) & 0xff; cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff; scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); /* Do not cache partition table for arrays */ scsicmd->device->removable = 1; scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; } case MODE_SENSE: { int mode_buf_length = 4; u32 capacity; aac_modep_data mpd; if (fsa_dev_ptr[cid].size <= 0x100000000ULL) capacity = fsa_dev_ptr[cid].size - 1; else capacity = (u32)-1; dprintk((KERN_DEBUG "MODE SENSE command.\n")); memset((char *)&mpd, 0, sizeof(aac_modep_data)); /* Mode data length */ mpd.hd.data_length = sizeof(mpd.hd) - 1; /* Medium type - default */ mpd.hd.med_type = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected bit 4: 0/1 = FUA enabled */ mpd.hd.dev_par = 0; if (dev->raw_io_interface && ((aac_cache & 5) != 1)) mpd.hd.dev_par = 0x10; if (scsicmd->cmnd[1] & 0x8) mpd.hd.bd_length = 0; /* Block descriptor length */ else { mpd.hd.bd_length = sizeof(mpd.bd); mpd.hd.data_length += mpd.hd.bd_length; mpd.bd.block_length[0] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; mpd.bd.block_length[1] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; mpd.bd.block_length[2] = fsa_dev_ptr[cid].block_size & 0xff; mpd.mpc_buf[0] = scsicmd->cmnd[2]; if (scsicmd->cmnd[2] == 0x1C) { /* page length */ mpd.mpc_buf[1] = 0xa; /* Mode data length */ mpd.hd.data_length = 23; } else { /* Mode data length */ mpd.hd.data_length = 15; } if (capacity > 0xffffff) { mpd.bd.block_count[0] = 0xff; mpd.bd.block_count[1] = 0xff; mpd.bd.block_count[2] = 0xff; } else { mpd.bd.block_count[0] = (capacity >> 16) & 0xff; mpd.bd.block_count[1] = (capacity >> 8) & 0xff; mpd.bd.block_count[2] = capacity & 0xff; } } if (((scsicmd->cmnd[2] & 0x3f) == 8) || ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { mpd.hd.data_length += 3; mpd.mpc_buf[0] = 8; mpd.mpc_buf[1] = 1; mpd.mpc_buf[2] = ((aac_cache & 6) == 2) ? 
0 : 0x04; /* WCE */ mode_buf_length = sizeof(mpd); } if (mode_buf_length > scsicmd->cmnd[4]) mode_buf_length = scsicmd->cmnd[4]; else mode_buf_length = sizeof(mpd); scsi_sg_copy_from_buffer(scsicmd, (char *)&mpd, mode_buf_length); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; } case MODE_SENSE_10: { u32 capacity; int mode_buf_length = 8; aac_modep10_data mpd10; if (fsa_dev_ptr[cid].size <= 0x100000000ULL) capacity = fsa_dev_ptr[cid].size - 1; else capacity = (u32)-1; dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); memset((char *)&mpd10, 0, sizeof(aac_modep10_data)); /* Mode data length (MSB) */ mpd10.hd.data_length[0] = 0; /* Mode data length (LSB) */ mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; /* Medium type - default */ mpd10.hd.med_type = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected bit 4: 0/1 = FUA enabled */ mpd10.hd.dev_par = 0; if (dev->raw_io_interface && ((aac_cache & 5) != 1)) mpd10.hd.dev_par = 0x10; mpd10.hd.rsrvd[0] = 0; /* reserved */ mpd10.hd.rsrvd[1] = 0; /* reserved */ if (scsicmd->cmnd[1] & 0x8) { /* Block descriptor length (MSB) */ mpd10.hd.bd_length[0] = 0; /* Block descriptor length (LSB) */ mpd10.hd.bd_length[1] = 0; } else { mpd10.hd.bd_length[0] = 0; mpd10.hd.bd_length[1] = sizeof(mpd10.bd); mpd10.hd.data_length[1] += mpd10.hd.bd_length[1]; mpd10.bd.block_length[0] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; mpd10.bd.block_length[1] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; mpd10.bd.block_length[2] = fsa_dev_ptr[cid].block_size & 0xff; if (capacity > 0xffffff) { mpd10.bd.block_count[0] = 0xff; mpd10.bd.block_count[1] = 0xff; mpd10.bd.block_count[2] = 0xff; } else { mpd10.bd.block_count[0] = (capacity >> 16) & 0xff; mpd10.bd.block_count[1] = (capacity >> 8) & 0xff; mpd10.bd.block_count[2] = capacity & 0xff; } } if (((scsicmd->cmnd[2] & 0x3f) == 8) || ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { mpd10.hd.data_length[1] += 3; mpd10.mpc_buf[0] = 8; mpd10.mpc_buf[1] = 1; mpd10.mpc_buf[2] = ((aac_cache & 6) == 2) ? 
0 : 0x04; /* WCE */ mode_buf_length = sizeof(mpd10); if (mode_buf_length > scsicmd->cmnd[8]) mode_buf_length = scsicmd->cmnd[8]; } scsi_sg_copy_from_buffer(scsicmd, (char *)&mpd10, mode_buf_length); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; } case REQUEST_SENSE: dprintk((KERN_DEBUG "REQUEST SENSE command.\n")); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof(struct sense_data)); memset(&dev->fsa_dev[cid].sense_data, 0, sizeof(struct sense_data)); scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; case ALLOW_MEDIUM_REMOVAL: dprintk((KERN_DEBUG "LOCK command.\n")); if (scsicmd->cmnd[4]) fsa_dev_ptr[cid].locked = 1; else fsa_dev_ptr[cid].locked = 0; scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; /* * These commands are all No-Ops */ case TEST_UNIT_READY: if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) { scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY, SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); break; } fallthrough; case RESERVE: case RELEASE: case REZERO_UNIT: case REASSIGN_BLOCKS: case SEEK_10: scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; break; case START_STOP: return aac_start_stop(scsicmd); default: /* * Unhandled commands */ dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0])); scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, ASENCODE_INVALID_COMMAND, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); } scsi_done_ret: aac_scsi_done(scsicmd); return 0; } static int query_disk(struct aac_dev *dev, void __user *arg) { struct aac_query_disk qd; struct fsa_dev_info *fsa_dev_ptr; fsa_dev_ptr = dev->fsa_dev; if (!fsa_dev_ptr) return -EBUSY; if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) return -EFAULT; if (qd.cnum == -1) { if (qd.id < 0 || qd.id >= dev->maximum_num_containers) return -EINVAL; qd.cnum = qd.id; } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) return -EINVAL; qd.instance = dev->scsi_host_ptr->host_no; qd.bus = 0; qd.id = CONTAINER_TO_ID(qd.cnum); qd.lun = CONTAINER_TO_LUN(qd.cnum); } else return -EINVAL; qd.valid = fsa_dev_ptr[qd.cnum].valid != 0; qd.locked = fsa_dev_ptr[qd.cnum].locked; qd.deleted = fsa_dev_ptr[qd.cnum].deleted; if (fsa_dev_ptr[qd.cnum].devname[0] == '\0') qd.unmapped = 1; else qd.unmapped = 0; strscpy(qd.name, fsa_dev_ptr[qd.cnum].devname, min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1)); if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk))) return -EFAULT; return 0; } static int force_delete_disk(struct aac_dev *dev, void __user *arg) { struct aac_delete_disk dd; struct fsa_dev_info *fsa_dev_ptr; fsa_dev_ptr = dev->fsa_dev; if (!fsa_dev_ptr) return -EBUSY; if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) return -EFAULT; if (dd.cnum >= dev->maximum_num_containers) return -EINVAL; /* * Mark this container as being deleted. 
*/ fsa_dev_ptr[dd.cnum].deleted = 1; /* * Mark the container as no longer valid */ fsa_dev_ptr[dd.cnum].valid = 0; return 0; } static int delete_disk(struct aac_dev *dev, void __user *arg) { struct aac_delete_disk dd; struct fsa_dev_info *fsa_dev_ptr; fsa_dev_ptr = dev->fsa_dev; if (!fsa_dev_ptr) return -EBUSY; if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) return -EFAULT; if (dd.cnum >= dev->maximum_num_containers) return -EINVAL; /* * If the container is locked, it can not be deleted by the API. */ if (fsa_dev_ptr[dd.cnum].locked) return -EBUSY; else { /* * Mark the container as no longer being valid. */ fsa_dev_ptr[dd.cnum].valid = 0; fsa_dev_ptr[dd.cnum].devname[0] = '\0'; return 0; } } int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) { switch (cmd) { case FSACTL_QUERY_DISK: return query_disk(dev, arg); case FSACTL_DELETE_DISK: return delete_disk(dev, arg); case FSACTL_FORCE_DELETE_DISK: return force_delete_disk(dev, arg); case FSACTL_GET_CONTAINERS: return aac_get_containers(dev); default: return -ENOTTY; } } /** * aac_srb_callback * @context: the context set in the fib - here it is scsi cmd * @fibptr: pointer to the fib * * Handles the completion of a scsi command to a non dasd device */ static void aac_srb_callback(void *context, struct fib * fibptr) { struct aac_srb_reply *srbreply; struct scsi_cmnd *scsicmd; scsicmd = (struct scsi_cmnd *) context; if (!aac_valid_context(scsicmd, fibptr)) return; BUG_ON(fibptr == NULL); srbreply = (struct aac_srb_reply *) fib_data(fibptr); scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { /* fast response */ srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS); srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD); } else { /* * Calculate resid for sg */ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) - le32_to_cpu(srbreply->data_xfer_length)); } scsi_dma_unmap(scsicmd); /* expose physical device if expose_physicald flag is on */ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) && expose_physicals > 0) aac_expose_phy_device(scsicmd); /* * First check the fib status */ if (le32_to_cpu(srbreply->status) != ST_OK) { int len; pr_warn("aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status)); len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), SCSI_SENSE_BUFFERSIZE); scsicmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION; memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); } /* * Next check the srb status */ switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { case SRB_STATUS_ERROR_RECOVERY: case SRB_STATUS_PENDING: case SRB_STATUS_SUCCESS: scsicmd->result = DID_OK << 16; break; case SRB_STATUS_DATA_OVERRUN: switch (scsicmd->cmnd[0]) { case READ_6: case WRITE_6: case READ_10: case WRITE_10: case READ_12: case WRITE_12: case READ_16: case WRITE_16: if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) pr_warn("aacraid: SCSI CMD underflow\n"); else pr_warn("aacraid: SCSI CMD Data Overrun\n"); scsicmd->result = DID_ERROR << 16; break; case INQUIRY: scsicmd->result = DID_OK << 16; break; default: scsicmd->result = DID_OK << 16; break; } break; case SRB_STATUS_ABORTED: scsicmd->result = DID_ABORT << 16; break; case SRB_STATUS_ABORT_FAILED: /* * Not sure about this one - but assuming the * hba was trying to abort for some reason */ scsicmd->result = DID_ERROR << 16; break; case SRB_STATUS_PARITY_ERROR: scsicmd->result = DID_PARITY << 16; break; case SRB_STATUS_NO_DEVICE: case 
SRB_STATUS_INVALID_PATH_ID: case SRB_STATUS_INVALID_TARGET_ID: case SRB_STATUS_INVALID_LUN: case SRB_STATUS_SELECTION_TIMEOUT: scsicmd->result = DID_NO_CONNECT << 16; break; case SRB_STATUS_COMMAND_TIMEOUT: case SRB_STATUS_TIMEOUT: scsicmd->result = DID_TIME_OUT << 16; break; case SRB_STATUS_BUSY: scsicmd->result = DID_BUS_BUSY << 16; break; case SRB_STATUS_BUS_RESET: scsicmd->result = DID_RESET << 16; break; case SRB_STATUS_MESSAGE_REJECTED: scsicmd->result = DID_ERROR << 16; break; case SRB_STATUS_REQUEST_FLUSHED: case SRB_STATUS_ERROR: case SRB_STATUS_INVALID_REQUEST: case SRB_STATUS_REQUEST_SENSE_FAILED: case SRB_STATUS_NO_HBA: case SRB_STATUS_UNEXPECTED_BUS_FREE: case SRB_STATUS_PHASE_SEQUENCE_FAILURE: case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: case SRB_STATUS_DELAYED_RETRY: case SRB_STATUS_BAD_FUNCTION: case SRB_STATUS_NOT_STARTED: case SRB_STATUS_NOT_IN_USE: case SRB_STATUS_FORCE_ABORT: case SRB_STATUS_DOMAIN_VALIDATION_FAIL: default: #ifdef AAC_DETAILED_STATUS_INFO pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n", le32_to_cpu(srbreply->srb_status) & 0x3F, aac_get_status_string( le32_to_cpu(srbreply->srb_status) & 0x3F), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status)); #endif /* * When the CC bit is SET by the host in ATA pass thru CDB, * driver is supposed to return DID_OK * * When the CC bit is RESET by the host, driver should * return DID_ERROR */ if ((scsicmd->cmnd[0] == ATA_12) || (scsicmd->cmnd[0] == ATA_16)) { if (scsicmd->cmnd[2] & (0x01 << 5)) { scsicmd->result = DID_OK << 16; } else { scsicmd->result = DID_ERROR << 16; } } else { scsicmd->result = DID_ERROR << 16; } break; } if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) { int len; scsicmd->result |= SAM_STAT_CHECK_CONDITION; len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), SCSI_SENSE_BUFFERSIZE); #ifdef AAC_DETAILED_STATUS_INFO pr_warn("aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len); #endif memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); } /* * OR in the scsi status (already shifted up a bit) */ scsicmd->result |= le32_to_cpu(srbreply->scsi_status); aac_fib_complete(fibptr); aac_scsi_done(scsicmd); } static void hba_resp_task_complete(struct aac_dev *dev, struct scsi_cmnd *scsicmd, struct aac_hba_resp *err) { scsicmd->result = err->status; /* set residual count */ scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count)); switch (err->status) { case SAM_STAT_GOOD: scsicmd->result |= DID_OK << 16; break; case SAM_STAT_CHECK_CONDITION: { int len; len = min_t(u8, err->sense_response_data_len, SCSI_SENSE_BUFFERSIZE); if (len) memcpy(scsicmd->sense_buffer, err->sense_response_buf, len); scsicmd->result |= DID_OK << 16; break; } case SAM_STAT_BUSY: scsicmd->result |= DID_BUS_BUSY << 16; break; case SAM_STAT_TASK_ABORTED: scsicmd->result |= DID_ABORT << 16; break; case SAM_STAT_RESERVATION_CONFLICT: case SAM_STAT_TASK_SET_FULL: default: scsicmd->result |= DID_ERROR << 16; break; } } static void hba_resp_task_failure(struct aac_dev *dev, struct scsi_cmnd *scsicmd, struct aac_hba_resp *err) { switch (err->status) { case HBA_RESP_STAT_HBAMODE_DISABLED: { u32 bus, cid; bus = aac_logical_to_phys(scmd_channel(scsicmd)); cid = scmd_id(scsicmd); if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW; dev->hba_map[bus][cid].rmw_nexus = 0xffffffff; } scsicmd->result = DID_NO_CONNECT << 16; break; } case HBA_RESP_STAT_IO_ERROR: case HBA_RESP_STAT_NO_PATH_TO_DEVICE: 
scsicmd->result = DID_OK << 16 | SAM_STAT_BUSY; break; case HBA_RESP_STAT_IO_ABORTED: scsicmd->result = DID_ABORT << 16; break; case HBA_RESP_STAT_INVALID_DEVICE: scsicmd->result = DID_NO_CONNECT << 16; break; case HBA_RESP_STAT_UNDERRUN: /* UNDERRUN is OK */ scsicmd->result = DID_OK << 16; break; case HBA_RESP_STAT_OVERRUN: default: scsicmd->result = DID_ERROR << 16; break; } } /** * aac_hba_callback * @context: the context set in the fib - here it is scsi cmd * @fibptr: pointer to the fib * * Handles the completion of a native HBA scsi command */ void aac_hba_callback(void *context, struct fib *fibptr) { struct aac_dev *dev; struct scsi_cmnd *scsicmd; struct aac_hba_resp *err = &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; scsicmd = (struct scsi_cmnd *) context; if (!aac_valid_context(scsicmd, fibptr)) return; WARN_ON(fibptr == NULL); dev = fibptr->dev; if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)) scsi_dma_unmap(scsicmd); if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { /* fast response */ scsicmd->result = DID_OK << 16; goto out; } switch (err->service_response) { case HBA_RESP_SVCRES_TASK_COMPLETE: hba_resp_task_complete(dev, scsicmd, err); break; case HBA_RESP_SVCRES_FAILURE: hba_resp_task_failure(dev, scsicmd, err); break; case HBA_RESP_SVCRES_TMF_REJECTED: scsicmd->result = DID_ERROR << 16; break; case HBA_RESP_SVCRES_TMF_LUN_INVALID: scsicmd->result = DID_NO_CONNECT << 16; break; case HBA_RESP_SVCRES_TMF_COMPLETE: case HBA_RESP_SVCRES_TMF_SUCCEEDED: scsicmd->result = DID_OK << 16; break; default: scsicmd->result = DID_ERROR << 16; break; } out: aac_fib_complete(fibptr); if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) aac_priv(scsicmd)->sent_command = 1; else aac_scsi_done(scsicmd); } /** * aac_send_srb_fib * @scsicmd: the scsi command block * * This routine will form a FIB and fill in the aac_srb from the * scsicmd passed in. */ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) { struct fib* cmd_fibcontext; struct aac_dev* dev; int status; dev = (struct aac_dev *)scsicmd->device->host->hostdata; if (scmd_id(scsicmd) >= dev->maximum_num_physicals || scsicmd->device->lun > 7) { scsicmd->result = DID_NO_CONNECT << 16; aac_scsi_done(scsicmd); return 0; } /* * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; status = aac_adapter_scsi(cmd_fibcontext, scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); return -1; } /** * aac_send_hba_fib * @scsicmd: the scsi command block * * This routine will form a FIB and fill in the aac_hba_cmd_req from the * scsicmd passed in. 
*/ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd) { struct fib *cmd_fibcontext; struct aac_dev *dev; int status; dev = shost_priv(scsicmd->device->host); if (scmd_id(scsicmd) >= dev->maximum_num_physicals || scsicmd->device->lun > AAC_MAX_LUN - 1) { scsicmd->result = DID_NO_CONNECT << 16; aac_scsi_done(scsicmd); return 0; } /* * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); if (!cmd_fibcontext) return -1; aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE; status = aac_adapter_hba(cmd_fibcontext, scsicmd); /* * Check that the command queued to the controller */ if (status == -EINPROGRESS) return 0; pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", status); aac_fib_complete(cmd_fibcontext); aac_fib_free(cmd_fibcontext); return -1; } static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) { unsigned long byte_count = 0; int nseg; struct scatterlist *sg; int i; // Get rid of old data psg->count = 0; psg->sg[0].addr = 0; psg->sg[0].count = 0; nseg = scsi_dma_map(scsicmd); if (nseg <= 0) return nseg; psg->count = cpu_to_le32(nseg); scsi_for_each_sg(scsicmd, sg, nseg, i) { psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); byte_count += sg_dma_len(sg); } /* hba wants the size to be exact */ if (byte_count > scsi_bufflen(scsicmd)) { u32 temp = le32_to_cpu(psg->sg[i-1].count) - (byte_count - scsi_bufflen(scsicmd)); psg->sg[i-1].count = cpu_to_le32(temp); byte_count = scsi_bufflen(scsicmd); } /* Check for command underflow */ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", byte_count, scsicmd->underflow); } return byte_count; } static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg) { unsigned long byte_count = 0; u64 addr; int nseg; struct scatterlist *sg; int i; // Get rid of old data psg->count = 0; psg->sg[0].addr[0] = 0; psg->sg[0].addr[1] = 0; psg->sg[0].count = 0; nseg = scsi_dma_map(scsicmd); if (nseg <= 0) return nseg; scsi_for_each_sg(scsicmd, sg, nseg, i) { int count = sg_dma_len(sg); addr = sg_dma_address(sg); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); psg->sg[i].count = cpu_to_le32(count); byte_count += count; } psg->count = cpu_to_le32(nseg); /* hba wants the size to be exact */ if (byte_count > scsi_bufflen(scsicmd)) { u32 temp = le32_to_cpu(psg->sg[i-1].count) - (byte_count - scsi_bufflen(scsicmd)); psg->sg[i-1].count = cpu_to_le32(temp); byte_count = scsi_bufflen(scsicmd); } /* Check for command underflow */ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", byte_count, scsicmd->underflow); } return byte_count; } static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg) { unsigned long byte_count = 0; int nseg; struct scatterlist *sg; int i; // Get rid of old data psg->count = 0; psg->sg[0].next = 0; psg->sg[0].prev = 0; psg->sg[0].addr[0] = 0; psg->sg[0].addr[1] = 0; psg->sg[0].count = 0; psg->sg[0].flags = 0; nseg = scsi_dma_map(scsicmd); if (nseg <= 0) return nseg; scsi_for_each_sg(scsicmd, sg, nseg, i) { int count = sg_dma_len(sg); u64 addr = sg_dma_address(sg); psg->sg[i].next = 0; psg->sg[i].prev = 0; psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32)); psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); psg->sg[i].count = cpu_to_le32(count); psg->sg[i].flags = 0; byte_count 
+= count; } psg->count = cpu_to_le32(nseg); /* hba wants the size to be exact */ if (byte_count > scsi_bufflen(scsicmd)) { u32 temp = le32_to_cpu(psg->sg[i-1].count) - (byte_count - scsi_bufflen(scsicmd)); psg->sg[i-1].count = cpu_to_le32(temp); byte_count = scsi_bufflen(scsicmd); } /* Check for command underflow */ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", byte_count, scsicmd->underflow); } return byte_count; } static long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max) { unsigned long byte_count = 0; int nseg; struct scatterlist *sg; int i, conformable = 0; u32 min_size = PAGE_SIZE, cur_size; nseg = scsi_dma_map(scsicmd); if (nseg <= 0) return nseg; scsi_for_each_sg(scsicmd, sg, nseg, i) { int count = sg_dma_len(sg); u64 addr = sg_dma_address(sg); BUG_ON(i >= sg_max); rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32)); rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff)); cur_size = cpu_to_le32(count); rio2->sge[i].length = cur_size; rio2->sge[i].flags = 0; if (i == 0) { conformable = 1; rio2->sgeFirstSize = cur_size; } else if (i == 1) { rio2->sgeNominalSize = cur_size; min_size = cur_size; } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) { conformable = 0; if (cur_size < min_size) min_size = cur_size; } byte_count += count; } /* hba wants the size to be exact */ if (byte_count > scsi_bufflen(scsicmd)) { u32 temp = le32_to_cpu(rio2->sge[i-1].length) - (byte_count - scsi_bufflen(scsicmd)); rio2->sge[i-1].length = cpu_to_le32(temp); byte_count = scsi_bufflen(scsicmd); } rio2->sgeCnt = cpu_to_le32(nseg); rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212); /* not conformable: evaluate required sg elements */ if (!conformable) { int j, nseg_new = nseg, err_found; for (i = min_size / PAGE_SIZE; i >= 1; --i) { err_found = 0; nseg_new = 2; for (j = 1; j < nseg - 1; ++j) { if (rio2->sge[j].length % (i*PAGE_SIZE)) { err_found = 1; break; } nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE)); } if (!err_found) break; } if (i > 0 && nseg_new <= sg_max) { int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new); if (ret < 0) return ret; } } else rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); /* Check for command underflow */ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", byte_count, scsicmd->underflow); } return byte_count; } static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new) { struct sge_ieee1212 *sge; int i, j, pos; u32 addr_low; if (aac_convert_sgl == 0) return 0; sge = kmalloc_array(nseg_new, sizeof(*sge), GFP_ATOMIC); if (sge == NULL) return -ENOMEM; for (i = 1, pos = 1; i < nseg-1; ++i) { for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) { addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE; sge[pos].addrLow = addr_low; sge[pos].addrHigh = rio2->sge[i].addrHigh; if (addr_low < rio2->sge[i].addrLow) sge[pos].addrHigh++; sge[pos].length = pages * PAGE_SIZE; sge[pos].flags = 0; pos++; } } sge[pos] = rio2->sge[nseg-1]; memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212)); kfree(sge); rio2->sgeCnt = cpu_to_le32(nseg_new); rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT); rio2->sgeNominalSize = pages * PAGE_SIZE; return 0; } static long aac_build_sghba(struct scsi_cmnd *scsicmd, struct aac_hba_cmd_req *hbacmd, int sg_max, u64 sg_address) { unsigned long byte_count = 0; int nseg; struct scatterlist *sg; 
int i; u32 cur_size; struct aac_hba_sgl *sge; nseg = scsi_dma_map(scsicmd); if (nseg <= 0) { byte_count = nseg; goto out; } if (nseg > HBA_MAX_SG_EMBEDDED) sge = &hbacmd->sge[2]; else sge = &hbacmd->sge[0]; scsi_for_each_sg(scsicmd, sg, nseg, i) { int count = sg_dma_len(sg); u64 addr = sg_dma_address(sg); WARN_ON(i >= sg_max); sge->addr_hi = cpu_to_le32((u32)(addr>>32)); sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff)); cur_size = cpu_to_le32(count); sge->len = cur_size; sge->flags = 0; byte_count += count; sge++; } sge--; /* hba wants the size to be exact */ if (byte_count > scsi_bufflen(scsicmd)) { u32 temp; temp = le32_to_cpu(sge->len) - byte_count - scsi_bufflen(scsicmd); sge->len = cpu_to_le32(temp); byte_count = scsi_bufflen(scsicmd); } if (nseg <= HBA_MAX_SG_EMBEDDED) { hbacmd->emb_data_desc_count = cpu_to_le32(nseg); sge->flags = cpu_to_le32(0x40000000); } else { /* not embedded */ hbacmd->sge[0].flags = cpu_to_le32(0x80000000); hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1); hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32); hbacmd->sge[0].addr_lo = cpu_to_le32((u32)(sg_address & 0xffffffff)); } /* Check for command underflow */ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) { pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n", byte_count, scsicmd->underflow); } out: return byte_count; } #ifdef AAC_DETAILED_STATUS_INFO struct aac_srb_status_info { u32 status; char *str; }; static struct aac_srb_status_info srb_status_info[] = { { SRB_STATUS_PENDING, "Pending Status"}, { SRB_STATUS_SUCCESS, "Success"}, { SRB_STATUS_ABORTED, "Aborted Command"}, { SRB_STATUS_ABORT_FAILED, "Abort Failed"}, { SRB_STATUS_ERROR, "Error Event"}, { SRB_STATUS_BUSY, "Device Busy"}, { SRB_STATUS_INVALID_REQUEST, "Invalid Request"}, { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"}, { SRB_STATUS_NO_DEVICE, "No Device"}, { SRB_STATUS_TIMEOUT, "Timeout"}, { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"}, { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"}, { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"}, { SRB_STATUS_BUS_RESET, "Bus Reset"}, { SRB_STATUS_PARITY_ERROR, "Parity Error"}, { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"}, { SRB_STATUS_NO_HBA, "No HBA"}, { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"}, { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"}, { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"}, { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"}, { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"}, { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"}, { SRB_STATUS_INVALID_LUN, "Invalid LUN"}, { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"}, { SRB_STATUS_BAD_FUNCTION, "Bad Function"}, { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"}, { SRB_STATUS_NOT_STARTED, "Not Started"}, { SRB_STATUS_NOT_IN_USE, "Not In Use"}, { SRB_STATUS_FORCE_ABORT, "Force Abort"}, { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"}, { 0xff, "Unknown Error"} }; char *aac_get_status_string(u32 status) { int i; for (i = 0; i < ARRAY_SIZE(srb_status_info); i++) if (srb_status_info[i].status == status) return srb_status_info[i].str; return "Bad Status Code"; } #endif
linux-master
drivers/scsi/aacraid/aachba.c
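The READ/WRITE paths in aachba.c above (aac_read(), aac_write(), io_callback()) all assemble the logical block address and transfer length directly from the raw CDB bytes, with a different layout per opcode: READ(6) packs a 21-bit LBA and an 8-bit length where 0 means 256 blocks, while READ(10)/(12)/(16) carry progressively wider big-endian fields. The standalone sketch below mirrors that byte-by-byte decode outside the driver; decode_read_cdb(), struct lba_count and the sample CDB in main() are illustrative names of mine, not aacraid symbols.

/*
 * Standalone sketch (not driver code): decode LBA and transfer length
 * from a SCSI READ CDB the same way aac_read() assembles them.
 */
#include <stdint.h>
#include <stdio.h>

struct lba_count { uint64_t lba; uint32_t count; };

static struct lba_count decode_read_cdb(const uint8_t *cdb)
{
	struct lba_count r = { 0, 0 };

	switch (cdb[0]) {
	case 0x08: /* READ(6): 21-bit LBA, 8-bit length (0 means 256) */
		r.lba = ((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
		r.count = cdb[4] ? cdb[4] : 256;
		break;
	case 0x88: /* READ(16): 64-bit LBA, 32-bit length */
		r.lba = ((uint64_t)cdb[2] << 56) | ((uint64_t)cdb[3] << 48) |
			((uint64_t)cdb[4] << 40) | ((uint64_t)cdb[5] << 32) |
			((uint64_t)cdb[6] << 24) | (cdb[7] << 16) |
			(cdb[8] << 8) | cdb[9];
		r.count = (cdb[10] << 24) | (cdb[11] << 16) |
			  (cdb[12] << 8) | cdb[13];
		break;
	case 0xA8: /* READ(12): 32-bit LBA, 32-bit length */
		r.lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) |
			(cdb[4] << 8) | cdb[5];
		r.count = (cdb[6] << 24) | (cdb[7] << 16) |
			  (cdb[8] << 8) | cdb[9];
		break;
	default:   /* anything else is treated like READ(10), as the driver's default branch does */
		r.lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) |
			(cdb[4] << 8) | cdb[5];
		r.count = (cdb[7] << 8) | cdb[8];
		break;
	}
	return r;
}

int main(void)
{
	/* hypothetical READ(16) CDB: LBA 0x100000000, 8 blocks */
	uint8_t cdb[16] = { 0x88, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0 };
	struct lba_count r = decode_read_cdb(cdb);

	printf("lba=%llu count=%u\n", (unsigned long long)r.lba, (unsigned)r.count);
	return 0;
}

Feeding it the READ(16) CDB in main() prints lba=4294967296 count=8, the same values aac_read() would derive before the bounds check against fsa_dev[].size.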
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * nark.c * * Abstract: Hardware Device Interface for NEMER/ARK */ #include <linux/pci.h> #include <linux/blkdev.h> #include <scsi/scsi_host.h> #include "aacraid.h" /** * aac_nark_ioremap * @dev: device to ioremap * @size: mapping resize request * */ static int aac_nark_ioremap(struct aac_dev * dev, u32 size) { if (!size) { iounmap(dev->regs.rx); dev->regs.rx = NULL; iounmap(dev->base); dev->base = NULL; return 0; } dev->base_start = pci_resource_start(dev->pdev, 2); dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) | ((u64)pci_resource_start(dev->pdev, 1) << 32), sizeof(struct rx_registers) - sizeof(struct rx_inbound)); dev->base = NULL; if (dev->regs.rx == NULL) return -1; dev->base = ioremap(dev->base_start, size); if (dev->base == NULL) { iounmap(dev->regs.rx); dev->regs.rx = NULL; return -1; } dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs; return 0; } /** * aac_nark_init - initialize an NEMER/ARK Split Bar card * @dev: device to configure * */ int aac_nark_init(struct aac_dev * dev) { /* * Fill in the function dispatch table. */ dev->a_ops.adapter_ioremap = aac_nark_ioremap; dev->a_ops.adapter_comm = aac_rx_select_comm; return _aac_rx_init(dev); }
linux-master
drivers/scsi/aacraid/nark.c
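aac_nark_ioremap() above handles the NEMER/ARK "split bar" layout: the 64-bit physical address of the message-unit registers is spread across two 32-bit BARs (BAR0 supplies the low half, BAR1 the high half), which the driver ORs together before calling ioremap(), and it unmaps that register window again if the second mapping of dev->base (from BAR2) fails. Below is a minimal sketch of just the address assembly, written as plain standalone C; make_split_bar_addr() is an illustrative helper of mine, not a kernel API.

/*
 * Standalone sketch (not driver code): combine the two 32-bit BAR values
 * of a split-bar device into one 64-bit register base address.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t make_split_bar_addr(uint32_t bar_lo, uint32_t bar_hi)
{
	/* BAR0 supplies bits 31:0, BAR1 supplies bits 63:32 */
	return (uint64_t)bar_lo | ((uint64_t)bar_hi << 32);
}

int main(void)
{
	/* hypothetical BAR values */
	uint64_t base = make_split_bar_addr(0xfe800000u, 0x00000001u);

	printf("register base: 0x%llx\n", (unsigned long long)base);
	return 0;
}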
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * dpcsup.c * * Abstract: All DPC processing routines for the cyclone board occur here. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/blkdev.h> #include "aacraid.h" /** * aac_response_normal - Handle command replies * @q: Queue to read from * * This DPC routine will be run when the adapter interrupts us to let us * know there is a response on our normal priority queue. We will pull off * all QE there are and wake up all the waiters before exiting. We will * take a spinlock out on the queue before operating on it. */ unsigned int aac_response_normal(struct aac_queue * q) { struct aac_dev * dev = q->dev; struct aac_entry *entry; struct hw_fib * hwfib; struct fib * fib; int consumed = 0; unsigned long flags, mflags; spin_lock_irqsave(q->lock, flags); /* * Keep pulling response QEs off the response queue and waking * up the waiters until there are no more QEs. We then return * back to the system. If no response was requested we just * deallocate the Fib here and continue. */ while(aac_consumer_get(dev, q, &entry)) { int fast; u32 index = le32_to_cpu(entry->addr); fast = index & 0x01; fib = &dev->fibs[index >> 2]; hwfib = fib->hw_fib_va; aac_consumer_free(dev, q, HostNormRespQueue); /* * Remove this fib from the Outstanding I/O queue. * But only if it has not already been timed out. * * If the fib has been timed out already, then just * continue. The caller has already been notified that * the fib timed out. */ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { spin_unlock_irqrestore(q->lock, flags); aac_fib_complete(fib); aac_fib_free(fib); spin_lock_irqsave(q->lock, flags); continue; } spin_unlock_irqrestore(q->lock, flags); if (fast) { /* * Doctor the fib */ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; } FIB_COUNTER_INCREMENT(aac_config.FibRecved); if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) { __le32 *pstatus = (__le32 *)hwfib->data; if (*pstatus & cpu_to_le32(0xffff0000)) *pstatus = cpu_to_le32(ST_OK); } if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) { if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) { FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); } else { FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); } /* * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. 
*/ fib->callback(fib->callback_data, fib); } else { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); if (!fib->done) { fib->done = 1; complete(&fib->event_wait); } spin_unlock_irqrestore(&fib->event_lock, flagv); spin_lock_irqsave(&dev->manage_lock, mflags); dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); if (fib->done == 2) { spin_lock_irqsave(&fib->event_lock, flagv); fib->done = 0; spin_unlock_irqrestore(&fib->event_lock, flagv); aac_fib_complete(fib); aac_fib_free(fib); } } consumed++; spin_lock_irqsave(q->lock, flags); } if (consumed > aac_config.peak_fibs) aac_config.peak_fibs = consumed; if (consumed == 0) aac_config.zero_fibs++; spin_unlock_irqrestore(q->lock, flags); return 0; } /** * aac_command_normal - handle commands * @q: queue to process * * This DPC routine will be queued when the adapter interrupts us to * let us know there is a command on our normal priority queue. We will * pull off all QE there are and wake up all the waiters before exiting. * We will take a spinlock out on the queue before operating on it. */ unsigned int aac_command_normal(struct aac_queue *q) { struct aac_dev * dev = q->dev; struct aac_entry *entry; unsigned long flags; spin_lock_irqsave(q->lock, flags); /* * Keep pulling response QEs off the response queue and waking * up the waiters until there are no more QEs. We then return * back to the system. */ while(aac_consumer_get(dev, q, &entry)) { struct fib fibctx; struct hw_fib * hw_fib; u32 index; struct fib *fib = &fibctx; index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib); hw_fib = &dev->aif_base_va[index]; /* * Allocate a FIB at all costs. For non queued stuff * we can just use the stack so we are happy. We need * a fib object in order to manage the linked lists */ if (dev->aif_thread) if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL) fib = &fibctx; memset(fib, 0, sizeof(struct fib)); INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); fib->hw_fib_va = hw_fib; fib->data = hw_fib->data; fib->dev = dev; if (dev->aif_thread && fib != &fibctx) { list_add_tail(&fib->fiblink, &q->cmdq); aac_consumer_free(dev, q, HostNormCmdQueue); wake_up_interruptible(&q->cmdready); } else { aac_consumer_free(dev, q, HostNormCmdQueue); spin_unlock_irqrestore(q->lock, flags); /* * Set the status of this FIB */ *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); aac_fib_adapter_complete(fib, sizeof(u32)); spin_lock_irqsave(q->lock, flags); } } spin_unlock_irqrestore(q->lock, flags); return 0; } /* * * aac_aif_callback * @context: the context set in the fib - here it is scsi cmd * @fibptr: pointer to the fib * * Handles the AIFs - new method (SRC) * */ static void aac_aif_callback(void *context, struct fib * fibptr) { struct fib *fibctx; struct aac_dev *dev; struct aac_aifcmd *cmd; fibctx = (struct fib *)context; BUG_ON(fibptr == NULL); dev = fibptr->dev; if ((fibptr->hw_fib_va->header.XferState & cpu_to_le32(NoMoreAifDataAvailable)) || dev->sa_firmware) { aac_fib_complete(fibptr); aac_fib_free(fibptr); return; } aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); aac_fib_init(fibctx); cmd = (struct aac_aifcmd *) fib_data(fibctx); cmd->command = cpu_to_le32(AifReqEvent); aac_fib_send(AifRequest, fibctx, sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), FsaNormal, 0, 1, (fib_callback)aac_aif_callback, fibctx); } /* * aac_intr_normal - Handle command replies * @dev: Device * @index: completion reference * * This DPC routine will 
be run when the adapter interrupts us to let us * know there is a response on our normal priority queue. We will pull off * all QE there are and wake up all the waiters before exiting. */ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif, int isFastResponse, struct hw_fib *aif_fib) { unsigned long mflags; dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); if (isAif == 1) { /* AIF - common */ struct hw_fib * hw_fib; struct fib * fib; struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; unsigned long flags; /* * Allocate a FIB. For non queued stuff we can just use * the stack so we are happy. We need a fib object in order to * manage the linked lists. */ if ((!dev->aif_thread) || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC)))) return 1; if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) { kfree (fib); return 1; } if (dev->sa_firmware) { fib->hbacmd_size = index; /* store event type */ } else if (aif_fib != NULL) { memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); } else { memcpy(hw_fib, (struct hw_fib *) (((uintptr_t)(dev->regs.sa)) + index), sizeof(struct hw_fib)); } INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); fib->hw_fib_va = hw_fib; fib->data = hw_fib->data; fib->dev = dev; spin_lock_irqsave(q->lock, flags); list_add_tail(&fib->fiblink, &q->cmdq); wake_up_interruptible(&q->cmdready); spin_unlock_irqrestore(q->lock, flags); return 1; } else if (isAif == 2) { /* AIF - new (SRC) */ struct fib *fibctx; struct aac_aifcmd *cmd; fibctx = aac_fib_alloc(dev); if (!fibctx) return 1; aac_fib_init(fibctx); cmd = (struct aac_aifcmd *) fib_data(fibctx); cmd->command = cpu_to_le32(AifReqEvent); return aac_fib_send(AifRequest, fibctx, sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), FsaNormal, 0, 1, (fib_callback)aac_aif_callback, fibctx); } else { struct fib *fib = &dev->fibs[index]; int start_callback = 0; /* * Remove this fib from the Outstanding I/O queue. * But only if it has not already been timed out. * * If the fib has been timed out already, then just * continue. The caller has already been notified that * the fib timed out. 
*/ atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { aac_fib_complete(fib); aac_fib_free(fib); return 0; } FIB_COUNTER_INCREMENT(aac_config.FibRecved); if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { if (isFastResponse) fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; if (fib->callback) { start_callback = 1; } else { unsigned long flagv; int completed = 0; dprintk((KERN_INFO "event_wait up\n")); spin_lock_irqsave(&fib->event_lock, flagv); if (fib->done == 2) { fib->done = 1; completed = 1; } else { fib->done = 1; complete(&fib->event_wait); } spin_unlock_irqrestore(&fib->event_lock, flagv); spin_lock_irqsave(&dev->manage_lock, mflags); dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); FIB_COUNTER_INCREMENT(aac_config.NativeRecved); if (completed) aac_fib_complete(fib); } } else { struct hw_fib *hwfib = fib->hw_fib_va; if (isFastResponse) { /* Doctor the fib */ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); fib->flags |= FIB_CONTEXT_FLAG_FASTRESP; } if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) { __le32 *pstatus = (__le32 *)hwfib->data; if (*pstatus & cpu_to_le32(0xffff0000)) *pstatus = cpu_to_le32(ST_OK); } if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) { if (hwfib->header.XferState & cpu_to_le32( NoResponseExpected)) { FIB_COUNTER_INCREMENT( aac_config.NoResponseRecved); } else { FIB_COUNTER_INCREMENT( aac_config.AsyncRecved); } start_callback = 1; } else { unsigned long flagv; int completed = 0; dprintk((KERN_INFO "event_wait up\n")); spin_lock_irqsave(&fib->event_lock, flagv); if (fib->done == 2) { fib->done = 1; completed = 1; } else { fib->done = 1; complete(&fib->event_wait); } spin_unlock_irqrestore(&fib->event_lock, flagv); spin_lock_irqsave(&dev->manage_lock, mflags); dev->management_fib_count--; spin_unlock_irqrestore(&dev->manage_lock, mflags); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); if (completed) aac_fib_complete(fib); } } if (start_callback) { /* * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. */ if (likely(fib->callback && fib->callback_data)) { fib->callback(fib->callback_data, fib); } else { aac_fib_complete(fib); aac_fib_free(fib); } } return 0; } }
linux-master
drivers/scsi/aacraid/dpcsup.c
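The DPC routines in dpcsup.c above all reduce to one dispatch decision once a queue entry has been consumed: the completed FIB either carries an asynchronous callback, or a synchronous issuer is blocked on it and has to be woken by marking it done. Below is a minimal userspace sketch of that pattern, not kernel code; struct req, req_complete and the other names are hypothetical, and the real driver additionally holds event_lock and handles the done == 2 (abandoned waiter) case shown above.

/*
 * Minimal userspace sketch (not kernel code) of the completion dispatch
 * used by the DPC routines above: a finished request either invokes its
 * asynchronous callback or marks itself done for a synchronous waiter.
 * All names here are hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>

struct req {
	void (*callback)(void *ctx, struct req *r);	/* async path */
	void *callback_data;
	bool done;					/* sync path flag */
};

/* Called from the "interrupt" side when the device answers a request. */
static void req_complete(struct req *r)
{
	if (r->callback)
		r->callback(r->callback_data, r);	/* hand result to the owner */
	else
		r->done = true;				/* a real waiter blocks on this */
}

static void print_cb(void *ctx, struct req *r)
{
	(void)r;
	printf("async request finished: %s\n", (const char *)ctx);
}

int main(void)
{
	struct req async_req = { .callback = print_cb, .callback_data = "disk read" };
	struct req sync_req = { 0 };

	req_complete(&async_req);	/* callback fires here */
	req_complete(&sync_req);	/* a waiter would now be woken */
	printf("sync request done = %d\n", sync_req.done);
	return 0;
}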
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * comminit.c * * Abstract: This supports the initialization of the host adapter commuication interface. * This is a platform dependent module for the pci cyclone board. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/mm.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include "aacraid.h" struct aac_common aac_config = { .irq_mod = 1 }; static inline int aac_is_msix_mode(struct aac_dev *dev) { u32 status = 0; if (aac_is_src(dev)) status = src_readl(dev, MUnit.OMR); return (status & AAC_INT_MODE_MSIX); } static inline void aac_change_to_intx(struct aac_dev *dev) { aac_src_access_devreg(dev, AAC_DISABLE_MSIX); aac_src_access_devreg(dev, AAC_ENABLE_INTX); } static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) { unsigned char *base; unsigned long size, align; const unsigned long fibsize = dev->max_fib_size; const unsigned long printfbufsiz = 256; unsigned long host_rrq_size, aac_init_size; union aac_init *init; dma_addr_t phys; unsigned long aac_max_hostphysmempages; if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && !dev->sa_firmware)) { host_rrq_size = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) * sizeof(u32); aac_init_size = sizeof(union aac_init); } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware) { host_rrq_size = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX; aac_init_size = sizeof(union aac_init) + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq); } else { host_rrq_size = 0; aac_init_size = sizeof(union aac_init); } size = fibsize + aac_init_size + commsize + commalign + printfbufsiz + host_rrq_size; base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL); if (base == NULL) { printk(KERN_ERR "aacraid: unable to create mapping.\n"); return 0; } dev->comm_addr = (void *)base; dev->comm_phys = phys; dev->comm_size = size; if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) || (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) || (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) { dev->host_rrq = (u32 *)(base + fibsize); dev->host_rrq_pa = phys + fibsize; memset(dev->host_rrq, 0, host_rrq_size); } dev->init = (union aac_init *)(base + fibsize + host_rrq_size); dev->init_pa = phys + fibsize + host_rrq_size; init = dev->init; if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { int i; u64 addr; init->r8.init_struct_revision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8); init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | INITFLAGS_DRIVER_USES_UTC_TIME | INITFLAGS_DRIVER_SUPPORTS_PM); init->r8.init_flags |= cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE); init->r8.rr_queue_count = cpu_to_le32(dev->max_msix); init->r8.max_io_size = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); init->r8.max_num_aif = init->r8.reserved1 = init->r8.reserved2 = 0; for 
(i = 0; i < dev->max_msix; i++) { addr = (u64)dev->host_rrq_pa + dev->vector_cap * i * sizeof(u32); init->r8.rrq[i].host_addr_high = cpu_to_le32( upper_32_bits(addr)); init->r8.rrq[i].host_addr_low = cpu_to_le32( lower_32_bits(addr)); init->r8.rrq[i].msix_id = i; init->r8.rrq[i].element_count = cpu_to_le16( (u16)dev->vector_cap); init->r8.rrq[i].comp_thresh = init->r8.rrq[i].unused = 0; } pr_warn("aacraid: Comm Interface type3 enabled\n"); } else { init->r7.init_struct_revision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); if (dev->max_fib_size != sizeof(struct hw_fib)) init->r7.init_struct_revision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION); init->r7.fsrev = cpu_to_le32(dev->fsrev); /* * Adapter Fibs are the first thing allocated so that they * start page aligned */ dev->aif_base_va = (struct hw_fib *)base; init->r7.adapter_fibs_virtual_address = 0; init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys); init->r7.adapter_fibs_size = cpu_to_le32(fibsize); init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib)); /* * number of 4k pages of host physical memory. The aacraid fw * needs this number to be less than 4gb worth of pages. New * firmware doesn't have any issues with the mapping system, but * older Firmware did, and had *troubles* dealing with the math * overloading past 32 bits, thus we must limit this field. */ aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12; if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES) init->r7.host_phys_mem_pages = cpu_to_le32(aac_max_hostphysmempages); else init->r7.host_phys_mem_pages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); init->r7.init_flags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | INITFLAGS_DRIVER_SUPPORTS_PM); init->r7.max_io_commands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); init->r7.max_io_size = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size); init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif); if (dev->comm_interface == AAC_COMM_MESSAGE) { init->r7.init_flags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); pr_warn("aacraid: Comm Interface enabled\n"); } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { init->r7.init_struct_revision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); init->r7.init_flags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); init->r7.host_rrq_addr_high = cpu_to_le32(upper_32_bits(dev->host_rrq_pa)); init->r7.host_rrq_addr_low = cpu_to_le32(lower_32_bits(dev->host_rrq_pa)); pr_warn("aacraid: Comm Interface type1 enabled\n"); } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { init->r7.init_struct_revision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7); init->r7.init_flags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); init->r7.host_rrq_addr_high = cpu_to_le32(upper_32_bits(dev->host_rrq_pa)); init->r7.host_rrq_addr_low = cpu_to_le32(lower_32_bits(dev->host_rrq_pa)); init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix); /* must be the COMM_PREFERRED_SETTINGS values */ pr_warn("aacraid: Comm Interface type2 enabled\n"); } } /* * Increment the base address by the amount already used */ base = base + fibsize + host_rrq_size + aac_init_size; phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + aac_init_size); /* * Align the beginning of Headers to commalign */ align = (commalign 
- ((uintptr_t)(base) & (commalign - 1))); base = base + align; phys = phys + align; /* * Fill in addresses of the Comm Area Headers and Queues */ *commaddr = base; if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) init->r7.comm_header_address = cpu_to_le32((u32)phys); /* * Increment the base address by the size of the CommArea */ base = base + commsize; phys = phys + commsize; /* * Place the Printf buffer area after the Fast I/O comm area. */ dev->printfbuf = (void *)base; if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) { init->r7.printfbuf = cpu_to_le32(phys); init->r7.printfbufsiz = cpu_to_le32(printfbufsiz); } memset(base, 0, printfbufsiz); return 1; } static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) { atomic_set(&q->numpending, 0); q->dev = dev; init_waitqueue_head(&q->cmdready); INIT_LIST_HEAD(&q->cmdq); init_waitqueue_head(&q->qfull); spin_lock_init(&q->lockdata); q->lock = &q->lockdata; q->headers.producer = (__le32 *)mem; q->headers.consumer = (__le32 *)(mem+1); *(q->headers.producer) = cpu_to_le32(qsize); *(q->headers.consumer) = cpu_to_le32(qsize); q->entries = qsize; } static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data) { int *active = data; if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE) *active = *active + 1; return true; } static void aac_wait_for_io_completion(struct aac_dev *aac) { int i = 0, active; for (i = 60; i; --i) { active = 0; scsi_host_busy_iter(aac->scsi_host_ptr, wait_for_io_iter, &active); /* * We can exit If all the commands are complete */ if (active == 0) break; dev_info(&aac->pdev->dev, "Wait for %d commands to complete\n", active); ssleep(1); } if (active) dev_err(&aac->pdev->dev, "%d outstanding commands during shutdown\n", active); } /** * aac_send_shutdown - shutdown an adapter * @dev: Adapter to shutdown * * This routine will send a VM_CloseAll (shutdown) request to the adapter. */ int aac_send_shutdown(struct aac_dev * dev) { struct fib * fibctx; struct aac_close *cmd; int status = 0; if (aac_adapter_check_health(dev)) return status; if (!dev->adapter_shutdown) { mutex_lock(&dev->ioctl_mutex); dev->adapter_shutdown = 1; mutex_unlock(&dev->ioctl_mutex); } aac_wait_for_io_completion(dev); fibctx = aac_fib_alloc(dev); if (!fibctx) return -ENOMEM; aac_fib_init(fibctx); cmd = (struct aac_close *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_CloseAll); cmd->cid = cpu_to_le32(0xfffffffe); status = aac_fib_send(ContainerCommand, fibctx, sizeof(struct aac_close), FsaNormal, -2 /* Timeout silently */, 1, NULL, NULL); if (status >= 0) aac_fib_complete(fibctx); /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); if (aac_is_src(dev) && dev->msi_enabled) aac_set_intx_mode(dev); return status; } /** * aac_comm_init - Initialise FSA data structures * @dev: Adapter to initialise * * Initializes the data structures that are required for the FSA commuication * interface to operate. * Returns * 1 - if we were able to init the commuication interface. * 0 - If there were errors initing. This is a fatal error. */ static int aac_comm_init(struct aac_dev * dev) { unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2; unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES; u32 *headers; struct aac_entry * queues; unsigned long size; struct aac_queue_block * comm = dev->queues; /* * Now allocate and initialize the zone structures used as our * pool of FIB context records. 
The size of the zone is based * on the system memory size. We also initialize the mutex used * to protect the zone. */ spin_lock_init(&dev->fib_lock); /* * Allocate the physically contiguous space for the commuication * queue headers. */ size = hdrsize + queuesize; if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT)) return -ENOMEM; queues = (struct aac_entry *)(((ulong)headers) + hdrsize); /* Adapter to Host normal priority Command queue */ comm->queue[HostNormCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); queues += HOST_NORM_CMD_ENTRIES; headers += 2; /* Adapter to Host high priority command queue */ comm->queue[HostHighCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES); queues += HOST_HIGH_CMD_ENTRIES; headers +=2; /* Host to adapter normal priority command queue */ comm->queue[AdapNormCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES); queues += ADAP_NORM_CMD_ENTRIES; headers += 2; /* host to adapter high priority command queue */ comm->queue[AdapHighCmdQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES); queues += ADAP_HIGH_CMD_ENTRIES; headers += 2; /* adapter to host normal priority response queue */ comm->queue[HostNormRespQueue].base = queues; aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES); queues += HOST_NORM_RESP_ENTRIES; headers += 2; /* adapter to host high priority response queue */ comm->queue[HostHighRespQueue].base = queues; aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES); queues += HOST_HIGH_RESP_ENTRIES; headers += 2; /* host to adapter normal priority response queue */ comm->queue[AdapNormRespQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES); queues += ADAP_NORM_RESP_ENTRIES; headers += 2; /* host to adapter high priority response queue */ comm->queue[AdapHighRespQueue].base = queues; aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES); comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock; comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock; comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock; comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock; return 0; } void aac_define_int_mode(struct aac_dev *dev) { int i, msi_count, min_msix; msi_count = i = 0; /* max. vectors from GET_COMM_PREFERRED_SETTINGS */ if (dev->max_msix == 0 || dev->pdev->device == PMC_DEVICE_S6 || dev->sync_mode) { dev->max_msix = 1; dev->vector_cap = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; return; } /* Don't bother allocating more MSI-X vectors than cpus */ msi_count = min(dev->max_msix, (unsigned int)num_online_cpus()); dev->max_msix = msi_count; if (msi_count > AAC_MAX_MSIX) msi_count = AAC_MAX_MSIX; if (msi_count > 1 && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { min_msix = 2; i = pci_alloc_irq_vectors(dev->pdev, min_msix, msi_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (i > 0) { dev->msi_enabled = 1; msi_count = i; } else { dev->msi_enabled = 0; dev_err(&dev->pdev->dev, "MSIX not supported!! 
Will try INTX 0x%x.\n", i); } } if (!dev->msi_enabled) dev->max_msix = msi_count = 1; else { if (dev->max_msix > msi_count) dev->max_msix = msi_count; } if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware) dev->vector_cap = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB; else dev->vector_cap = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) / msi_count; } struct aac_dev *aac_init_adapter(struct aac_dev *dev) { u32 status[5]; struct Scsi_Host * host = dev->scsi_host_ptr; extern int aac_sync_mode; /* * Check the preferred comm settings, defaults from template. */ dev->management_fib_count = 0; spin_lock_init(&dev->manage_lock); spin_lock_init(&dev->sync_lock); spin_lock_init(&dev->iq_lock); dev->max_fib_size = sizeof(struct hw_fib); dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct sgentry); dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_interface = dev->raw_io_64 = 0; /* * Enable INTX mode, if not done already Enabled */ if (aac_is_msix_mode(dev)) { aac_change_to_intx(dev); dev_info(&dev->pdev->dev, "Changed firmware to INTX mode"); } if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4)) && (status[0] == 0x00000001)) { dev->doorbell_mask = status[3]; if (status[1] & AAC_OPT_NEW_COMM_64) dev->raw_io_64 = 1; dev->sync_mode = aac_sync_mode; if (dev->a_ops.adapter_comm && (status[1] & AAC_OPT_NEW_COMM)) { dev->comm_interface = AAC_COMM_MESSAGE; dev->raw_io_interface = 1; if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) { /* driver supports TYPE1 (Tupelo) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; } else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) { /* driver supports TYPE2 (Denali, Yosemite) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; } else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) { /* driver supports TYPE3 (Yosemite, Thor) */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE3; } else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) { /* not supported TYPE - switch to sync. mode */ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; dev->sync_mode = 1; } } if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) && (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE))) dev->sa_firmware = 1; else dev->sa_firmware = 0; if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)) dev->soft_reset_support = 1; else dev->soft_reset_support = 0; if ((dev->comm_interface == AAC_COMM_MESSAGE) && (status[2] > dev->base_size)) { aac_adapter_ioremap(dev, 0); dev->base_size = status[2]; if (aac_adapter_ioremap(dev, status[2])) { /* remap failed, go back ... 
*/ dev->comm_interface = AAC_COMM_PRODUCER; if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { printk(KERN_WARNING "aacraid: unable to map adapter.\n"); return NULL; } } } } dev->max_msix = 0; dev->msi_enabled = 0; dev->adapter_shutdown = 0; if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4)) && (status[0] == 0x00000001)) { /* * status[1] >> 16 maximum command size in KB * status[1] & 0xFFFF maximum FIB size * status[2] >> 16 maximum SG elements to driver * status[2] & 0xFFFF maximum SG elements from driver * status[3] & 0xFFFF maximum number FIBs outstanding */ host->max_sectors = (status[1] >> 16) << 1; /* Multiple of 32 for PMC */ dev->max_fib_size = status[1] & 0xFFE0; host->sg_tablesize = status[2] >> 16; dev->sg_tablesize = status[2] & 0xFFFF; if (aac_is_src(dev)) { if (host->can_queue > (status[3] >> 16) - AAC_NUM_MGT_FIB) host->can_queue = (status[3] >> 16) - AAC_NUM_MGT_FIB; } else if (host->can_queue > (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB) host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; dev->max_num_aif = status[4] & 0xFFFF; } if (numacb > 0) { if (numacb < host->can_queue) host->can_queue = numacb; else pr_warn("numacb=%d ignored\n", numacb); } if (aac_is_src(dev)) aac_define_int_mode(dev); /* * Ok now init the communication subsystem */ dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL); if (dev->queues == NULL) { printk(KERN_ERR "Error could not allocate comm region.\n"); return NULL; } if (aac_comm_init(dev)<0){ kfree(dev->queues); return NULL; } /* * Initialize the list of fibs */ if (aac_fib_setup(dev) < 0) { kfree(dev->queues); return NULL; } INIT_LIST_HEAD(&dev->fib_list); INIT_LIST_HEAD(&dev->sync_fib_list); return dev; }
linux-master
drivers/scsi/aacraid/comminit.c
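aac_alloc_comm() in comminit.c above carves one DMA-coherent allocation into the adapter FIB area, the host RRQ rings, the init structure and, after rounding up to commalign, the communication queue headers. The following is a minimal userspace sketch of that power-of-two alignment bookkeeping, not kernel code; the sizes, names and the align_up helper are hypothetical and the layout is simplified.

/*
 * Minimal userspace sketch (not kernel code) of carving a single buffer
 * into regions, with the last region aligned to a power-of-two boundary.
 * Sizes and names are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

/* Round off up to the next multiple of align (align must be a power of two). */
static size_t align_up(size_t off, size_t align)
{
	return (off + align - 1) & ~(align - 1);
}

int main(void)
{
	size_t fibsize   = 8192;	/* adapter FIB area       */
	size_t rrq_size  = 1024;	/* host RRQ ring          */
	size_t init_size = 512;		/* init structure         */
	size_t commalign = 2048;	/* queue header alignment */

	size_t off = 0;
	size_t fib_off  = off;	off += fibsize;
	size_t rrq_off  = off;	off += rrq_size;
	size_t init_off = off;	off += init_size;
	size_t hdr_off  = align_up(off, commalign);	/* comm headers start here */

	printf("fib@%zu rrq@%zu init@%zu headers@%zu\n",
	       fib_off, rrq_off, init_off, hdr_off);
	return 0;
}

Note that the driver's own expression, commalign - (base & (commalign - 1)), always advances by at least one byte and by a full commalign when the base happens to be aligned already; the sketch uses the more common round-up form.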
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * commctrl.c * * Abstract: Contains all routines for control of the AFA comm layer */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/blkdev.h> #include <linux/compat.h> #include <linux/delay.h> /* ssleep prototype */ #include <linux/kthread.h> #include <linux/uaccess.h> #include <scsi/scsi_host.h> #include "aacraid.h" # define AAC_DEBUG_PREAMBLE KERN_INFO # define AAC_DEBUG_POSTAMBLE /** * ioctl_send_fib - send a FIB from userspace * @dev: adapter is being processed * @arg: arguments to the ioctl call * * This routine sends a fib to the adapter on behalf of a user level * program. */ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) { struct hw_fib * kfib; struct fib *fibptr; struct hw_fib * hw_fib = (struct hw_fib *)0; dma_addr_t hw_fib_pa = (dma_addr_t)0LL; unsigned int size, osize; int retval; if (dev->in_reset) { return -EBUSY; } fibptr = aac_fib_alloc(dev); if(fibptr == NULL) { return -ENOMEM; } kfib = fibptr->hw_fib_va; /* * First copy in the header so that we can check the size field. */ if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { aac_fib_free(fibptr); return -EFAULT; } /* * Since we copy based on the fib header size, make sure that we * will not overrun the buffer when we copy the memory. Return * an error if we would. */ osize = size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize)) size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) { dma_addr_t daddr; if (size > 2048) { retval = -EINVAL; goto cleanup; } kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr, GFP_KERNEL); if (!kfib) { retval = -ENOMEM; goto cleanup; } /* Highjack the hw_fib */ hw_fib = fibptr->hw_fib_va; hw_fib_pa = fibptr->hw_fib_pa; fibptr->hw_fib_va = kfib; fibptr->hw_fib_pa = daddr; memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); memcpy(kfib, hw_fib, dev->max_fib_size); } if (copy_from_user(kfib, arg, size)) { retval = -EFAULT; goto cleanup; } /* Sanity check the second copy */ if ((osize != le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr)) || (size < le16_to_cpu(kfib->header.SenderSize))) { retval = -EINVAL; goto cleanup; } if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { aac_adapter_interrupt(dev); /* * Since we didn't really send a fib, zero out the state to allow * cleanup code not to assert. */ kfib->header.XferState = 0; } else { retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, le16_to_cpu(kfib->header.Size) , FsaNormal, 1, 1, NULL, NULL); if (retval) { goto cleanup; } if (aac_fib_complete(fibptr) != 0) { retval = -EINVAL; goto cleanup; } } /* * Make sure that the size returned by the adapter (which includes * the header) is less than or equal to the size of a fib, so we * don't corrupt application data. Then copy that size to the user * buffer. (Don't try to add the header information again, since it * was already included by the adapter.) 
*/ retval = 0; if (copy_to_user(arg, (void *)kfib, size)) retval = -EFAULT; cleanup: if (hw_fib) { dma_free_coherent(&dev->pdev->dev, size, kfib, fibptr->hw_fib_pa); fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_fib_va = hw_fib; } if (retval != -ERESTARTSYS) aac_fib_free(fibptr); return retval; } /** * open_getadapter_fib - Get the next fib * @dev: adapter is being processed * @arg: arguments to the open call * * This routine will get the next Fib, if available, from the AdapterFibContext * passed in from the user. */ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct aac_fib_context * fibctx; int status; fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL); if (fibctx == NULL) { status = -ENOMEM; } else { unsigned long flags; struct list_head * entry; struct aac_fib_context * context; fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT; fibctx->size = sizeof(struct aac_fib_context); /* * Yes yes, I know this could be an index, but we have a * better guarantee of uniqueness for the locked loop below. * Without the aid of a persistent history, this also helps * reduce the chance that the opaque context would be reused. */ fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF); /* * Initialize the mutex used to wait for the next AIF. */ init_completion(&fibctx->completion); fibctx->wait = 0; /* * Initialize the fibs and set the count of fibs on * the list to 0. */ fibctx->count = 0; INIT_LIST_HEAD(&fibctx->fib_list); fibctx->jiffies = jiffies/HZ; /* * Now add this context onto the adapter's * AdapterFibContext list. */ spin_lock_irqsave(&dev->fib_lock, flags); /* Ensure that we have a unique identifier */ entry = dev->fib_list.next; while (entry != &dev->fib_list) { context = list_entry(entry, struct aac_fib_context, next); if (context->unique == fibctx->unique) { /* Not unique (32 bits) */ fibctx->unique++; entry = dev->fib_list.next; } else { entry = entry->next; } } list_add_tail(&fibctx->next, &dev->fib_list); spin_unlock_irqrestore(&dev->fib_lock, flags); if (copy_to_user(arg, &fibctx->unique, sizeof(fibctx->unique))) { status = -EFAULT; } else { status = 0; } } return status; } struct compat_fib_ioctl { u32 fibctx; s32 wait; compat_uptr_t fib; }; /** * next_getadapter_fib - get the next fib * @dev: adapter to use * @arg: ioctl argument * * This routine will get the next Fib, if available, from the AdapterFibContext * passed in from the user. */ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct fib_ioctl f; struct fib *fib; struct aac_fib_context *fibctx; int status; struct list_head * entry; unsigned long flags; if (in_compat_syscall()) { struct compat_fib_ioctl cf; if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl))) return -EFAULT; f.fibctx = cf.fibctx; f.wait = cf.wait; f.fib = compat_ptr(cf.fib); } else { if (copy_from_user(&f, arg, sizeof(struct fib_ioctl))) return -EFAULT; } /* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address */ spin_lock_irqsave(&dev->fib_lock, flags); entry = dev->fib_list.next; fibctx = NULL; while (entry != &dev->fib_list) { fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the AdapterFibContext from the Input parameters. 
*/ if (fibctx->unique == f.fibctx) { /* We found a winner */ break; } entry = entry->next; fibctx = NULL; } if (!fibctx) { spin_unlock_irqrestore(&dev->fib_lock, flags); dprintk ((KERN_INFO "Fib Context not found\n")); return -EINVAL; } if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || (fibctx->size != sizeof(struct aac_fib_context))) { spin_unlock_irqrestore(&dev->fib_lock, flags); dprintk ((KERN_INFO "Fib Context corrupt?\n")); return -EINVAL; } status = 0; /* * If there are no fibs to send back, then either wait or return * -EAGAIN */ return_fib: if (!list_empty(&fibctx->fib_list)) { /* * Pull the next fib from the fibs */ entry = fibctx->fib_list.next; list_del(entry); fib = list_entry(entry, struct fib, fiblink); fibctx->count--; spin_unlock_irqrestore(&dev->fib_lock, flags); if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) { kfree(fib->hw_fib_va); kfree(fib); return -EFAULT; } /* * Free the space occupied by this copy of the fib. */ kfree(fib->hw_fib_va); kfree(fib); status = 0; } else { spin_unlock_irqrestore(&dev->fib_lock, flags); /* If someone killed the AIF aacraid thread, restart it */ status = !dev->aif_thread; if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { /* Be paranoid, be very paranoid! */ kthread_stop(dev->thread); ssleep(1); dev->aif_thread = 0; dev->thread = kthread_run(aac_command_thread, dev, "%s", dev->name); ssleep(1); } if (f.wait) { if (wait_for_completion_interruptible(&fibctx->completion) < 0) { status = -ERESTARTSYS; } else { /* Lock again and retry */ spin_lock_irqsave(&dev->fib_lock, flags); goto return_fib; } } else { status = -EAGAIN; } } fibctx->jiffies = jiffies/HZ; return status; } int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx) { struct fib *fib; /* * First free any FIBs that have not been consumed. */ while (!list_empty(&fibctx->fib_list)) { struct list_head * entry; /* * Pull the next fib from the fibs */ entry = fibctx->fib_list.next; list_del(entry); fib = list_entry(entry, struct fib, fiblink); fibctx->count--; /* * Free the space occupied by this copy of the fib. */ kfree(fib->hw_fib_va); kfree(fib); } /* * Remove the Context from the AdapterFibContext List */ list_del(&fibctx->next); /* * Invalidate context */ fibctx->type = 0; /* * Free the space occupied by the Context */ kfree(fibctx); return 0; } /** * close_getadapter_fib - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine will close down the fibctx passed in from the user. 
*/ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct aac_fib_context *fibctx; int status; unsigned long flags; struct list_head * entry; /* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address */ entry = dev->fib_list.next; fibctx = NULL; while(entry != &dev->fib_list) { fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the fibctx from the input parameters */ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ break; entry = entry->next; fibctx = NULL; } if (!fibctx) return 0; /* Already gone */ if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || (fibctx->size != sizeof(struct aac_fib_context))) return -EINVAL; spin_lock_irqsave(&dev->fib_lock, flags); status = aac_close_fib_context(dev, fibctx); spin_unlock_irqrestore(&dev->fib_lock, flags); return status; } /** * check_revision - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine returns the driver version. * Under Linux, there have been no version incompatibilities, so this is * simple! */ static int check_revision(struct aac_dev *dev, void __user *arg) { struct revision response; char *driver_version = aac_driver_version; u32 version; response.compat = 1; version = (simple_strtol(driver_version, &driver_version, 10) << 24) | 0x00000400; version += simple_strtol(driver_version + 1, &driver_version, 10) << 16; version += simple_strtol(driver_version + 1, NULL, 10); response.version = cpu_to_le32(version); # ifdef AAC_DRIVER_BUILD response.build = cpu_to_le32(AAC_DRIVER_BUILD); # else response.build = cpu_to_le32(9999); # endif if (copy_to_user(arg, &response, sizeof(response))) return -EFAULT; return 0; } /** * aac_send_raw_srb() * @dev: adapter is being processed * @arg: arguments to the send call */ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) { struct fib* srbfib; int status; struct aac_srb *srbcmd = NULL; struct aac_hba_cmd_req *hbacmd = NULL; struct user_aac_srb *user_srbcmd = NULL; struct user_aac_srb __user *user_srb = arg; struct aac_srb_reply __user *user_reply; u32 chn; u32 fibsize = 0; u32 flags = 0; s32 rcode = 0; u32 data_dir; void __user *sg_user[HBA_MAX_SG_EMBEDDED]; void *sg_list[HBA_MAX_SG_EMBEDDED]; u32 sg_count[HBA_MAX_SG_EMBEDDED]; u32 sg_indx = 0; u32 byte_count = 0; u32 actual_fibsize64, actual_fibsize = 0; int i; int is_native_device; u64 address; if (dev->in_reset) { dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); return -EBUSY; } if (!capable(CAP_SYS_ADMIN)){ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); return -EPERM; } /* * Allocate and initialize a Fib then setup a SRB command */ if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM; } memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n")); rcode = -EFAULT; goto cleanup; } if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } user_srbcmd = memdup_user(user_srb, fibsize); if (IS_ERR(user_srbcmd)) { rcode = PTR_ERR(user_srbcmd); user_srbcmd = NULL; goto cleanup; } flags = user_srbcmd->flags; /* from user in cpu order */ switch (flags & (SRB_DataIn | SRB_DataOut)) { case SRB_DataOut: data_dir = DMA_TO_DEVICE; break; case (SRB_DataIn | 
SRB_DataOut): data_dir = DMA_BIDIRECTIONAL; break; case SRB_DataIn: data_dir = DMA_FROM_DEVICE; break; default: data_dir = DMA_NONE; } if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", user_srbcmd->sg.count)); rcode = -EINVAL; goto cleanup; } if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n")); rcode = -EINVAL; goto cleanup; } actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * (sizeof(struct sgentry64) - sizeof(struct sgentry)); /* User made a mistake - should not continue */ if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) { dprintk((KERN_DEBUG"aacraid: Bad Size specified in " "Raw SRB command calculated fibsize=%lu;%lu " "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu " "issued fibsize=%d\n", actual_fibsize, actual_fibsize64, user_srbcmd->sg.count, sizeof(struct aac_srb), sizeof(struct sgentry), sizeof(struct sgentry64), fibsize)); rcode = -EINVAL; goto cleanup; } chn = user_srbcmd->channel; if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS && dev->hba_map[chn][user_srbcmd->id].devtype == AAC_DEVTYPE_NATIVE_RAW) { is_native_device = 1; hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va; memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */ /* iu_type is a parameter of aac_hba_send */ switch (data_dir) { case DMA_TO_DEVICE: hbacmd->byte1 = 2; break; case DMA_FROM_DEVICE: case DMA_BIDIRECTIONAL: hbacmd->byte1 = 1; break; case DMA_NONE: default: break; } hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun); hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus; /* * we fill in reply_qid later in aac_src_deliver_message * we fill in iu_type, request_id later in aac_hba_send * we fill in emb_data_desc_count, data_length later * in sg list build */ memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb)); address = (u64)srbfib->hw_error_pa; hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); hbacmd->emb_data_desc_count = cpu_to_le32(user_srbcmd->sg.count); srbfib->hbacmd_size = 64 + user_srbcmd->sg.count * sizeof(struct aac_hba_sgl); } else { is_native_device = 0; aac_fib_init(srbfib); /* raw_srb FIB is not FastResponseCapable */ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); srbcmd = (struct aac_srb *) fib_data(srbfib); // Fix up srb for endian and force some values srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this srbcmd->channel = cpu_to_le32(user_srbcmd->channel); srbcmd->id = cpu_to_le32(user_srbcmd->id); srbcmd->lun = cpu_to_le32(user_srbcmd->lun); srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); srbcmd->flags = cpu_to_le32(flags); srbcmd->retry_limit = 0; // Obsolete parameter srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); } byte_count = 0; if (is_native_device) { struct user_sgmap *usg32 = &user_srbcmd->sg; struct user_sgmap64 *usg64 = (struct user_sgmap64 *)&user_srbcmd->sg; for (i = 0; i < usg32->count; i++) { void *p; u64 addr; sg_count[i] = (actual_fibsize64 == fibsize) ? 
usg64->sg[i].count : usg32->sg[i].count; if (sg_count[i] > (dev->scsi_host_ptr->max_sectors << 9)) { pr_err("aacraid: upsg->sg[%d].count=%u>%u\n", i, sg_count[i], dev->scsi_host_ptr->max_sectors << 9); rcode = -EINVAL; goto cleanup; } p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) { rcode = -ENOMEM; goto cleanup; } if (actual_fibsize64 == fibsize) { addr = (u64)usg64->sg[i].addr[0]; addr += ((u64)usg64->sg[i].addr[1]) << 32; } else { addr = (u64)usg32->sg[i].addr; } sg_user[i] = (void __user *)(uintptr_t)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i], sg_count[i])) { rcode = -EFAULT; goto cleanup; } } addr = dma_map_single(&dev->pdev->dev, p, sg_count[i], data_dir); hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32)); hbacmd->sge[i].addr_lo = cpu_to_le32( (u32)(addr & 0xffffffff)); hbacmd->sge[i].len = cpu_to_le32(sg_count[i]); hbacmd->sge[i].flags = 0; byte_count += sg_count[i]; } if (usg32->count > 0) /* embedded sglist */ hbacmd->sge[usg32->count-1].flags = cpu_to_le32(0x40000000); hbacmd->data_length = cpu_to_le32(byte_count); status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib, NULL, NULL); } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; /* * This should also catch if user used the 32 bit sgmap */ if (actual_fibsize64 == fibsize) { actual_fibsize = actual_fibsize64; for (i = 0; i < upsg->count; i++) { u64 addr; void* p; sg_count[i] = upsg->sg[i].count; if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } p = kmalloc(sg_count[i], GFP_KERNEL); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, upsg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)upsg->sg[i].addr[0]; addr += ((u64)upsg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)(uintptr_t)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i], sg_count[i])){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = dma_map_single(&dev->pdev->dev, p, sg_count[i], data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += sg_count[i]; psg->sg[i].count = cpu_to_le32(sg_count[i]); } } else { struct user_sgmap* usg; usg = kmemdup(upsg, actual_fibsize - sizeof(struct aac_srb) + sizeof(struct sgmap), GFP_KERNEL); if (!usg) { dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); rcode = -ENOMEM; goto cleanup; } actual_fibsize = actual_fibsize64; for (i = 0; i < usg->count; i++) { u64 addr; void* p; sg_count[i] = usg->sg[i].count; if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? 
(dev->scsi_host_ptr->max_sectors << 9) : 65536)) { kfree(usg); rcode = -EINVAL; goto cleanup; } p = kmalloc(sg_count[i], GFP_KERNEL); if(!p) { dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, usg->count)); kfree(usg); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i], sg_count[i])) { kfree (usg); dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = dma_map_single(&dev->pdev->dev, p, sg_count[i], data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += sg_count[i]; psg->sg[i].count = cpu_to_le32(sg_count[i]); } kfree (usg); } srbcmd->count = cpu_to_le32(byte_count); if (user_srbcmd->sg.count) psg->count = cpu_to_le32(sg_indx+1); else psg->count = 0; status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; struct sgmap* psg = &srbcmd->sg; if (actual_fibsize64 == fibsize) { struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; for (i = 0; i < upsg->count; i++) { uintptr_t addr; void* p; sg_count[i] = usg->sg[i].count; if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, usg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)usg->sg[i].addr[0]; addr += ((u64)usg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i], sg_count[i])){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = dma_map_single(&dev->pdev->dev, p, usg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(sg_count[i]); } } else { for (i = 0; i < upsg->count; i++) { dma_addr_t addr; void* p; sg_count[i] = upsg->sg[i].count; if (sg_count[i] > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? 
(dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", sg_count[i], i, upsg->count)); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i], sg_count[i])) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = dma_map_single(&dev->pdev->dev, p, sg_count[i], data_dir); psg->sg[i].addr = cpu_to_le32(addr); byte_count += sg_count[i]; psg->sg[i].count = cpu_to_le32(sg_count[i]); } } srbcmd->count = cpu_to_le32(byte_count); if (user_srbcmd->sg.count) psg->count = cpu_to_le32(sg_indx+1); else psg->count = 0; status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status == -ERESTARTSYS) { rcode = -ERESTARTSYS; goto cleanup; } if (status != 0) { dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); rcode = -ENXIO; goto cleanup; } if (flags & SRB_DataIn) { for(i = 0 ; i <= sg_indx; i++){ if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); rcode = -EFAULT; goto cleanup; } } } user_reply = arg + fibsize; if (is_native_device) { struct aac_hba_resp *err = &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err; struct aac_srb_reply reply; memset(&reply, 0, sizeof(reply)); reply.status = ST_OK; if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) { /* fast response */ reply.srb_status = SRB_STATUS_SUCCESS; reply.scsi_status = 0; reply.data_xfer_length = byte_count; reply.sense_data_size = 0; memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE); } else { reply.srb_status = err->service_response; reply.scsi_status = err->status; reply.data_xfer_length = byte_count - le32_to_cpu(err->residual_count); reply.sense_data_size = err->sense_response_data_len; memcpy(reply.sense_data, err->sense_response_buf, AAC_SENSE_BUFFERSIZE); } if (copy_to_user(user_reply, &reply, sizeof(struct aac_srb_reply))) { dprintk((KERN_DEBUG"aacraid: Copy to user failed\n")); rcode = -EFAULT; goto cleanup; } } else { struct aac_srb_reply *reply; reply = (struct aac_srb_reply *) fib_data(srbfib); if (copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))) { dprintk((KERN_DEBUG"aacraid: Copy to user failed\n")); rcode = -EFAULT; goto cleanup; } } cleanup: kfree(user_srbcmd); if (rcode != -ERESTARTSYS) { for (i = 0; i <= sg_indx; i++) kfree(sg_list[i]); aac_fib_complete(srbfib); aac_fib_free(srbfib); } return rcode; } struct aac_pci_info { u32 bus; u32 slot; }; static int aac_get_pci_info(struct aac_dev* dev, void __user *arg) { struct aac_pci_info pci_info; pci_info.bus = dev->pdev->bus->number; pci_info.slot = PCI_SLOT(dev->pdev->devfn); if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); return -EFAULT; } return 0; } static int aac_get_hba_info(struct aac_dev *dev, void __user *arg) { struct aac_hba_info hbainfo; memset(&hbainfo, 0, sizeof(hbainfo)); hbainfo.adapter_number = (u8) dev->id; hbainfo.system_io_bus_number = dev->pdev->bus->number; hbainfo.device_number = (dev->pdev->devfn >> 3); hbainfo.function_number = (dev->pdev->devfn & 0x0007); hbainfo.vendor_id = dev->pdev->vendor; hbainfo.device_id = dev->pdev->device; hbainfo.sub_vendor_id = 
dev->pdev->subsystem_vendor; hbainfo.sub_system_id = dev->pdev->subsystem_device; if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) { dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n")); return -EFAULT; } return 0; } struct aac_reset_iop { u8 reset_type; }; static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg) { struct aac_reset_iop reset; int retval; if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop))) return -EFAULT; dev->adapter_shutdown = 1; mutex_unlock(&dev->ioctl_mutex); retval = aac_reset_adapter(dev, 0, reset.reset_type); mutex_lock(&dev->ioctl_mutex); return retval; } int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) { int status; mutex_lock(&dev->ioctl_mutex); if (dev->adapter_shutdown) { status = -EACCES; goto cleanup; } /* * HBA gets first crack */ status = aac_dev_ioctl(dev, cmd, arg); if (status != -ENOTTY) goto cleanup; switch (cmd) { case FSACTL_MINIPORT_REV_CHECK: status = check_revision(dev, arg); break; case FSACTL_SEND_LARGE_FIB: case FSACTL_SENDFIB: status = ioctl_send_fib(dev, arg); break; case FSACTL_OPEN_GET_ADAPTER_FIB: status = open_getadapter_fib(dev, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: status = next_getadapter_fib(dev, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: status = close_getadapter_fib(dev, arg); break; case FSACTL_SEND_RAW_SRB: status = aac_send_raw_srb(dev,arg); break; case FSACTL_GET_PCI_INFO: status = aac_get_pci_info(dev,arg); break; case FSACTL_GET_HBA_INFO: status = aac_get_hba_info(dev, arg); break; case FSACTL_RESET_IOP: status = aac_send_reset_adapter(dev, arg); break; default: status = -ENOTTY; break; } cleanup: mutex_unlock(&dev->ioctl_mutex); return status; }
linux-master
drivers/scsi/aacraid/commctrl.c
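ioctl_send_fib() in commctrl.c above copies the user buffer in two stages: first only the FIB header so its size field can be bounds-checked, then the full payload, after which the header is re-validated ("Sanity check the second copy") so a size that changed between the two copies cannot slip past the first check. The sketch below shows that double-fetch guard as plain userspace C, not kernel code; struct msg_hdr, recv_user_msg and MAX_MSG_SIZE are hypothetical stand-ins for the FIB header, the ioctl handler and dev->max_fib_size.

/*
 * Minimal userspace sketch (not kernel code) of the two-stage copy with
 * re-validation used by the FIB ioctl above.  All names and limits are
 * hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_MSG_SIZE 2048u

struct msg_hdr {
	uint16_t size;		/* payload bytes that follow the header */
};

/* Returns 0 on success, -1 if the user-supplied buffer fails validation. */
static int recv_user_msg(const void *user_buf, size_t user_len,
			 unsigned char *out, size_t out_len)
{
	struct msg_hdr hdr, hdr2;
	size_t total;

	if (user_len < sizeof(hdr))
		return -1;
	memcpy(&hdr, user_buf, sizeof(hdr));		/* first fetch: header only */

	total = sizeof(hdr) + hdr.size;
	if (total > MAX_MSG_SIZE || total > user_len || total > out_len)
		return -1;				/* reject before the big copy */

	memcpy(out, user_buf, total);			/* second fetch: whole message */

	memcpy(&hdr2, out, sizeof(hdr2));		/* re-check the copied header */
	if (hdr2.size != hdr.size)
		return -1;
	return 0;
}

int main(void)
{
	unsigned char buf[MAX_MSG_SIZE];
	struct { struct msg_hdr h; char body[8]; } m = { { 8 }, "payload" };

	printf("recv = %d\n", recv_user_msg(&m, sizeof(m), buf, sizeof(buf)));
	return 0;
}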
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * src.c * * Abstract: Hardware Device Interface for PMC SRC based controllers */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/time.h> #include <linux/interrupt.h> #include <scsi/scsi_host.h> #include "aacraid.h" static int aac_src_get_sync_status(struct aac_dev *dev); static irqreturn_t aac_src_intr_message(int irq, void *dev_id) { struct aac_msix_ctx *ctx; struct aac_dev *dev; unsigned long bellbits, bellbits_shifted; int vector_no; int isFastResponse, mode; u32 index, handle; ctx = (struct aac_msix_ctx *)dev_id; dev = ctx->dev; vector_no = ctx->vector_no; if (dev->msi_enabled) { mode = AAC_INT_MODE_MSI; if (vector_no == 0) { bellbits = src_readl(dev, MUnit.ODR_MSI); if (bellbits & 0x40000) mode |= AAC_INT_MODE_AIF; if (bellbits & 0x1000) mode |= AAC_INT_MODE_SYNC; } } else { mode = AAC_INT_MODE_INTX; bellbits = src_readl(dev, MUnit.ODR_R); if (bellbits & PmDoorBellResponseSent) { bellbits = PmDoorBellResponseSent; src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); } else { bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); if (bellbits_shifted & DoorBellAifPending) mode |= AAC_INT_MODE_AIF; else if (bellbits_shifted & OUTBOUNDDOORBELL_0) mode |= AAC_INT_MODE_SYNC; } } if (mode & AAC_INT_MODE_SYNC) { unsigned long sflags; struct list_head *entry; int send_it = 0; extern int aac_sync_mode; if (!aac_sync_mode && !dev->msi_enabled) { src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); } if (dev->sync_fib) { if (dev->sync_fib->callback) dev->sync_fib->callback(dev->sync_fib->callback_data, dev->sync_fib); spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { dev->management_fib_count--; complete(&dev->sync_fib->event_wait); } spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags); spin_lock_irqsave(&dev->sync_lock, sflags); if (!list_empty(&dev->sync_fib_list)) { entry = dev->sync_fib_list.next; dev->sync_fib = list_entry(entry, struct fib, fiblink); list_del(entry); send_it = 1; } else { dev->sync_fib = NULL; } spin_unlock_irqrestore(&dev->sync_lock, sflags); if (send_it) { aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } } if (!dev->msi_enabled) mode = 0; } if (mode & AAC_INT_MODE_AIF) { /* handle AIF */ if (dev->sa_firmware) { u32 events = src_readl(dev, MUnit.SCR0); aac_intr_normal(dev, events, 1, 0, NULL); writel(events, &dev->IndexRegs->Mailbox[0]); src_writel(dev, MUnit.IDR, 1 << 23); } else { if (dev->aif_thread && dev->fsa_dev) aac_intr_normal(dev, 0, 2, 0, NULL); } if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); mode = 0; } if (mode) { index = dev->host_rrq_idx[vector_no]; for (;;) { isFastResponse = 0; /* remove toggle bit (31) */ handle = le32_to_cpu((dev->host_rrq[index]) & 0x7fffffff); /* check fast response bits (30, 1) */ if (handle & 0x40000000) isFastResponse = 
1; handle &= 0x0000ffff; if (handle == 0) break; handle >>= 2; if (dev->msi_enabled && dev->max_msix > 1) atomic_dec(&dev->rrq_outstanding[vector_no]); aac_intr_normal(dev, handle, 0, isFastResponse, NULL); dev->host_rrq[index++] = 0; if (index == (vector_no + 1) * dev->vector_cap) index = vector_no * dev->vector_cap; dev->host_rrq_idx[vector_no] = index; } mode = 0; } return IRQ_HANDLED; } /** * aac_src_disable_interrupt - Disable interrupts * @dev: Adapter */ static void aac_src_disable_interrupt(struct aac_dev *dev) { src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); } /** * aac_src_enable_interrupt_message - Enable interrupts * @dev: Adapter */ static void aac_src_enable_interrupt_message(struct aac_dev *dev) { aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT); } /** * src_sync_cmd - send a command and wait * @dev: Adapter * @command: Command to execute * @p1: first parameter * @p2: second parameter * @p3: third parameter * @p4: forth parameter * @p5: fifth parameter * @p6: sixth parameter * @status: adapter status * @r1: first return value * @r2: second return valu * @r3: third return value * @r4: forth return value * * This routine will send a synchronous command to the adapter and wait * for its completion. */ static int src_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) { unsigned long start; unsigned long delay; int ok; /* * Write the command into Mailbox 0 */ writel(command, &dev->IndexRegs->Mailbox[0]); /* * Write the parameters into Mailboxes 1 - 6 */ writel(p1, &dev->IndexRegs->Mailbox[1]); writel(p2, &dev->IndexRegs->Mailbox[2]); writel(p3, &dev->IndexRegs->Mailbox[3]); writel(p4, &dev->IndexRegs->Mailbox[4]); /* * Clear the synch command doorbell to start on a clean slate. */ if (!dev->msi_enabled) src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); /* * Disable doorbell interrupts */ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); /* * Force the completion of the mask register write before issuing * the interrupt. */ src_readl(dev, MUnit.OIMR); /* * Signal that there is a new synch command */ src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) && !dev->in_soft_reset) { ok = 0; start = jiffies; if (command == IOP_RESET_ALWAYS) { /* Wait up to 10 sec */ delay = 10*HZ; } else { /* Wait up to 5 minutes */ delay = 300*HZ; } while (time_before(jiffies, start+delay)) { udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ /* * Mon960 will set doorbell0 bit when it has completed the command. */ if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { /* * Clear the doorbell. */ if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT); else src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); ok = 1; break; } /* * Yield the processor in case we are slow */ msleep(1); } if (unlikely(ok != 1)) { /* * Restore interrupt mask even though we timed out */ aac_adapter_enable_int(dev); return -ETIMEDOUT; } /* * Pull the synch status from Mailbox 0. */ if (status) *status = readl(&dev->IndexRegs->Mailbox[0]); if (r1) *r1 = readl(&dev->IndexRegs->Mailbox[1]); if (r2) *r2 = readl(&dev->IndexRegs->Mailbox[2]); if (r3) *r3 = readl(&dev->IndexRegs->Mailbox[3]); if (r4) *r4 = readl(&dev->IndexRegs->Mailbox[4]); if (command == GET_COMM_PREFERRED_SETTINGS) dev->max_msix = readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF; /* * Clear the synch command doorbell. 
*/ if (!dev->msi_enabled) src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); } /* * Restore interrupt mask */ aac_adapter_enable_int(dev); return 0; } /** * aac_src_interrupt_adapter - interrupt adapter * @dev: Adapter * * Send an interrupt to the i960 and breakpoint it. */ static void aac_src_interrupt_adapter(struct aac_dev *dev) { src_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } /** * aac_src_notify_adapter - send an event to the adapter * @dev: Adapter * @event: Event to send * * Notify the i960 that something it probably cares about has * happened. */ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) { switch (event) { case AdapNormCmdQue: src_writel(dev, MUnit.ODR_C, INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); break; case HostNormRespNotFull: src_writel(dev, MUnit.ODR_C, INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); break; case AdapNormRespQue: src_writel(dev, MUnit.ODR_C, INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); break; case HostNormCmdNotFull: src_writel(dev, MUnit.ODR_C, INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); break; case FastIo: src_writel(dev, MUnit.ODR_C, INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); break; case AdapPrintfDone: src_writel(dev, MUnit.ODR_C, INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); break; default: BUG(); break; } } /** * aac_src_start_adapter - activate adapter * @dev: Adapter * * Start up processing on an i960 based AAC adapter */ static void aac_src_start_adapter(struct aac_dev *dev) { union aac_init *init; int i; /* reset host_rrq_idx first */ for (i = 0; i < dev->max_msix; i++) { dev->host_rrq_idx[i] = i * dev->vector_cap; atomic_set(&dev->rrq_outstanding[i], 0); } atomic_set(&dev->msix_counter, 0); dev->fibs_pushed_no = 0; init = dev->init; if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { init->r8.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, lower_32_bits(dev->init_pa), upper_32_bits(dev->init_pa), sizeof(struct _r8) + (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } else { init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); // We can only use a 32 bit address here src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } } /** * aac_src_check_health * @dev: device to check if healthy * * Will attempt to determine if the specified adapter is alive and * capable of handling requests, returning 0 if alive. */ static int aac_src_check_health(struct aac_dev *dev) { u32 status = src_readl(dev, MUnit.OMR); /* * Check to see if the board panic'd. */ if (unlikely(status & KERNEL_PANIC)) goto err_blink; /* * Check to see if the board failed any self tests. */ if (unlikely(status & SELF_TEST_FAILED)) goto err_out; /* * Check to see if the board failed any self tests. */ if (unlikely(status & MONITOR_PANIC)) goto err_out; /* * Wait for the adapter to be up and running. */ if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) return -3; /* * Everything is OK */ return 0; err_out: return -1; err_blink: return (status >> 16) & 0xFF; } static inline u32 aac_get_vector(struct aac_dev *dev) { return atomic_inc_return(&dev->msix_counter)%dev->max_msix; } /** * aac_src_deliver_message * @fib: fib to issue * * Will send a fib, returning 0 if successful. 
*/ static int aac_src_deliver_message(struct fib *fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; u32 fibsize; dma_addr_t address; struct aac_fib_xporthdr *pFibX; int native_hba; #if !defined(writeq) unsigned long flags; #endif u16 vector_no; struct scsi_cmnd *scmd; u32 blk_tag; struct Scsi_Host *shost = dev->scsi_host_ptr; struct blk_mq_queue_map *qmap; atomic_inc(&q->numpending); native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0; if (dev->msi_enabled && dev->max_msix > 1 && (native_hba || fib->hw_fib_va->header.Command != AifRequest)) { if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && dev->sa_firmware) vector_no = aac_get_vector(dev); else { if (!fib->vector_no || !fib->callback_data) { if (shost && dev->use_map_queue) { qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; vector_no = qmap->mq_map[raw_smp_processor_id()]; } /* * We hardcode the vector_no for * reserved commands as a valid shost is * absent during the init */ else vector_no = 0; } else { scmd = (struct scsi_cmnd *)fib->callback_data; blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); vector_no = blk_mq_unique_tag_to_hwq(blk_tag); } } if (native_hba) { if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { struct aac_hba_tm_req *tm_req; tm_req = (struct aac_hba_tm_req *) fib->hw_fib_va; if (tm_req->iu_type == HBA_IU_TYPE_SCSI_TM_REQ) { ((struct aac_hba_tm_req *) fib->hw_fib_va)->reply_qid = vector_no; ((struct aac_hba_tm_req *) fib->hw_fib_va)->request_id += (vector_no << 16); } else { ((struct aac_hba_reset_req *) fib->hw_fib_va)->reply_qid = vector_no; ((struct aac_hba_reset_req *) fib->hw_fib_va)->request_id += (vector_no << 16); } } else { ((struct aac_hba_cmd_req *) fib->hw_fib_va)->reply_qid = vector_no; ((struct aac_hba_cmd_req *) fib->hw_fib_va)->request_id += (vector_no << 16); } } else { fib->hw_fib_va->header.Handle += (vector_no << 16); } } else { vector_no = 0; } atomic_inc(&dev->rrq_outstanding[vector_no]); if (native_hba) { address = fib->hw_fib_pa; fibsize = (fib->hbacmd_size + 127) / 128 - 1; if (fibsize > 31) fibsize = 31; address |= fibsize; #if defined(writeq) src_writeq(dev, MUnit.IQN_L, (u64)address); #else spin_lock_irqsave(&fib->dev->iq_lock, flags); src_writel(dev, MUnit.IQN_H, upper_32_bits(address) & 0xffffffff); src_writel(dev, MUnit.IQN_L, address & 0xffffffff); spin_unlock_irqrestore(&fib->dev->iq_lock, flags); #endif } else { if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { /* Calculate the amount to the fibsize bits */ fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size) + 127) / 128 - 1; /* New FIB header, 32-bit */ address = fib->hw_fib_pa; fib->hw_fib_va->header.StructType = FIB_MAGIC2; fib->hw_fib_va->header.SenderFibAddress = cpu_to_le32((u32)address); fib->hw_fib_va->header.u.TimeStamp = 0; WARN_ON(upper_32_bits(address) != 0L); } else { /* Calculate the amount to the fibsize bits */ fibsize = (sizeof(struct aac_fib_xporthdr) + le16_to_cpu(fib->hw_fib_va->header.Size) + 127) / 128 - 1; /* Fill XPORT header */ pFibX = (struct aac_fib_xporthdr *) ((unsigned char *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr)); pFibX->Handle = fib->hw_fib_va->header.Handle; pFibX->HostAddress = cpu_to_le64((u64)fib->hw_fib_pa); pFibX->Size = cpu_to_le32( le16_to_cpu(fib->hw_fib_va->header.Size)); address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); } if (fibsize > 31) fibsize = 31; address |= fibsize; #if defined(writeq) src_writeq(dev, MUnit.IQ_L, (u64)address); #else 
spin_lock_irqsave(&fib->dev->iq_lock, flags); src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff); src_writel(dev, MUnit.IQ_L, address & 0xffffffff); spin_unlock_irqrestore(&fib->dev->iq_lock, flags); #endif } return 0; } /** * aac_src_ioremap * @dev: device ioremap * @size: mapping resize request * */ static int aac_src_ioremap(struct aac_dev *dev, u32 size) { if (!size) { iounmap(dev->regs.src.bar1); dev->regs.src.bar1 = NULL; iounmap(dev->regs.src.bar0); dev->base = dev->regs.src.bar0 = NULL; return 0; } dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRC_BAR1_SIZE); dev->base = NULL; if (dev->regs.src.bar1 == NULL) return -1; dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); if (dev->base == NULL) { iounmap(dev->regs.src.bar1); dev->regs.src.bar1 = NULL; return -1; } dev->IndexRegs = &((struct src_registers __iomem *) dev->base)->u.tupelo.IndexRegs; return 0; } /** * aac_srcv_ioremap * @dev: device ioremap * @size: mapping resize request * */ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size) { if (!size) { iounmap(dev->regs.src.bar0); dev->base = dev->regs.src.bar0 = NULL; return 0; } dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE); dev->base = NULL; if (dev->regs.src.bar1 == NULL) return -1; dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); if (dev->base == NULL) { iounmap(dev->regs.src.bar1); dev->regs.src.bar1 = NULL; return -1; } dev->IndexRegs = &((struct src_registers __iomem *) dev->base)->u.denali.IndexRegs; return 0; } void aac_set_intx_mode(struct aac_dev *dev) { if (dev->msi_enabled) { aac_src_access_devreg(dev, AAC_ENABLE_INTX); dev->msi_enabled = 0; msleep(5000); /* Delay 5 seconds */ } } static void aac_clear_omr(struct aac_dev *dev) { u32 omr_value = 0; omr_value = src_readl(dev, MUnit.OMR); /* * Check for PCI Errors or Kernel Panic */ if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC)) omr_value = 0; /* * Preserve MSIX Value if any */ src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX); src_readl(dev, MUnit.OMR); } static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev) { __le32 supported_options3; if (!aac_fib_dump) return; supported_options3 = dev->supplement_adapter_info.supported_options3; if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP)) return; aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } static bool aac_is_ctrl_up_and_running(struct aac_dev *dev) { bool ctrl_up = true; unsigned long status, start; bool is_up = false; start = jiffies; do { schedule(); status = src_readl(dev, MUnit.OMR); if (status == 0xffffffff) status = 0; if (status & KERNEL_BOOTING) { start = jiffies; continue; } if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) { ctrl_up = false; break; } is_up = status & KERNEL_UP_AND_RUNNING; } while (!is_up); return ctrl_up; } static void aac_src_drop_io(struct aac_dev *dev) { if (!dev->soft_reset_support) return; aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } static void aac_notify_fw_of_iop_reset(struct aac_dev *dev) { aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); aac_src_drop_io(dev); } static void aac_send_iop_reset(struct aac_dev *dev) { aac_dump_fw_fib_iop_reset(dev); aac_notify_fw_of_iop_reset(dev); aac_set_intx_mode(dev); aac_clear_omr(dev); src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); msleep(5000); } static void aac_send_hardware_soft_reset(struct aac_dev 
*dev) { u_int32_t val; aac_clear_omr(dev); val = readl(((char *)(dev->base) + IBW_SWR_OFFSET)); val |= 0x01; writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET)); msleep_interruptible(20000); } static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { bool is_ctrl_up; int ret = 0; if (bled < 0) goto invalid_out; if (bled) dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled); /* * When there is a BlinkLED, IOP_RESET has not effect */ if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET) reset_type &= ~HW_IOP_RESET; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type); if (reset_type & HW_IOP_RESET) { dev_info(&dev->pdev->dev, "Issuing IOP reset\n"); aac_send_iop_reset(dev); /* * Creates a delay or wait till up and running comes thru */ is_ctrl_up = aac_is_ctrl_up_and_running(dev); if (!is_ctrl_up) dev_err(&dev->pdev->dev, "IOP reset failed\n"); else { dev_info(&dev->pdev->dev, "IOP reset succeeded\n"); goto set_startup; } } if (!dev->sa_firmware) { dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n"); ret = -ENODEV; goto out; } if (reset_type & HW_SOFT_RESET) { dev_info(&dev->pdev->dev, "Issuing SOFT reset\n"); aac_send_hardware_soft_reset(dev); dev->msi_enabled = 0; is_ctrl_up = aac_is_ctrl_up_and_running(dev); if (!is_ctrl_up) { dev_err(&dev->pdev->dev, "SOFT reset failed\n"); ret = -ENODEV; goto out; } else dev_info(&dev->pdev->dev, "SOFT reset succeeded\n"); } set_startup: if (startup_timeout < 300) startup_timeout = 300; out: return ret; invalid_out: if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) ret = -ENODEV; goto out; } /** * aac_src_select_comm - Select communications method * @dev: Adapter * @comm: communications method */ static int aac_src_select_comm(struct aac_dev *dev, int comm) { switch (comm) { case AAC_COMM_MESSAGE: dev->a_ops.adapter_intr = aac_src_intr_message; dev->a_ops.adapter_deliver = aac_src_deliver_message; break; default: return 1; } return 0; } /** * aac_src_init - initialize an Cardinal Frey Bar card * @dev: device to configure * */ int aac_src_init(struct aac_dev *dev) { unsigned long start; unsigned long status; int restart = 0; int instance = dev->id; const char *name = dev->name; dev->a_ops.adapter_ioremap = aac_src_ioremap; dev->a_ops.adapter_comm = aac_src_select_comm; dev->base_size = AAC_MIN_SRC_BAR0_SIZE; if (aac_adapter_ioremap(dev, dev->base_size)) { printk(KERN_WARNING "%s: unable to map adapter.\n", name); goto error_iounmap; } /* Failure to reset here is an option ... */ dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; if (dev->init_reset) { dev->init_reset = false; if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) ++restart; } /* * Check to see if the board panic'd while booting. */ status = src_readl(dev, MUnit.OMR); if (status & KERNEL_PANIC) { if (aac_src_restart_adapter(dev, aac_src_check_health(dev), IOP_HWSOFT_RESET)) goto error_iounmap; ++restart; } /* * Check to see if the board failed any self tests. */ status = src_readl(dev, MUnit.OMR); if (status & SELF_TEST_FAILED) { printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); goto error_iounmap; } /* * Check to see if the monitor panic'd while booting. */ if (status & MONITOR_PANIC) { printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); goto error_iounmap; } start = jiffies; /* * Wait for the adapter to be up and running. 
Wait up to 3 minutes */ while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) { if ((restart && (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || time_after(jiffies, start+HZ*startup_timeout)) { printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", dev->name, instance, status); goto error_iounmap; } if (!restart && ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || time_after(jiffies, start + HZ * ((startup_timeout > 60) ? (startup_timeout - 60) : (startup_timeout / 2))))) { if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev), IOP_HWSOFT_RESET))) start = jiffies; ++restart; } msleep(1); } if (restart && aac_commit) aac_commit = 1; /* * Fill in the common function dispatch table. */ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev->a_ops.adapter_notify = aac_src_notify_adapter; dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_check_health = aac_src_check_health; dev->a_ops.adapter_restart = aac_src_restart_adapter; dev->a_ops.adapter_start = aac_src_start_adapter; /* * First clear out all interrupts. Then enable the one's that we * can handle. */ aac_adapter_comm(dev, AAC_COMM_MESSAGE); aac_adapter_disable_int(dev); src_writel(dev, MUnit.ODR_C, 0xffffffff); aac_adapter_enable_int(dev); if (aac_init_adapter(dev) == NULL) goto error_iounmap; if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) goto error_iounmap; dev->msi = !pci_enable_msi(dev->pdev); dev->aac_msix[0].vector_no = 0; dev->aac_msix[0].dev = dev; if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance); goto error_iounmap; } dev->dbg_base = pci_resource_start(dev->pdev, 2); dev->dbg_base_mapped = dev->regs.src.bar1; dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); if (!dev->sync_mode) { /* * Tell the adapter that all is configured, and it can * start accepting requests */ aac_src_start_adapter(dev); } return 0; error_iounmap: return -1; } static int aac_src_wait_sync(struct aac_dev *dev, int *status) { unsigned long start = jiffies; unsigned long usecs = 0; int delay = 5 * HZ; int rc = 1; while (time_before(jiffies, start+delay)) { /* * Delay 5 microseconds to let Mon960 get info. */ udelay(5); /* * Mon960 will set doorbell0 bit when it has completed the * command. */ if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { /* * Clear: the doorbell. */ if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT); else src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); rc = 0; break; } /* * Yield the processor in case we are slow */ usecs = 1 * USEC_PER_MSEC; usleep_range(usecs, usecs + 50); } /* * Pull the synch status from Mailbox 0. */ if (status && !rc) { status[0] = readl(&dev->IndexRegs->Mailbox[0]); status[1] = readl(&dev->IndexRegs->Mailbox[1]); status[2] = readl(&dev->IndexRegs->Mailbox[2]); status[3] = readl(&dev->IndexRegs->Mailbox[3]); status[4] = readl(&dev->IndexRegs->Mailbox[4]); } return rc; } /** * aac_src_soft_reset - perform soft reset to speed up * access * * Assumptions: That the controller is in a state where we can * bring it back to life with an init struct. We can only use * fast sync commands, as the timeout is 5 seconds. 
* * @dev: device to configure * */ static int aac_src_soft_reset(struct aac_dev *dev) { u32 status_omr = src_readl(dev, MUnit.OMR); u32 status[5]; int rc = 1; int state = 0; char *state_str[7] = { "GET_ADAPTER_PROPERTIES Failed", "GET_ADAPTER_PROPERTIES timeout", "SOFT_RESET not supported", "DROP_IO Failed", "DROP_IO timeout", "Check Health failed" }; if (status_omr == INVALID_OMR) return 1; // pcie hosed if (!(status_omr & KERNEL_UP_AND_RUNNING)) return 1; // not up and running /* * We go into soft reset mode to allow us to handle response */ dev->in_soft_reset = 1; dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX; /* Get adapter properties */ rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4); if (rc) goto out; state++; if (aac_src_wait_sync(dev, status)) { rc = 1; goto out; } state++; if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) && (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) { rc = 2; goto out; } if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) && (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE))) dev->sa_firmware = 1; state++; rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4); if (rc) goto out; state++; if (aac_src_wait_sync(dev, status)) { rc = 3; goto out; } if (status[1]) dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n", __func__, status[1]); state++; rc = aac_src_check_health(dev); out: dev->in_soft_reset = 0; dev->msi_enabled = 0; if (rc) dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__, state_str[state], rc); return rc; } /** * aac_srcv_init - initialize an SRCv card * @dev: device to configure * */ int aac_srcv_init(struct aac_dev *dev) { unsigned long start; unsigned long status; int restart = 0; int instance = dev->id; const char *name = dev->name; dev->a_ops.adapter_ioremap = aac_srcv_ioremap; dev->a_ops.adapter_comm = aac_src_select_comm; dev->base_size = AAC_MIN_SRCV_BAR0_SIZE; if (aac_adapter_ioremap(dev, dev->base_size)) { printk(KERN_WARNING "%s: unable to map adapter.\n", name); goto error_iounmap; } /* Failure to reset here is an option ... */ dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; if (dev->init_reset) { dev->init_reset = false; if (aac_src_soft_reset(dev)) { aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET); ++restart; } } /* * Check to see if flash update is running. * Wait for the adapter to be up and running. Wait up to 5 minutes */ status = src_readl(dev, MUnit.OMR); if (status & FLASH_UPD_PENDING) { start = jiffies; do { status = src_readl(dev, MUnit.OMR); if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) { printk(KERN_ERR "%s%d: adapter flash update failed.\n", dev->name, instance); goto error_iounmap; } } while (!(status & FLASH_UPD_SUCCESS) && !(status & FLASH_UPD_FAILED)); /* Delay 10 seconds. * Because right now FW is doing a soft reset, * do not read scratch pad register at this time */ ssleep(10); } /* * Check to see if the board panic'd while booting. */ status = src_readl(dev, MUnit.OMR); if (status & KERNEL_PANIC) { if (aac_src_restart_adapter(dev, aac_src_check_health(dev), IOP_HWSOFT_RESET)) goto error_iounmap; ++restart; } /* * Check to see if the board failed any self tests. */ status = src_readl(dev, MUnit.OMR); if (status & SELF_TEST_FAILED) { printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); goto error_iounmap; } /* * Check to see if the monitor panic'd while booting. 
*/ if (status & MONITOR_PANIC) { printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); goto error_iounmap; } start = jiffies; /* * Wait for the adapter to be up and running. Wait up to 3 minutes */ do { status = src_readl(dev, MUnit.OMR); if (status == INVALID_OMR) status = 0; if ((restart && (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || time_after(jiffies, start+HZ*startup_timeout)) { printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", dev->name, instance, status); goto error_iounmap; } if (!restart && ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || time_after(jiffies, start + HZ * ((startup_timeout > 60) ? (startup_timeout - 60) : (startup_timeout / 2))))) { if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev), IOP_HWSOFT_RESET))) start = jiffies; ++restart; } msleep(1); } while (!(status & KERNEL_UP_AND_RUNNING)); if (restart && aac_commit) aac_commit = 1; /* * Fill in the common function dispatch table. */ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev->a_ops.adapter_notify = aac_src_notify_adapter; dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_check_health = aac_src_check_health; dev->a_ops.adapter_restart = aac_src_restart_adapter; dev->a_ops.adapter_start = aac_src_start_adapter; /* * First clear out all interrupts. Then enable the one's that we * can handle. */ aac_adapter_comm(dev, AAC_COMM_MESSAGE); aac_adapter_disable_int(dev); src_writel(dev, MUnit.ODR_C, 0xffffffff); aac_adapter_enable_int(dev); if (aac_init_adapter(dev) == NULL) goto error_iounmap; if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) && (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)) goto error_iounmap; if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_ENABLE_MSIX); if (aac_acquire_irq(dev)) goto error_iounmap; dev->dbg_base = pci_resource_start(dev->pdev, 2); dev->dbg_base_mapped = dev->regs.src.bar1; dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE; dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); if (!dev->sync_mode) { /* * Tell the adapter that all is configured, and it can * start accepting requests */ aac_src_start_adapter(dev); } return 0; error_iounmap: return -1; } void aac_src_access_devreg(struct aac_dev *dev, int mode) { u_int32_t val; switch (mode) { case AAC_ENABLE_INTERRUPT: src_writel(dev, MUnit.OIMR, dev->OIMR = (dev->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX : AAC_INT_ENABLE_TYPE1_INTX)); break; case AAC_DISABLE_INTERRUPT: src_writel(dev, MUnit.OIMR, dev->OIMR = AAC_INT_DISABLE_ALL); break; case AAC_ENABLE_MSIX: /* set bit 6 */ val = src_readl(dev, MUnit.IDR); val |= 0x40; src_writel(dev, MUnit.IDR, val); src_readl(dev, MUnit.IDR); /* unmask int. 
*/ val = PMC_ALL_INTERRUPT_BITS; src_writel(dev, MUnit.IOAR, val); val = src_readl(dev, MUnit.OIMR); src_writel(dev, MUnit.OIMR, val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); break; case AAC_DISABLE_MSIX: /* reset bit 6 */ val = src_readl(dev, MUnit.IDR); val &= ~0x40; src_writel(dev, MUnit.IDR, val); src_readl(dev, MUnit.IDR); break; case AAC_CLEAR_AIF_BIT: /* set bit 5 */ val = src_readl(dev, MUnit.IDR); val |= 0x20; src_writel(dev, MUnit.IDR, val); src_readl(dev, MUnit.IDR); break; case AAC_CLEAR_SYNC_BIT: /* set bit 4 */ val = src_readl(dev, MUnit.IDR); val |= 0x10; src_writel(dev, MUnit.IDR, val); src_readl(dev, MUnit.IDR); break; case AAC_ENABLE_INTX: /* set bit 7 */ val = src_readl(dev, MUnit.IDR); val |= 0x80; src_writel(dev, MUnit.IDR, val); src_readl(dev, MUnit.IDR); /* unmask int. */ val = PMC_ALL_INTERRUPT_BITS; src_writel(dev, MUnit.IOAR, val); src_readl(dev, MUnit.IOAR); val = src_readl(dev, MUnit.OIMR); src_writel(dev, MUnit.OIMR, val & (~(PMC_GLOBAL_INT_BIT2))); break; default: break; } } static int aac_src_get_sync_status(struct aac_dev *dev) { int msix_val = 0; int legacy_val = 0; msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0; if (!dev->msi_enabled) { /* * if Legacy int status indicates cmd is not complete * sample MSIx register to see if it indiactes cmd complete, * if yes set the controller in MSIx mode and consider cmd * completed */ legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT; if (!(legacy_val & 1) && msix_val) dev->msi_enabled = 1; return legacy_val; } return msix_val; }
linux-master
drivers/scsi/aacraid/src.c
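A minimal standalone C sketch (separate from the driver source above, under simplified assumptions) of how the per-vector host RRQ ring in aac_src_intr_message() is drained: bit 31 of each entry is a toggle bit, bit 30 flags a fast response, the low 16 bits shifted right by 2 carry the FIB handle, and the read index wraps inside the window owned by that vector. The names here (rrq_demo, drain_vector, VECTOR_CAP) are illustrative only, not driver symbols.

/*
 * Standalone sketch of the host RRQ draining loop: each vector owns a window
 * of VECTOR_CAP slots; an entry of 0 means the slot is empty.
 */
#include <stdint.h>
#include <stdio.h>

#define VECTOR_CAP  4
#define MAX_VECTORS 2

struct rrq_demo {
	uint32_t host_rrq[MAX_VECTORS * VECTOR_CAP]; /* shared response ring */
	uint32_t host_rrq_idx[MAX_VECTORS];          /* per-vector read index */
};

static void drain_vector(struct rrq_demo *d, int vector_no)
{
	uint32_t index = d->host_rrq_idx[vector_no];

	for (;;) {
		/* strip the toggle bit (31) before decoding the entry */
		uint32_t entry = d->host_rrq[index] & 0x7fffffff;
		int fast = (entry & 0x40000000) ? 1 : 0;
		uint32_t handle = entry & 0x0000ffff;

		if (handle == 0)
			break;          /* empty slot: nothing more queued */
		handle >>= 2;           /* low two bits are flags, not the FIB id */

		printf("vector %d: fib handle %u fast=%d\n",
		       vector_no, (unsigned)handle, fast);

		d->host_rrq[index++] = 0;               /* mark slot consumed */
		if (index == (uint32_t)(vector_no + 1) * VECTOR_CAP)
			index = vector_no * VECTOR_CAP; /* wrap inside this vector's window */
		d->host_rrq_idx[vector_no] = index;
	}
}

int main(void)
{
	struct rrq_demo d = { .host_rrq = { 0x80000008, 0xc000000c, 0 } };

	drain_vector(&d, 0);
	return 0;
}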
// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. ([email protected]) * 2016-2017 Microsemi Corp. ([email protected]) * * Module Name: * sa.c * * Abstract: Drawbridge specific support functions */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/time.h> #include <linux/interrupt.h> #include <scsi/scsi_host.h> #include "aacraid.h" static irqreturn_t aac_sa_intr(int irq, void *dev_id) { struct aac_dev *dev = dev_id; unsigned short intstat, mask; intstat = sa_readw(dev, DoorbellReg_p); /* * Read mask and invert because drawbridge is reversed. * This allows us to only service interrupts that have been enabled. */ mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK)); /* Check to see if this is our interrupt. If it isn't just return */ if (intstat & mask) { if (intstat & PrintfReady) { aac_printf(dev, sa_readl(dev, Mailbox5)); sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */ sa_writew(dev, DoorbellReg_s, PrintfDone); } else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready sa_writew(dev, DoorbellClrReg_p, DOORBELL_1); aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); } else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready sa_writew(dev, DoorbellClrReg_p, DOORBELL_2); aac_response_normal(&dev->queues->queue[HostNormRespQueue]); } else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full sa_writew(dev, DoorbellClrReg_p, DOORBELL_3); } else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full sa_writew(dev, DoorbellClrReg_p, DOORBELL_4); } return IRQ_HANDLED; } return IRQ_NONE; } /** * aac_sa_disable_interrupt - disable interrupt * @dev: Which adapter to enable. */ static void aac_sa_disable_interrupt (struct aac_dev *dev) { sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); } /** * aac_sa_enable_interrupt - enable interrupt * @dev: Which adapter to enable. 
*/ static void aac_sa_enable_interrupt (struct aac_dev *dev) { sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4)); } /** * aac_sa_notify_adapter - handle adapter notification * @dev: Adapter that notification is for * @event: Event to notidy * * Notify the adapter of an event */ static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event) { switch (event) { case AdapNormCmdQue: sa_writew(dev, DoorbellReg_s,DOORBELL_1); break; case HostNormRespNotFull: sa_writew(dev, DoorbellReg_s,DOORBELL_4); break; case AdapNormRespQue: sa_writew(dev, DoorbellReg_s,DOORBELL_2); break; case HostNormCmdNotFull: sa_writew(dev, DoorbellReg_s,DOORBELL_3); break; case HostShutdown: /* sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); */ break; case FastIo: sa_writew(dev, DoorbellReg_s,DOORBELL_6); break; case AdapPrintfDone: sa_writew(dev, DoorbellReg_s,DOORBELL_5); break; default: BUG(); break; } } /** * sa_sync_cmd - send a command and wait * @dev: Adapter * @command: Command to execute * @p1: first parameter * @p2: second parameter * @p3: third parameter * @p4: forth parameter * @p5: fifth parameter * @p6: sixth parameter * @ret: adapter status * @r1: first return value * @r2: second return value * @r3: third return value * @r4: forth return value * * This routine will send a synchronous command to the adapter and wait * for its completion. */ static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4) { unsigned long start; int ok; /* * Write the Command into Mailbox 0 */ sa_writel(dev, Mailbox0, command); /* * Write the parameters into Mailboxes 1 - 4 */ sa_writel(dev, Mailbox1, p1); sa_writel(dev, Mailbox2, p2); sa_writel(dev, Mailbox3, p3); sa_writel(dev, Mailbox4, p4); /* * Clear the synch command doorbell to start on a clean slate. */ sa_writew(dev, DoorbellClrReg_p, DOORBELL_0); /* * Signal that there is a new synch command */ sa_writew(dev, DoorbellReg_s, DOORBELL_0); ok = 0; start = jiffies; while(time_before(jiffies, start+30*HZ)) { /* * Delay 5uS so that the monitor gets access */ udelay(5); /* * Mon110 will set doorbell0 bit when it has * completed the command. */ if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) { ok = 1; break; } msleep(1); } if (ok != 1) return -ETIMEDOUT; /* * Clear the synch command doorbell. */ sa_writew(dev, DoorbellClrReg_p, DOORBELL_0); /* * Pull the synch status from Mailbox 0. */ if (ret) *ret = sa_readl(dev, Mailbox0); if (r1) *r1 = sa_readl(dev, Mailbox1); if (r2) *r2 = sa_readl(dev, Mailbox2); if (r3) *r3 = sa_readl(dev, Mailbox3); if (r4) *r4 = sa_readl(dev, Mailbox4); return 0; } /** * aac_sa_interrupt_adapter - interrupt an adapter * @dev: Which adapter to enable. * * Breakpoint an adapter. */ static void aac_sa_interrupt_adapter (struct aac_dev *dev) { sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } /** * aac_sa_start_adapter - activate adapter * @dev: Adapter * * Start up processing on an ARM based AAC adapter */ static void aac_sa_start_adapter(struct aac_dev *dev) { union aac_init *init; /* * Fill in the remaining pieces of the init. 
*/ init = dev->init; init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); /* We can only use a 32 bit address here */ sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type) { return -EINVAL; } /** * aac_sa_check_health * @dev: device to check if healthy * * Will attempt to determine if the specified adapter is alive and * capable of handling requests, returning 0 if alive. */ static int aac_sa_check_health(struct aac_dev *dev) { long status = sa_readl(dev, Mailbox7); /* * Check to see if the board failed any self tests. */ if (status & SELF_TEST_FAILED) return -1; /* * Check to see if the board panic'd while booting. */ if (status & KERNEL_PANIC) return -2; /* * Wait for the adapter to be up and running. Wait up to 3 minutes */ if (!(status & KERNEL_UP_AND_RUNNING)) return -3; /* * Everything is OK */ return 0; } /** * aac_sa_ioremap * @dev: device to ioremap * @size: mapping resize request * */ static int aac_sa_ioremap(struct aac_dev * dev, u32 size) { if (!size) { iounmap(dev->regs.sa); return 0; } dev->base = dev->regs.sa = ioremap(dev->base_start, size); return (dev->base == NULL) ? -1 : 0; } /** * aac_sa_init - initialize an ARM based AAC card * @dev: device to configure * * Allocate and set up resources for the ARM based AAC variants. The * device_interface in the commregion will be allocated and linked * to the comm region. */ int aac_sa_init(struct aac_dev *dev) { unsigned long start; unsigned long status; int instance; const char *name; instance = dev->id; name = dev->name; /* * Fill in the function dispatch table. */ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt; dev->a_ops.adapter_notify = aac_sa_notify_adapter; dev->a_ops.adapter_sync_cmd = sa_sync_cmd; dev->a_ops.adapter_check_health = aac_sa_check_health; dev->a_ops.adapter_restart = aac_sa_restart_adapter; dev->a_ops.adapter_start = aac_sa_start_adapter; dev->a_ops.adapter_intr = aac_sa_intr; dev->a_ops.adapter_deliver = aac_rx_deliver_producer; dev->a_ops.adapter_ioremap = aac_sa_ioremap; if (aac_sa_ioremap(dev, dev->base_size)) { printk(KERN_WARNING "%s: unable to map adapter.\n", name); goto error_iounmap; } /* * Check to see if the board failed any self tests. */ if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) { printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance); goto error_iounmap; } /* * Check to see if the board panic'd while booting. */ if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) { printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance); goto error_iounmap; } start = jiffies; /* * Wait for the adapter to be up and running. Wait up to 3 minutes. */ while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) { if (time_after(jiffies, start+startup_timeout*HZ)) { status = sa_readl(dev, Mailbox7); printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n", name, instance, status); goto error_iounmap; } msleep(1); } /* * First clear out all interrupts. Then enable the one's that * we can handle. */ aac_adapter_disable_int(dev); aac_adapter_enable_int(dev); if(aac_init_adapter(dev) == NULL) goto error_irq; dev->sync_mode = 0; /* sync. 
mode not supported */ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, IRQF_SHARED, "aacraid", (void *)dev) < 0) { printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance); goto error_iounmap; } dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; aac_adapter_enable_int(dev); /* * Tell the adapter that all is configure, and it can start * accepting requests */ aac_sa_start_adapter(dev); return 0; error_irq: aac_sa_disable_interrupt(dev); free_irq(dev->pdev->irq, (void *)dev); error_iounmap: return -1; }
linux-master
drivers/scsi/aacraid/sa.c
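A minimal standalone C sketch (separate from the driver source above) of the synchronous mailbox-plus-doorbell handshake used by sa_sync_cmd() and src_sync_cmd(): the host loads the command and parameters into mailboxes, rings an inbound doorbell, polls an outbound doorbell with a bounded wait, and on completion reads status back from mailbox 0. The register model here (struct mbox_hw, fake_firmware_step, demo_sync_cmd) is invented solely to keep the example self-contained.

/*
 * Standalone sketch of the synchronous command handshake; the "firmware" is
 * simulated so the program runs on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define DOORBELL_0 0x0001

struct mbox_hw {
	uint32_t mailbox[5];      /* Mailbox0..Mailbox4 */
	uint16_t doorbell_out;    /* firmware -> host */
	uint16_t doorbell_in;     /* host -> firmware */
};

/* Pretend firmware: posts command+1 as status and rings the outbound doorbell. */
static void fake_firmware_step(struct mbox_hw *hw)
{
	if (hw->doorbell_in & DOORBELL_0) {
		hw->mailbox[0] = hw->mailbox[0] + 1;  /* "status" */
		hw->doorbell_out |= DOORBELL_0;
		hw->doorbell_in &= ~DOORBELL_0;
	}
}

static int demo_sync_cmd(struct mbox_hw *hw, uint32_t command,
			 uint32_t p1, uint32_t *status)
{
	int polls;

	hw->mailbox[0] = command;                 /* command into mailbox 0 */
	hw->mailbox[1] = p1;                      /* first parameter */
	hw->doorbell_out &= ~DOORBELL_0;          /* start on a clean slate */
	hw->doorbell_in |= DOORBELL_0;            /* signal a new synch command */

	for (polls = 0; polls < 1000; polls++) {  /* bounded wait, like the jiffies loop */
		fake_firmware_step(hw);
		if (hw->doorbell_out & DOORBELL_0) {
			hw->doorbell_out &= ~DOORBELL_0;  /* clear the doorbell */
			if (status)
				*status = hw->mailbox[0]; /* status from mailbox 0 */
			return 0;
		}
	}
	return -1;                                /* timed out */
}

int main(void)
{
	struct mbox_hw hw = { 0 };
	uint32_t status = 0;

	if (demo_sync_cmd(&hw, 0x23, 0, &status) == 0)
		printf("sync cmd completed, status 0x%x\n", (unsigned)status);
	return 0;
}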
/* * linux/drivers/scsi/esas2r/esas2r_init.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected])mpt3sas/mpt3sas_trigger_diag. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include "esas2r.h" static bool esas2r_initmem_alloc(struct esas2r_adapter *a, struct esas2r_mem_desc *mem_desc, u32 align) { mem_desc->esas2r_param = mem_desc->size + align; mem_desc->virt_addr = NULL; mem_desc->phys_addr = 0; mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev, (size_t)mem_desc-> esas2r_param, (dma_addr_t *)&mem_desc-> phys_addr, GFP_KERNEL); if (mem_desc->esas2r_data == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate %lu bytes of consistent memory!", (long unsigned int)mem_desc->esas2r_param); return false; } mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align); mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align); memset(mem_desc->virt_addr, 0, mem_desc->size); return true; } static void esas2r_initmem_free(struct esas2r_adapter *a, struct esas2r_mem_desc *mem_desc) { if (mem_desc->virt_addr == NULL) return; /* * Careful! phys_addr and virt_addr may have been adjusted from the * original allocation in order to return the desired alignment. That * means we have to use the original address (in esas2r_data) and size * (esas2r_param) and calculate the original physical address based on * the difference between the requested and actual allocation size. 
*/ if (mem_desc->phys_addr) { int unalign = ((u8 *)mem_desc->virt_addr) - ((u8 *)mem_desc->esas2r_data); dma_free_coherent(&a->pcid->dev, (size_t)mem_desc->esas2r_param, mem_desc->esas2r_data, (dma_addr_t)(mem_desc->phys_addr - unalign)); } else { kfree(mem_desc->esas2r_data); } mem_desc->virt_addr = NULL; } static bool alloc_vda_req(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_mem_desc *memdesc = kzalloc( sizeof(struct esas2r_mem_desc), GFP_KERNEL); if (memdesc == NULL) { esas2r_hdebug("could not alloc mem for vda request memdesc\n"); return false; } memdesc->size = sizeof(union atto_vda_req) + ESAS2R_DATA_BUF_LEN; if (!esas2r_initmem_alloc(a, memdesc, 256)) { esas2r_hdebug("could not alloc mem for vda request\n"); kfree(memdesc); return false; } a->num_vrqs++; list_add(&memdesc->next_desc, &a->vrq_mds_head); rq->vrq_md = memdesc; rq->vrq = (union atto_vda_req *)memdesc->virt_addr; rq->vrq->scsi.handle = a->num_vrqs; return true; } static void esas2r_unmap_regions(struct esas2r_adapter *a) { if (a->regs) iounmap((void __iomem *)a->regs); a->regs = NULL; pci_release_region(a->pcid, 2); if (a->data_window) iounmap((void __iomem *)a->data_window); a->data_window = NULL; pci_release_region(a->pcid, 0); } static int esas2r_map_regions(struct esas2r_adapter *a) { int error; a->regs = NULL; a->data_window = NULL; error = pci_request_region(a->pcid, 2, a->name); if (error != 0) { esas2r_log(ESAS2R_LOG_CRIT, "pci_request_region(2) failed, error %d", error); return error; } a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2), pci_resource_len(a->pcid, 2)); if (a->regs == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "ioremap failed for regs mem region\n"); pci_release_region(a->pcid, 2); return -EFAULT; } error = pci_request_region(a->pcid, 0, a->name); if (error != 0) { esas2r_log(ESAS2R_LOG_CRIT, "pci_request_region(2) failed, error %d", error); esas2r_unmap_regions(a); return error; } a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, 0), pci_resource_len(a->pcid, 0)); if (a->data_window == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "ioremap failed for data_window mem region\n"); esas2r_unmap_regions(a); return -EFAULT; } return 0; } static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode) { int i; /* Set up interrupt mode based on the requested value */ switch (intr_mode) { case INTR_MODE_LEGACY: use_legacy_interrupts: a->intr_mode = INTR_MODE_LEGACY; break; case INTR_MODE_MSI: i = pci_enable_msi(a->pcid); if (i != 0) { esas2r_log(ESAS2R_LOG_WARN, "failed to enable MSI for adapter %d, " "falling back to legacy interrupts " "(err=%d)", a->index, i); goto use_legacy_interrupts; } a->intr_mode = INTR_MODE_MSI; set_bit(AF2_MSI_ENABLED, &a->flags2); break; default: esas2r_log(ESAS2R_LOG_WARN, "unknown interrupt_mode %d requested, " "falling back to legacy interrupt", interrupt_mode); goto use_legacy_interrupts; } } static void esas2r_claim_interrupts(struct esas2r_adapter *a) { unsigned long flags = 0; if (a->intr_mode == INTR_MODE_LEGACY) flags |= IRQF_SHARED; esas2r_log(ESAS2R_LOG_INFO, "esas2r_claim_interrupts irq=%d (%p, %s, %lx)", a->pcid->irq, a, a->name, flags); if (request_irq(a->pcid->irq, (a->intr_mode == INTR_MODE_LEGACY) ? 
esas2r_interrupt : esas2r_msi_interrupt, flags, a->name, a)) { esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", a->pcid->irq); return; } set_bit(AF2_IRQ_CLAIMED, &a->flags2); esas2r_log(ESAS2R_LOG_INFO, "claimed IRQ %d flags: 0x%lx", a->pcid->irq, flags); } int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, int index) { struct esas2r_adapter *a; u64 bus_addr = 0; int i; void *next_uncached; struct esas2r_request *first_request, *last_request; bool dma64 = false; if (index >= MAX_ADAPTERS) { esas2r_log(ESAS2R_LOG_CRIT, "tried to init invalid adapter index %u!", index); return 0; } if (esas2r_adapters[index]) { esas2r_log(ESAS2R_LOG_CRIT, "tried to init existing adapter index %u!", index); return 0; } a = (struct esas2r_adapter *)host->hostdata; memset(a, 0, sizeof(struct esas2r_adapter)); a->pcid = pcid; a->host = host; if (sizeof(dma_addr_t) > 4 && dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) && !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64))) dma64 = true; if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) { esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask"); esas2r_kill_adapter(index); return 0; } esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev, "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32"); esas2r_adapters[index] = a; sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index); esas2r_debug("new adapter %p, name %s", a, a->name); spin_lock_init(&a->request_lock); spin_lock_init(&a->fw_event_lock); mutex_init(&a->fm_api_mutex); mutex_init(&a->fs_api_mutex); sema_init(&a->nvram_semaphore, 1); esas2r_fw_event_off(a); snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", a->index); a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); init_waitqueue_head(&a->buffered_ioctl_waiter); init_waitqueue_head(&a->nvram_waiter); init_waitqueue_head(&a->fm_api_waiter); init_waitqueue_head(&a->fs_api_waiter); init_waitqueue_head(&a->vda_waiter); INIT_LIST_HEAD(&a->general_req.req_list); INIT_LIST_HEAD(&a->active_list); INIT_LIST_HEAD(&a->defer_list); INIT_LIST_HEAD(&a->free_sg_list_head); INIT_LIST_HEAD(&a->avail_request); INIT_LIST_HEAD(&a->vrq_mds_head); INIT_LIST_HEAD(&a->fw_event_list); first_request = (struct esas2r_request *)((u8 *)(a + 1)); for (last_request = first_request, i = 1; i < num_requests; last_request++, i++) { INIT_LIST_HEAD(&last_request->req_list); list_add_tail(&last_request->comp_list, &a->avail_request); if (!alloc_vda_req(a, last_request)) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate a VDA request!"); esas2r_kill_adapter(index); return 0; } } esas2r_debug("requests: %p to %p (%d, %d)", first_request, last_request, sizeof(*first_request), num_requests); if (esas2r_map_regions(a) != 0) { esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!"); esas2r_kill_adapter(index); return 0; } a->index = index; /* interrupts will be disabled until we are done with init */ atomic_inc(&a->dis_ints_cnt); atomic_inc(&a->disable_cnt); set_bit(AF_CHPRST_PENDING, &a->flags); set_bit(AF_DISC_PENDING, &a->flags); set_bit(AF_FIRST_INIT, &a->flags); set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->init_msg = ESAS2R_INIT_MSG_START; a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; esas2r_setup_interrupts(a, interrupt_mode); a->uncached_size = esas2r_get_uncached_size(a); a->uncached = dma_alloc_coherent(&pcid->dev, (size_t)a->uncached_size, (dma_addr_t *)&bus_addr, GFP_KERNEL); if (a->uncached == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate %d bytes of consistent memory!", 
a->uncached_size); esas2r_kill_adapter(index); return 0; } a->uncached_phys = bus_addr; esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)", a->uncached_size, a->uncached, upper_32_bits(bus_addr), lower_32_bits(bus_addr)); memset(a->uncached, 0, a->uncached_size); next_uncached = a->uncached; if (!esas2r_init_adapter_struct(a, &next_uncached)) { esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize adapter structure (2)!"); esas2r_kill_adapter(index); return 0; } tasklet_init(&a->tasklet, esas2r_adapter_tasklet, (unsigned long)a); /* * Disable chip interrupts to prevent spurious interrupts * until we claim the IRQ. */ esas2r_disable_chip_interrupts(a); esas2r_check_adapter(a); if (!esas2r_init_adapter_hw(a, true)) { esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!"); } else { esas2r_debug("esas2r_init_adapter ok"); } esas2r_claim_interrupts(a); if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) esas2r_enable_chip_interrupts(a); set_bit(AF2_INIT_DONE, &a->flags2); if (!test_bit(AF_DEGRADED_MODE, &a->flags)) esas2r_kickoff_timer(a); esas2r_debug("esas2r_init_adapter done for %p (%d)", a, a->disable_cnt); return 1; } static void esas2r_adapter_power_down(struct esas2r_adapter *a, int power_management) { struct esas2r_mem_desc *memdesc, *next; if ((test_bit(AF2_INIT_DONE, &a->flags2)) && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { if (!power_management) { del_timer_sync(&a->timer); tasklet_kill(&a->tasklet); } esas2r_power_down(a); /* * There are versions of firmware that do not handle the sync * cache command correctly. Stall here to ensure that the * cache is lazily flushed. */ mdelay(500); esas2r_debug("chip halted"); } /* Remove sysfs binary files */ if (a->sysfs_fw_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw); a->sysfs_fw_created = 0; } if (a->sysfs_fs_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs); a->sysfs_fs_created = 0; } if (a->sysfs_vda_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda); a->sysfs_vda_created = 0; } if (a->sysfs_hw_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw); a->sysfs_hw_created = 0; } if (a->sysfs_live_nvram_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_live_nvram); a->sysfs_live_nvram_created = 0; } if (a->sysfs_default_nvram_created) { sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_default_nvram); a->sysfs_default_nvram_created = 0; } /* Clean up interrupts */ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "free_irq(%d) called", a->pcid->irq); free_irq(a->pcid->irq, a); esas2r_debug("IRQ released"); clear_bit(AF2_IRQ_CLAIMED, &a->flags2); } if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { pci_disable_msi(a->pcid); clear_bit(AF2_MSI_ENABLED, &a->flags2); esas2r_debug("MSI disabled"); } if (a->inbound_list_md.virt_addr) esas2r_initmem_free(a, &a->inbound_list_md); if (a->outbound_list_md.virt_addr) esas2r_initmem_free(a, &a->outbound_list_md); list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head, next_desc) { esas2r_initmem_free(a, memdesc); } /* Following frees everything allocated via alloc_vda_req */ list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) { esas2r_initmem_free(a, memdesc); list_del(&memdesc->next_desc); kfree(memdesc); } kfree(a->first_ae_req); a->first_ae_req = NULL; kfree(a->sg_list_mds); a->sg_list_mds = NULL; kfree(a->req_table); a->req_table = NULL; if (a->regs) { esas2r_unmap_regions(a); a->regs = NULL; a->data_window = NULL; 
esas2r_debug("regions unmapped"); } } /* Release/free allocated resources for specified adapters. */ void esas2r_kill_adapter(int i) { struct esas2r_adapter *a = esas2r_adapters[i]; if (a) { unsigned long flags; struct workqueue_struct *wq; esas2r_debug("killing adapter %p [%d] ", a, i); esas2r_fw_event_off(a); esas2r_adapter_power_down(a, 0); if (esas2r_buffered_ioctl && (a->pcid == esas2r_buffered_ioctl_pcid)) { dma_free_coherent(&a->pcid->dev, (size_t)esas2r_buffered_ioctl_size, esas2r_buffered_ioctl, esas2r_buffered_ioctl_addr); esas2r_buffered_ioctl = NULL; } if (a->vda_buffer) { dma_free_coherent(&a->pcid->dev, (size_t)VDA_MAX_BUFFER_SIZE, a->vda_buffer, (dma_addr_t)a->ppvda_buffer); a->vda_buffer = NULL; } if (a->fs_api_buffer) { dma_free_coherent(&a->pcid->dev, (size_t)a->fs_api_buffer_size, a->fs_api_buffer, (dma_addr_t)a->ppfs_api_buffer); a->fs_api_buffer = NULL; } kfree(a->local_atto_ioctl); a->local_atto_ioctl = NULL; spin_lock_irqsave(&a->fw_event_lock, flags); wq = a->fw_event_q; a->fw_event_q = NULL; spin_unlock_irqrestore(&a->fw_event_lock, flags); if (wq) destroy_workqueue(wq); if (a->uncached) { dma_free_coherent(&a->pcid->dev, (size_t)a->uncached_size, a->uncached, (dma_addr_t)a->uncached_phys); a->uncached = NULL; esas2r_debug("uncached area freed"); } esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "pci_disable_device() called. msix_enabled: %d " "msi_enabled: %d irq: %d pin: %d", a->pcid->msix_enabled, a->pcid->msi_enabled, a->pcid->irq, a->pcid->pin); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "before pci_disable_device() enable_cnt: %d", a->pcid->enable_cnt.counter); pci_disable_device(a->pcid); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "after pci_disable_device() enable_cnt: %d", a->pcid->enable_cnt.counter); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "pci_set_drv_data(%p, NULL) called", a->pcid); pci_set_drvdata(a->pcid, NULL); esas2r_adapters[i] = NULL; if (test_bit(AF2_INIT_DONE, &a->flags2)) { clear_bit(AF2_INIT_DONE, &a->flags2); set_bit(AF_DEGRADED_MODE, &a->flags); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev), "scsi_remove_host() called"); scsi_remove_host(a->host); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev), "scsi_host_put() called"); scsi_host_put(a->host); } } } static int __maybe_unused esas2r_suspend(struct device *dev) { struct Scsi_Host *host = dev_get_drvdata(dev); struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()"); if (!a) return -ENODEV; esas2r_adapter_power_down(a, 1); esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0"); return 0; } static int __maybe_unused esas2r_resume(struct device *dev) { struct Scsi_Host *host = dev_get_drvdata(dev); struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; int rez = 0; esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()"); if (!a) { rez = -ENODEV; goto error_exit; } if (esas2r_map_regions(a) != 0) { esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!"); rez = -ENOMEM; goto error_exit; } /* Set up interupt mode */ esas2r_setup_interrupts(a, a->intr_mode); /* * Disable chip interrupts to prevent spurious interrupts until we * claim the IRQ. */ esas2r_disable_chip_interrupts(a); if (!esas2r_power_up(a, true)) { esas2r_debug("yikes, esas2r_power_up failed"); rez = -ENOMEM; goto error_exit; } esas2r_claim_interrupts(a); if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { /* * Now that system interrupt(s) are claimed, we can enable * chip interrupts. 
*/ esas2r_enable_chip_interrupts(a); esas2r_kickoff_timer(a); } else { esas2r_debug("yikes, unable to claim IRQ"); esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!"); rez = -ENOMEM; goto error_exit; } error_exit: esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d", rez); return rez; } SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume); bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) { set_bit(AF_DEGRADED_MODE, &a->flags); esas2r_log(ESAS2R_LOG_CRIT, "setting adapter to degraded mode: %s\n", error_str); return false; } u32 esas2r_get_uncached_size(struct esas2r_adapter *a) { return sizeof(struct esas2r_sas_nvram) + ALIGN(ESAS2R_DISC_BUF_LEN, 8) + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */ + 8 + (num_sg_lists * (u16)sgl_page_size) + ALIGN((num_requests + num_ae_requests + 1 + ESAS2R_LIST_EXTRA) * sizeof(struct esas2r_inbound_list_source_entry), 8) + ALIGN((num_requests + num_ae_requests + 1 + ESAS2R_LIST_EXTRA) * sizeof(struct atto_vda_ob_rsp), 8) + 256; /* VDA request and buffer align */ } static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) { if (pci_is_pcie(a->pcid)) { u16 devcontrol; pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol); if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > PCI_EXP_DEVCTL_READRQ_512B) { esas2r_log(ESAS2R_LOG_INFO, "max read request size > 512B"); devcontrol &= ~PCI_EXP_DEVCTL_READRQ; devcontrol |= PCI_EXP_DEVCTL_READRQ_512B; pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL, devcontrol); } } } /* * Determine the organization of the uncached data area and * finish initializing the adapter structure */ bool esas2r_init_adapter_struct(struct esas2r_adapter *a, void **uncached_area) { u32 i; u8 *high; struct esas2r_inbound_list_source_entry *element; struct esas2r_request *rq; struct esas2r_mem_desc *sgl; spin_lock_init(&a->sg_list_lock); spin_lock_init(&a->mem_lock); spin_lock_init(&a->queue_lock); a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS]; if (!alloc_vda_req(a, &a->general_req)) { esas2r_hdebug( "failed to allocate a VDA request for the general req!"); return false; } /* allocate requests for asynchronous events */ a->first_ae_req = kcalloc(num_ae_requests, sizeof(struct esas2r_request), GFP_KERNEL); if (a->first_ae_req == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate memory for asynchronous events"); return false; } /* allocate the S/G list memory descriptors */ a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc), GFP_KERNEL); if (a->sg_list_mds == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate memory for s/g list descriptors"); return false; } /* allocate the request table */ a->req_table = kcalloc(num_requests + num_ae_requests + 1, sizeof(struct esas2r_request *), GFP_KERNEL); if (a->req_table == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "failed to allocate memory for the request table"); return false; } /* initialize PCI configuration space */ esas2r_init_pci_cfg_space(a); /* * the thunder_stream boards all have a serial flash part that has a * different base address on the AHB bus. 
*/ if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID) && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) a->flags2 |= AF2_THUNDERBOLT; if (test_bit(AF2_THUNDERBOLT, &a->flags2)) a->flags2 |= AF2_SERIAL_FLASH; if (a->pcid->subsystem_device == ATTO_TLSH_1068) a->flags2 |= AF2_THUNDERLINK; /* Uncached Area */ high = (u8 *)*uncached_area; /* initialize the scatter/gather table pages */ for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) { sgl->size = sgl_page_size; list_add_tail(&sgl->next_desc, &a->free_sg_list_head); if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) { /* Allow the driver to load if the minimum count met. */ if (i < NUM_SGL_MIN) return false; break; } } /* compute the size of the lists */ a->list_size = num_requests + ESAS2R_LIST_EXTRA; /* allocate the inbound list */ a->inbound_list_md.size = a->list_size * sizeof(struct esas2r_inbound_list_source_entry); if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) { esas2r_hdebug("failed to allocate IB list"); return false; } /* allocate the outbound list */ a->outbound_list_md.size = a->list_size * sizeof(struct atto_vda_ob_rsp); if (!esas2r_initmem_alloc(a, &a->outbound_list_md, ESAS2R_LIST_ALIGN)) { esas2r_hdebug("failed to allocate IB list"); return false; } /* allocate the NVRAM structure */ a->nvram = (struct esas2r_sas_nvram *)high; high += sizeof(struct esas2r_sas_nvram); /* allocate the discovery buffer */ a->disc_buffer = high; high += ESAS2R_DISC_BUF_LEN; high = PTR_ALIGN(high, 8); /* allocate the outbound list copy pointer */ a->outbound_copy = (u32 volatile *)high; high += sizeof(u32); if (!test_bit(AF_NVR_VALID, &a->flags)) esas2r_nvram_set_defaults(a); /* update the caller's uncached memory area pointer */ *uncached_area = (void *)high; /* initialize the allocated memory */ if (test_bit(AF_FIRST_INIT, &a->flags)) { esas2r_targ_db_initialize(a); /* prime parts of the inbound list */ element = (struct esas2r_inbound_list_source_entry *)a-> inbound_list_md. virt_addr; for (i = 0; i < a->list_size; i++) { element->address = 0; element->reserved = 0; element->length = cpu_to_le32(HWILSE_INTERFACE_F0 | (sizeof(union atto_vda_req) / sizeof(u32))); element++; } /* init the AE requests */ for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, i++) { INIT_LIST_HEAD(&rq->req_list); if (!alloc_vda_req(a, rq)) { esas2r_hdebug( "failed to allocate a VDA request!"); return false; } esas2r_rq_init_request(rq, a); /* override the completion function */ rq->comp_cb = esas2r_ae_complete; } } return true; } /* This code will verify that the chip is operational. */ bool esas2r_check_adapter(struct esas2r_adapter *a) { u32 starttime; u32 doorbell; u64 ppaddr; u32 dw; /* * if the chip reset detected flag is set, we can bypass a bunch of * stuff. */ if (test_bit(AF_CHPRST_DETECTED, &a->flags)) goto skip_chip_reset; /* * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver * may have left them enabled or we may be recovering from a fault. */ esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK); esas2r_flush_register_dword(a, MU_INT_MASK_OUT); /* * wait for the firmware to become ready by forcing an interrupt and * waiting for a response. */ starttime = jiffies_to_msecs(jiffies); while (true) { esas2r_force_interrupt(a); doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell == 0xFFFFFFFF) { /* * Give the firmware up to two seconds to enable * register access after a reset. 
*/ if ((jiffies_to_msecs(jiffies) - starttime) > 2000) return esas2r_set_degraded_mode(a, "unable to access registers"); } else if (doorbell & DRBL_FORCE_INT) { u32 ver = (doorbell & DRBL_FW_VER_MSK); /* * This driver supports version 0 and version 1 of * the API */ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (ver == DRBL_FW_VER_0) { set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; } else if (ver == DRBL_FW_VER_1) { clear_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 1024; a->build_sgl = esas2r_build_sg_list_prd; } else { return esas2r_set_degraded_mode(a, "unknown firmware version"); } break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { esas2r_hdebug("FW ready TMO"); esas2r_bugon(); return esas2r_set_degraded_mode(a, "firmware start has timed out"); } } /* purge any asynchronous events since we will repost them later */ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_DOWN) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(50)); if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { esas2r_hdebug("timeout waiting for interface down"); break; } } skip_chip_reset: /* * first things first, before we go changing any of these registers * disable the communication lists. */ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); dw &= ~MU_ILC_ENABLE; esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); dw &= ~MU_OLC_ENABLE; esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); /* configure the communication list addresses */ ppaddr = a->inbound_list_md.phys_addr; esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, lower_32_bits(ppaddr)); esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, upper_32_bits(ppaddr)); ppaddr = a->outbound_list_md.phys_addr; esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, lower_32_bits(ppaddr)); esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, upper_32_bits(ppaddr)); ppaddr = a->uncached_phys + ((u8 *)a->outbound_copy - a->uncached); esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, lower_32_bits(ppaddr)); esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, upper_32_bits(ppaddr)); /* reset the read and write pointers */ *a->outbound_copy = a->last_write = a->last_read = a->list_size - 1; set_bit(AF_COMM_LIST_TOGGLE, &a->flags); esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, MU_OLW_TOGGLE | a->last_write); /* configure the interface select fields */ dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE); esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG, (dw | MU_OLIC_LIST_F0 | MU_OLIC_SOURCE_DDR)); /* finish configuring the communication lists */ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); dw &= ~(MU_ILC_ENTRY_MASK | 
MU_ILC_NUMBER_MASK); dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC | (a->list_size << MU_ILC_NUMBER_SHIFT); esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK); dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT); esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); /* * notify the firmware that we're done setting up the communication * list registers. wait here until the firmware is done configuring * its lists. it will signal that it is done by enabling the lists. */ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_INIT) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { esas2r_hdebug( "timeout waiting for communication list init"); esas2r_bugon(); return esas2r_set_degraded_mode(a, "timeout waiting for communication list init"); } } /* * flag whether the firmware supports the power down doorbell. we * determine this by reading the inbound doorbell enable mask. */ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); if (doorbell & DRBL_POWER_DOWN) set_bit(AF2_VDA_POWER_DOWN, &a->flags2); else clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); /* * enable assertion of outbound queue and doorbell interrupts in the * main interrupt cause register. */ esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK); esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); return true; } /* Process the initialization message just completed and format the next one. */ static bool esas2r_format_init_msg(struct esas2r_adapter *a, struct esas2r_request *rq) { u32 msg = a->init_msg; struct atto_vda_cfg_init *ci; a->init_msg = 0; switch (msg) { case ESAS2R_INIT_MSG_START: case ESAS2R_INIT_MSG_REINIT: { esas2r_hdebug("CFG init"); esas2r_build_cfg_req(a, rq, VDA_CFG_INIT, 0, NULL); ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; ci->sgl_page_size = cpu_to_le32(sgl_page_size); /* firmware interface overflows in y2106 */ ci->epoch_time = cpu_to_le32(ktime_get_real_seconds()); rq->flags |= RF_FAILURE_OK; a->init_msg = ESAS2R_INIT_MSG_INIT; break; } case ESAS2R_INIT_MSG_INIT: if (rq->req_stat == RS_SUCCESS) { u32 major; u32 minor; u16 fw_release; a->fw_version = le16_to_cpu( rq->func_rsp.cfg_rsp.vda_version); a->fw_build = rq->func_rsp.cfg_rsp.fw_build; fw_release = le16_to_cpu( rq->func_rsp.cfg_rsp.fw_release); major = LOBYTE(fw_release); minor = HIBYTE(fw_release); a->fw_version += (major << 16) + (minor << 24); } else { esas2r_hdebug("FAILED"); } /* * the 2.71 and earlier releases of R6xx firmware did not error * unsupported config requests correctly. 
*/ if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) || (be32_to_cpu(a->fw_version) > 0x00524702)) { esas2r_hdebug("CFG get init"); esas2r_build_cfg_req(a, rq, VDA_CFG_GET_INIT2, sizeof(struct atto_vda_cfg_init), NULL); rq->vrq->cfg.sg_list_offset = offsetof( struct atto_vda_cfg_req, data.sge); rq->vrq->cfg.data.prde.ctl_len = cpu_to_le32(sizeof(struct atto_vda_cfg_init)); rq->vrq->cfg.data.prde.address = cpu_to_le64( rq->vrq_md->phys_addr + sizeof(union atto_vda_req)); rq->flags |= RF_FAILURE_OK; a->init_msg = ESAS2R_INIT_MSG_GET_INIT; break; } fallthrough; case ESAS2R_INIT_MSG_GET_INIT: if (msg == ESAS2R_INIT_MSG_GET_INIT) { ci = (struct atto_vda_cfg_init *)rq->data_buf; if (rq->req_stat == RS_SUCCESS) { a->num_targets_backend = le32_to_cpu(ci->num_targets_backend); a->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel); } else { esas2r_hdebug("FAILED"); } } fallthrough; default: rq->req_stat = RS_SUCCESS; return false; } return true; } /* * Perform initialization messages via the request queue. Messages are * performed with interrupts disabled. */ bool esas2r_init_msgs(struct esas2r_adapter *a) { bool success = true; struct esas2r_request *rq = &a->general_req; esas2r_rq_init_request(rq, a); rq->comp_cb = esas2r_dummy_complete; if (a->init_msg == 0) a->init_msg = ESAS2R_INIT_MSG_REINIT; while (a->init_msg) { if (esas2r_format_init_msg(a, rq)) { unsigned long flags; while (true) { spin_lock_irqsave(&a->queue_lock, flags); esas2r_start_vda_request(a, rq); spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_wait_request(a, rq); if (rq->req_stat != RS_PENDING) break; } } if (rq->req_stat == RS_SUCCESS || ((rq->flags & RF_FAILURE_OK) && rq->req_stat != RS_TIMEOUT)) continue; esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)", a->init_msg, rq->req_stat, rq->flags); a->init_msg = ESAS2R_INIT_MSG_START; success = false; break; } esas2r_rq_destroy_request(rq, a); return success; } /* Initialize the adapter chip */ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) { bool rslt = false; struct esas2r_request *rq; u32 i; if (test_bit(AF_DEGRADED_MODE, &a->flags)) goto exit; if (!test_bit(AF_NVR_VALID, &a->flags)) { if (!esas2r_nvram_read_direct(a)) esas2r_log(ESAS2R_LOG_WARN, "invalid/missing NVRAM parameters"); } if (!esas2r_init_msgs(a)) { esas2r_set_degraded_mode(a, "init messages failed"); goto exit; } /* The firmware is ready. */ clear_bit(AF_DEGRADED_MODE, &a->flags); clear_bit(AF_CHPRST_PENDING, &a->flags); /* Post all the async event requests */ for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) esas2r_start_ae_request(a, rq); if (!a->flash_rev[0]) esas2r_read_flash_rev(a); if (!a->image_type[0]) esas2r_read_image_type(a); if (a->fw_version == 0) a->fw_rev[0] = 0; else sprintf(a->fw_rev, "%1d.%02d", (int)LOBYTE(HIWORD(a->fw_version)), (int)HIBYTE(HIWORD(a->fw_version))); esas2r_hdebug("firmware revision: %s", a->fw_rev); if (test_bit(AF_CHPRST_DETECTED, &a->flags) && (test_bit(AF_FIRST_INIT, &a->flags))) { esas2r_enable_chip_interrupts(a); return true; } /* initialize discovery */ esas2r_disc_initialize(a); /* * wait for the device wait time to expire here if requested. this is * usually requested during initial driver load and possibly when * resuming from a low power state. deferred device waiting will use * interrupts. chip reset recovery always defers device waiting to * avoid being in a TASKLET too long. 
*/ if (init_poll) { u32 currtime = a->disc_start_time; u32 nexttick = 100; u32 deltatime; /* * Block Tasklets from getting scheduled and indicate this is * polled discovery. */ set_bit(AF_TASKLET_SCHEDULED, &a->flags); set_bit(AF_DISC_POLLED, &a->flags); /* * Temporarily bring the disable count to zero to enable * deferred processing. Note that the count is already zero * after the first initialization. */ if (test_bit(AF_FIRST_INIT, &a->flags)) atomic_dec(&a->disable_cnt); while (test_bit(AF_DISC_PENDING, &a->flags)) { schedule_timeout_interruptible(msecs_to_jiffies(100)); /* * Determine the need for a timer tick based on the * delta time between this and the last iteration of * this loop. We don't use the absolute time because * then we would have to worry about when nexttick * wraps and currtime hasn't yet. */ deltatime = jiffies_to_msecs(jiffies) - currtime; currtime += deltatime; /* * Process any waiting discovery as long as the chip is * up. If a chip reset happens during initial polling, * we have to make sure the timer tick processes the * doorbell indicating the firmware is ready. */ if (!test_bit(AF_CHPRST_PENDING, &a->flags)) esas2r_disc_check_for_work(a); /* Simulate a timer tick. */ if (nexttick <= deltatime) { /* Time for a timer tick */ nexttick += 100; esas2r_timer_tick(a); } if (nexttick > deltatime) nexttick -= deltatime; /* Do any deferred processing */ if (esas2r_is_tasklet_pending(a)) esas2r_do_tasklet_tasks(a); } if (test_bit(AF_FIRST_INIT, &a->flags)) atomic_inc(&a->disable_cnt); clear_bit(AF_DISC_POLLED, &a->flags); clear_bit(AF_TASKLET_SCHEDULED, &a->flags); } esas2r_targ_db_report_changes(a); /* * For cases where (a) the initialization messages processing may * handle an interrupt for a port event and a discovery is waiting, but * we are not waiting for devices, or (b) the device wait time has been * exhausted but there is still discovery pending, start any leftover * discovery in interrupt driven mode. */ esas2r_disc_start_waiting(a); /* Enable chip interrupts */ a->int_mask = ESAS2R_INT_STS_MASK; esas2r_enable_chip_interrupts(a); esas2r_enable_heartbeat(a); rslt = true; exit: /* * Regardless of whether initialization was successful, certain things * need to get done before we exit. */ if (test_bit(AF_CHPRST_DETECTED, &a->flags) && test_bit(AF_FIRST_INIT, &a->flags)) { /* * Reinitialization was performed during the first * initialization. Only clear the chip reset flag so the * original device polling is not cancelled. */ if (!rslt) clear_bit(AF_CHPRST_PENDING, &a->flags); } else { /* First initialization or a subsequent re-init is complete. */ if (!rslt) { clear_bit(AF_CHPRST_PENDING, &a->flags); clear_bit(AF_DISC_PENDING, &a->flags); } /* Enable deferred processing after the first initialization. */ if (test_bit(AF_FIRST_INIT, &a->flags)) { clear_bit(AF_FIRST_INIT, &a->flags); if (atomic_dec_return(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); } } return rslt; } void esas2r_reset_adapter(struct esas2r_adapter *a) { set_bit(AF_OS_RESET, &a->flags); esas2r_local_reset_adapter(a); esas2r_schedule_tasklet(a); } void esas2r_reset_chip(struct esas2r_adapter *a) { if (!esas2r_is_adapter_present(a)) return; /* * Before we reset the chip, save off the VDA core dump. The VDA core * dump is located in the upper 512KB of the onchip SRAM. Make sure * to not overwrite a previous crash that was saved. 
*/ if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { esas2r_read_mem_block(a, a->fw_coredump_buff, MW_DATA_ADDR_SRAM + 0x80000, ESAS2R_FWCOREDUMP_SZ); set_bit(AF2_COREDUMP_SAVED, &a->flags2); } clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); /* Reset the chip */ if (a->pcid->revision == MVR_FREY_B2) esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2, MU_CTL_IN_FULL_RST2); else esas2r_write_register_dword(a, MU_CTL_STATUS_IN, MU_CTL_IN_FULL_RST); /* Stall a little while to let the reset condition clear */ mdelay(10); } static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) { u32 starttime; u32 doorbell; esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_POWER_DOWN) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 30000) { esas2r_hdebug("Timeout waiting for power down"); break; } } } /* * Perform power management processing including managing device states, adapter * states, interrupts, and I/O. */ void esas2r_power_down(struct esas2r_adapter *a) { set_bit(AF_POWER_MGT, &a->flags); set_bit(AF_POWER_DOWN, &a->flags); if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { u32 starttime; u32 doorbell; /* * We are currently running OK and will be reinitializing later. * increment the disable count to coordinate with * esas2r_init_adapter. We don't have to do this in degraded * mode since we never enabled interrupts in the first place. */ esas2r_disable_chip_interrupts(a); esas2r_disable_heartbeat(a); /* wait for any VDA activity to clear before continuing */ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); starttime = jiffies_to_msecs(jiffies); while (true) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell & DRBL_MSG_IFC_DOWN) { esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { esas2r_hdebug( "timeout waiting for interface down"); break; } } /* * For versions of firmware that support it tell them the driver * is powering down. */ if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) esas2r_power_down_notify_firmware(a); } /* Suspend I/O processing. */ set_bit(AF_OS_RESET, &a->flags); set_bit(AF_DISC_PENDING, &a->flags); set_bit(AF_CHPRST_PENDING, &a->flags); esas2r_process_adapter_reset(a); /* Remove devices now that I/O is cleaned up. */ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); esas2r_targ_db_remove_all(a, false); } /* * Perform power management processing including managing device states, adapter * states, interrupts, and I/O. */ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) { bool ret; clear_bit(AF_POWER_DOWN, &a->flags); esas2r_init_pci_cfg_space(a); set_bit(AF_FIRST_INIT, &a->flags); atomic_inc(&a->disable_cnt); /* reinitialize the adapter */ ret = esas2r_check_adapter(a); if (!esas2r_init_adapter_hw(a, init_poll)) ret = false; /* send the reset asynchronous event */ esas2r_send_reset_ae(a, true); /* clear this flag after initialization. 
*/ clear_bit(AF_POWER_MGT, &a->flags); return ret; } bool esas2r_is_adapter_present(struct esas2r_adapter *a) { if (test_bit(AF_NOT_PRESENT, &a->flags)) return false; if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { set_bit(AF_NOT_PRESENT, &a->flags); return false; } return true; } const char *esas2r_get_model_name(struct esas2r_adapter *a) { switch (a->pcid->subsystem_device) { case ATTO_ESAS_R680: return "ATTO ExpressSAS R680"; case ATTO_ESAS_R608: return "ATTO ExpressSAS R608"; case ATTO_ESAS_R60F: return "ATTO ExpressSAS R60F"; case ATTO_ESAS_R6F0: return "ATTO ExpressSAS R6F0"; case ATTO_ESAS_R644: return "ATTO ExpressSAS R644"; case ATTO_ESAS_R648: return "ATTO ExpressSAS R648"; case ATTO_TSSC_3808: return "ATTO ThunderStream SC 3808D"; case ATTO_TSSC_3808E: return "ATTO ThunderStream SC 3808E"; case ATTO_TLSH_1068: return "ATTO ThunderLink SH 1068"; } return "ATTO SAS Controller"; } const char *esas2r_get_model_name_short(struct esas2r_adapter *a) { switch (a->pcid->subsystem_device) { case ATTO_ESAS_R680: return "R680"; case ATTO_ESAS_R608: return "R608"; case ATTO_ESAS_R60F: return "R60F"; case ATTO_ESAS_R6F0: return "R6F0"; case ATTO_ESAS_R644: return "R644"; case ATTO_ESAS_R648: return "R648"; case ATTO_TSSC_3808: return "SC 3808D"; case ATTO_TSSC_3808E: return "SC 3808E"; case ATTO_TLSH_1068: return "SH 1068"; } return "unknown"; }
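/*
 * Editor's illustrative sketch (not upstream driver code): the init paths
 * above repeat one pattern several times -- ring a doorbell bit on
 * MU_DOORBELL_IN, then poll MU_DOORBELL_OUT until the firmware echoes the
 * same bit back or a timeout expires (DRBL_MSG_IFC_DOWN, DRBL_MSG_IFC_INIT,
 * and DRBL_POWER_DOWN all follow it).  The helper below is a hypothetical
 * condensation of that pattern; its name, signature, and the fixed 100 ms
 * poll interval are assumptions made for illustration only.
 */
static bool esas2r_example_wait_doorbell_echo(struct esas2r_adapter *a,
					      u32 bit, u32 timeout_ms)
{
	u32 starttime = jiffies_to_msecs(jiffies);
	u32 doorbell;

	/* ring the doorbell */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, bit);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);

		/* the firmware acknowledges by setting the same bit */
		if (doorbell & bit) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			return true;
		}

		if ((jiffies_to_msecs(jiffies) - starttime) > timeout_ms)
			return false;

		schedule_timeout_interruptible(msecs_to_jiffies(100));
	}
}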
linux-master
drivers/scsi/esas2r/esas2r_init.c
/* * linux/drivers/scsi/esas2r/esas2r_int.c * esas2r interrupt handling * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include "esas2r.h" /* Local function prototypes */ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell); static void esas2r_get_outbound_responses(struct esas2r_adapter *a); static void esas2r_process_bus_reset(struct esas2r_adapter *a); /* * Poll the adapter for interrupts and service them. * This function handles both legacy interrupts and MSI. */ void esas2r_polled_interrupt(struct esas2r_adapter *a) { u32 intstat; u32 doorbell; esas2r_disable_chip_interrupts(a); intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); if (intstat & MU_INTSTAT_POST_OUT) { /* clear the interrupt */ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, MU_OLIS_INT); esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); esas2r_get_outbound_responses(a); } if (intstat & MU_INTSTAT_DRBL) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell != 0) esas2r_doorbell_interrupt(a, doorbell); } esas2r_enable_chip_interrupts(a); if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); } /* * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler * schedules a TASKLET to process events, whereas the MSI handler just * processes interrupt events directly. 
*/ irqreturn_t esas2r_interrupt(int irq, void *dev_id) { struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id; if (!esas2r_adapter_interrupt_pending(a)) return IRQ_NONE; set_bit(AF2_INT_PENDING, &a->flags2); esas2r_schedule_tasklet(a); return IRQ_HANDLED; } void esas2r_adapter_interrupt(struct esas2r_adapter *a) { u32 doorbell; if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) { /* clear the interrupt */ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, MU_OLIS_INT); esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); esas2r_get_outbound_responses(a); } if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell != 0) esas2r_doorbell_interrupt(a, doorbell); } a->int_mask = ESAS2R_INT_STS_MASK; esas2r_enable_chip_interrupts(a); if (likely(atomic_read(&a->disable_cnt) == 0)) esas2r_do_deferred_processes(a); } irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id) { struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id; u32 intstat; u32 doorbell; intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); if (likely(intstat & MU_INTSTAT_POST_OUT)) { /* clear the interrupt */ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, MU_OLIS_INT); esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); esas2r_get_outbound_responses(a); } if (unlikely(intstat & MU_INTSTAT_DRBL)) { doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell != 0) esas2r_doorbell_interrupt(a, doorbell); } /* * Work around a chip bug and force a new MSI to be sent if one is * still pending. */ esas2r_disable_chip_interrupts(a); esas2r_enable_chip_interrupts(a); if (likely(atomic_read(&a->disable_cnt) == 0)) esas2r_do_deferred_processes(a); esas2r_do_tasklet_tasks(a); return 1; } static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a, struct esas2r_request *rq, struct atto_vda_ob_rsp *rsp) { /* * For I/O requests, only copy the response if an error * occurred and setup a callback to do error processing. */ if (unlikely(rq->req_stat != RS_SUCCESS)) { memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); if (rq->req_stat == RS_ABORTED) { if (rq->timeout > RQ_MAX_TIMEOUT) rq->req_stat = RS_TIMEOUT; } else if (rq->req_stat == RS_SCSI_ERROR) { u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; esas2r_trace("scsistatus: %x", scsistatus); /* Any of these are a good result. 
*/ if (scsistatus == SAM_STAT_GOOD || scsistatus == SAM_STAT_CONDITION_MET || scsistatus == SAM_STAT_INTERMEDIATE || scsistatus == SAM_STAT_INTERMEDIATE_CONDITION_MET) { rq->req_stat = RS_SUCCESS; rq->func_rsp.scsi_rsp.scsi_stat = SAM_STAT_GOOD; } } } } static void esas2r_get_outbound_responses(struct esas2r_adapter *a) { struct atto_vda_ob_rsp *rsp; u32 rspput_ptr; u32 rspget_ptr; struct esas2r_request *rq; u32 handle; unsigned long flags; LIST_HEAD(comp_list); esas2r_trace_enter(); spin_lock_irqsave(&a->queue_lock, flags); /* Get the outbound limit and pointers */ rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR; rspget_ptr = a->last_read; esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr); /* If we don't have anything to process, get out */ if (unlikely(rspget_ptr == rspput_ptr)) { spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_trace_exit(); return; } /* Make sure the firmware is healthy */ if (unlikely(rspput_ptr >= a->list_size)) { spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_bugon(); esas2r_local_reset_adapter(a); esas2r_trace_exit(); return; } do { rspget_ptr++; if (rspget_ptr >= a->list_size) rspget_ptr = 0; rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr + rspget_ptr; handle = rsp->handle; /* Verify the handle range */ if (unlikely(LOWORD(handle) == 0 || LOWORD(handle) > num_requests + num_ae_requests + 1)) { esas2r_bugon(); continue; } /* Get the request for this handle */ rq = a->req_table[LOWORD(handle)]; if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) { esas2r_bugon(); continue; } list_del(&rq->req_list); /* Get the completion status */ rq->req_stat = rsp->req_stat; esas2r_trace("handle: %x", handle); esas2r_trace("rq: %p", rq); esas2r_trace("req_status: %x", rq->req_stat); if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { esas2r_handle_outbound_rsp_err(a, rq, rsp); } else { /* * Copy the outbound completion struct for non-I/O * requests. */ memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); } /* Queue the request for completion. */ list_add_tail(&rq->comp_list, &comp_list); } while (rspget_ptr != rspput_ptr); a->last_read = rspget_ptr; spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_comp_list_drain(a, &comp_list); esas2r_trace_exit(); } /* * Perform all deferred processes for the adapter. Deferred * processes can only be done while the current interrupt * disable_cnt for the adapter is zero. */ void esas2r_do_deferred_processes(struct esas2r_adapter *a) { int startreqs = 2; struct esas2r_request *rq; unsigned long flags; /* * startreqs is used to control starting requests * that are on the deferred queue * = 0 - do not start any requests * = 1 - can start discovery requests * = 2 - can start any request */ if (test_bit(AF_CHPRST_PENDING, &a->flags) || test_bit(AF_FLASHING, &a->flags)) startreqs = 0; else if (test_bit(AF_DISC_PENDING, &a->flags)) startreqs = 1; atomic_inc(&a->disable_cnt); /* Clear off the completed list to be processed later. 
*/ if (esas2r_is_tasklet_pending(a)) { esas2r_schedule_tasklet(a); startreqs = 0; } /* * If we can start requests then traverse the defer queue * looking for requests to start or complete */ if (startreqs && !list_empty(&a->defer_list)) { LIST_HEAD(comp_list); struct list_head *element, *next; spin_lock_irqsave(&a->queue_lock, flags); list_for_each_safe(element, next, &a->defer_list) { rq = list_entry(element, struct esas2r_request, req_list); if (rq->req_stat != RS_PENDING) { list_del(element); list_add_tail(&rq->comp_list, &comp_list); } /* * Process discovery and OS requests separately. We * can't hold up discovery requests when discovery is * pending. In general, there may be different sets of * conditions for starting different types of requests. */ else if (rq->req_type == RT_DISC_REQ) { list_del(element); esas2r_disc_local_start_request(a, rq); } else if (startreqs == 2) { list_del(element); esas2r_local_start_request(a, rq); /* * Flashing could have been set by last local * start */ if (test_bit(AF_FLASHING, &a->flags)) break; } } spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_comp_list_drain(a, &comp_list); } atomic_dec(&a->disable_cnt); } /* * Process an adapter reset (or one that is about to happen) * by making sure all outstanding requests are completed that * haven't been already. */ void esas2r_process_adapter_reset(struct esas2r_adapter *a) { struct esas2r_request *rq = &a->general_req; unsigned long flags; struct esas2r_disc_context *dc; LIST_HEAD(comp_list); struct list_head *element; esas2r_trace_enter(); spin_lock_irqsave(&a->queue_lock, flags); /* abort the active discovery, if any. */ if (rq->interrupt_cx) { dc = (struct esas2r_disc_context *)rq->interrupt_cx; dc->disc_evt = 0; clear_bit(AF_DISC_IN_PROG, &a->flags); } /* * just clear the interrupt callback for now. it will be dequeued if * and when we find it on the active queue and we don't want the * callback called. also set the dummy completion callback in case we * were doing an I/O request. 
*/ rq->interrupt_cx = NULL; rq->interrupt_cb = NULL; rq->comp_cb = esas2r_dummy_complete; /* Reset the read and write pointers */ *a->outbound_copy = a->last_write = a->last_read = a->list_size - 1; set_bit(AF_COMM_LIST_TOGGLE, &a->flags); /* Kill all the requests on the active list */ list_for_each(element, &a->defer_list) { rq = list_entry(element, struct esas2r_request, req_list); if (rq->req_stat == RS_STARTED) if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) list_add_tail(&rq->comp_list, &comp_list); } spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_comp_list_drain(a, &comp_list); esas2r_process_bus_reset(a); esas2r_trace_exit(); } static void esas2r_process_bus_reset(struct esas2r_adapter *a) { struct esas2r_request *rq; struct list_head *element; unsigned long flags; LIST_HEAD(comp_list); esas2r_trace_enter(); esas2r_hdebug("reset detected"); spin_lock_irqsave(&a->queue_lock, flags); /* kill all the requests on the deferred queue */ list_for_each(element, &a->defer_list) { rq = list_entry(element, struct esas2r_request, req_list); if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) list_add_tail(&rq->comp_list, &comp_list); } spin_unlock_irqrestore(&a->queue_lock, flags); esas2r_comp_list_drain(a, &comp_list); if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); clear_bit(AF_OS_RESET, &a->flags); esas2r_trace_exit(); } static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) { clear_bit(AF_CHPRST_NEEDED, &a->flags); clear_bit(AF_BUSRST_NEEDED, &a->flags); clear_bit(AF_BUSRST_DETECTED, &a->flags); clear_bit(AF_BUSRST_PENDING, &a->flags); /* * Make sure we don't get attempt more than 3 resets * when the uptime between resets does not exceed one * minute. This will stop any situation where there is * really something wrong with the hardware. The way * this works is that we start with uptime ticks at 0. * Each time we do a reset, we add 20 seconds worth to * the count. Each time a timer tick occurs, as long * as a chip reset is not pending, we decrement the * tick count. If the uptime ticks ever gets to 60 * seconds worth, we disable the adapter from that * point forward. Three strikes, you're out. */ if (!esas2r_is_adapter_present(a) || (a->chip_uptime >= ESAS2R_CHP_UPTIME_MAX)) { esas2r_hdebug("*** adapter disabled ***"); /* * Ok, some kind of hard failure. Make sure we * exit this loop with chip interrupts * permanently disabled so we don't lock up the * entire system. Also flag degraded mode to * prevent the heartbeat from trying to recover. */ set_bit(AF_DEGRADED_MODE, &a->flags); set_bit(AF_DISABLED, &a->flags); clear_bit(AF_CHPRST_PENDING, &a->flags); clear_bit(AF_DISC_PENDING, &a->flags); esas2r_disable_chip_interrupts(a); a->int_mask = 0; esas2r_process_adapter_reset(a); esas2r_log(ESAS2R_LOG_CRIT, "Adapter disabled because of hardware failure"); } else { bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags); if (!alrdyrst) /* * Only disable interrupts if this is * the first reset attempt. */ esas2r_disable_chip_interrupts(a); if ((test_bit(AF_POWER_MGT, &a->flags)) && !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) { /* * Don't reset the chip on the first * deferred power up attempt. */ } else { esas2r_hdebug("*** resetting chip ***"); esas2r_reset_chip(a); } /* Kick off the reinitialization */ a->chip_uptime += ESAS2R_CHP_UPTIME_CNT; a->chip_init_time = jiffies_to_msecs(jiffies); if (!test_bit(AF_POWER_MGT, &a->flags)) { esas2r_process_adapter_reset(a); if (!alrdyrst) { /* Remove devices now that I/O is cleaned up. 
*/ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); esas2r_targ_db_remove_all(a, false); } } a->int_mask = 0; } } static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) { while (test_bit(AF_CHPRST_DETECTED, &a->flags)) { /* * Balance the enable in esas2r_initadapter_hw. * Esas2r_power_down already took care of it for power * management. */ if (!test_bit(AF_DEGRADED_MODE, &a->flags) && !test_bit(AF_POWER_MGT, &a->flags)) esas2r_disable_chip_interrupts(a); /* Reinitialize the chip. */ esas2r_check_adapter(a); esas2r_init_adapter_hw(a, 0); if (test_bit(AF_CHPRST_NEEDED, &a->flags)) break; if (test_bit(AF_POWER_MGT, &a->flags)) { /* Recovery from power management. */ if (test_bit(AF_FIRST_INIT, &a->flags)) { /* Chip reset during normal power up */ esas2r_log(ESAS2R_LOG_CRIT, "The firmware was reset during a normal power-up sequence"); } else { /* Deferred power up complete. */ clear_bit(AF_POWER_MGT, &a->flags); esas2r_send_reset_ae(a, true); } } else { /* Recovery from online chip reset. */ if (test_bit(AF_FIRST_INIT, &a->flags)) { /* Chip reset during driver load */ } else { /* Chip reset after driver load */ esas2r_send_reset_ae(a, false); } esas2r_log(ESAS2R_LOG_CRIT, "Recovering from a chip reset while the chip was online"); } clear_bit(AF_CHPRST_STARTED, &a->flags); esas2r_enable_chip_interrupts(a); /* * Clear this flag last! this indicates that the chip has been * reset already during initialization. */ clear_bit(AF_CHPRST_DETECTED, &a->flags); } } /* Perform deferred tasks when chip interrupts are disabled */ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) { if (test_bit(AF_CHPRST_NEEDED, &a->flags) || test_bit(AF_CHPRST_DETECTED, &a->flags)) { if (test_bit(AF_CHPRST_NEEDED, &a->flags)) esas2r_chip_rst_needed_during_tasklet(a); esas2r_handle_chip_rst_during_tasklet(a); } if (test_bit(AF_BUSRST_NEEDED, &a->flags)) { esas2r_hdebug("hard resetting bus"); clear_bit(AF_BUSRST_NEEDED, &a->flags); if (test_bit(AF_FLASHING, &a->flags)) set_bit(AF_BUSRST_DETECTED, &a->flags); else esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_RESET_BUS); } if (test_bit(AF_BUSRST_DETECTED, &a->flags)) { esas2r_process_bus_reset(a); esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev), "scsi_report_bus_reset() called"); scsi_report_bus_reset(a->host, 0); clear_bit(AF_BUSRST_DETECTED, &a->flags); clear_bit(AF_BUSRST_PENDING, &a->flags); esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); } if (test_bit(AF_PORT_CHANGE, &a->flags)) { clear_bit(AF_PORT_CHANGE, &a->flags); esas2r_targ_db_report_changes(a); } if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); } static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell) { if (!(doorbell & DRBL_FORCE_INT)) { esas2r_trace_enter(); esas2r_trace("doorbell: %x", doorbell); } /* First clear the doorbell bits */ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (doorbell & DRBL_RESET_BUS) set_bit(AF_BUSRST_DETECTED, &a->flags); if (doorbell & DRBL_FORCE_INT) clear_bit(AF_HEARTBEAT, &a->flags); if (doorbell & DRBL_PANIC_REASON_MASK) { esas2r_hdebug("*** Firmware Panic ***"); esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked"); } if (doorbell & DRBL_FW_RESET) { set_bit(AF2_COREDUMP_AVAIL, &a->flags2); esas2r_local_reset_adapter(a); } if (!(doorbell & DRBL_FORCE_INT)) { esas2r_trace_exit(); } } void esas2r_force_interrupt(struct esas2r_adapter *a) { esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT | DRBL_DRV_VER); } static void esas2r_lun_event(struct esas2r_adapter *a, 
union atto_vda_ae *ae, u16 target, u32 length) { struct esas2r_target *t = a->targetdb + target; u32 cplen = length; unsigned long flags; if (cplen > sizeof(t->lu_event)) cplen = sizeof(t->lu_event); esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent); esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate); spin_lock_irqsave(&a->mem_lock, flags); t->new_target_state = TS_INVALID; if (ae->lu.dwevent & VDAAE_LU_LOST) { t->new_target_state = TS_NOT_PRESENT; } else { switch (ae->lu.bystate) { case VDAAE_LU_NOT_PRESENT: case VDAAE_LU_OFFLINE: case VDAAE_LU_DELETED: case VDAAE_LU_FACTORY_DISABLED: t->new_target_state = TS_NOT_PRESENT; break; case VDAAE_LU_ONLINE: case VDAAE_LU_DEGRADED: t->new_target_state = TS_PRESENT; break; } } if (t->new_target_state != TS_INVALID) { memcpy(&t->lu_event, &ae->lu, cplen); esas2r_disc_queue_event(a, DCDE_DEV_CHANGE); } spin_unlock_irqrestore(&a->mem_lock, flags); } void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq) { union atto_vda_ae *ae = (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data; u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length); union atto_vda_ae *last = (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data + length); esas2r_trace_enter(); esas2r_trace("length: %d", length); if (length > sizeof(struct atto_vda_ae_data) || (length & 3) != 0 || length == 0) { esas2r_log(ESAS2R_LOG_WARN, "The AE request response length (%p) is too long: %d", rq, length); esas2r_hdebug("aereq->length (0x%x) too long", length); esas2r_bugon(); last = ae; } while (ae < last) { u16 target; esas2r_trace("ae: %p", ae); esas2r_trace("ae->hdr: %p", &(ae->hdr)); length = ae->hdr.bylength; if (length > (u32)((u8 *)last - (u8 *)ae) || (length & 3) != 0 || length == 0) { esas2r_log(ESAS2R_LOG_CRIT, "the async event length is invalid (%p): %d", ae, length); esas2r_hdebug("ae->hdr.length (0x%x) invalid", length); esas2r_bugon(); break; } esas2r_nuxi_ae_data(ae); esas2r_queue_fw_event(a, fw_event_vda_ae, ae, sizeof(union atto_vda_ae)); switch (ae->hdr.bytype) { case VDAAE_HDR_TYPE_RAID: if (ae->raid.dwflags & (VDAAE_GROUP_STATE | VDAAE_RBLD_STATE | VDAAE_MEMBER_CHG | VDAAE_PART_CHG)) { esas2r_log(ESAS2R_LOG_INFO, "RAID event received - name:%s rebuild_state:%d group_state:%d", ae->raid.acname, ae->raid.byrebuild_state, ae->raid.bygroup_state); } break; case VDAAE_HDR_TYPE_LU: esas2r_log(ESAS2R_LOG_INFO, "LUN event received: event:%d target_id:%d LUN:%d state:%d", ae->lu.dwevent, ae->lu.id.tgtlun.wtarget_id, ae->lu.id.tgtlun.bylun, ae->lu.bystate); target = ae->lu.id.tgtlun.wtarget_id; if (target < ESAS2R_MAX_TARGETS) esas2r_lun_event(a, ae, target, length); break; case VDAAE_HDR_TYPE_DISK: esas2r_log(ESAS2R_LOG_INFO, "Disk event received"); break; default: /* Silently ignore the rest and let the apps deal with * them. */ break; } ae = (union atto_vda_ae *)((u8 *)ae + length); } /* Now requeue it. */ esas2r_start_ae_request(a, rq); esas2r_trace_exit(); } /* Send an asynchronous event for a chip reset or power management. 
*/ void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt) { struct atto_vda_ae_hdr ae; if (pwr_mgt) ae.bytype = VDAAE_HDR_TYPE_PWRMGT; else ae.bytype = VDAAE_HDR_TYPE_RESET; ae.byversion = VDAAE_HDR_VER_0; ae.byflags = 0; ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr); if (pwr_mgt) { esas2r_hdebug("*** sending power management AE ***"); } else { esas2r_hdebug("*** sending reset AE ***"); } esas2r_queue_fw_event(a, fw_event_vda_ae, &ae, sizeof(union atto_vda_ae)); } void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq) {} static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a, struct esas2r_request *rq) { u8 snslen, snslen2; snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len; if (snslen > rq->sense_len) snslen = rq->sense_len; if (snslen) { if (rq->sense_buf) memcpy(rq->sense_buf, rq->data_buf, snslen); else rq->sense_buf = (u8 *)rq->data_buf; /* See about possible sense data */ if (snslen2 > 0x0c) { u8 *s = (u8 *)rq->data_buf; esas2r_trace_enter(); /* Report LUNS data has changed */ if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) { esas2r_trace("rq->target_id: %d", rq->target_id); esas2r_target_state_changed(a, rq->target_id, TS_LUN_CHANGE); } esas2r_trace("add_sense_key=%x", s[0x0c]); esas2r_trace("add_sense_qual=%x", s[0x0d]); esas2r_trace_exit(); } } rq->sense_len = snslen; } void esas2r_complete_request(struct esas2r_adapter *a, struct esas2r_request *rq) { if (rq->vrq->scsi.function == VDA_FUNC_FLASH && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) clear_bit(AF_FLASHING, &a->flags); /* See if we setup a callback to do special processing */ if (rq->interrupt_cb) { (*rq->interrupt_cb)(a, rq); if (rq->req_stat == RS_PENDING) { esas2r_start_request(a, rq); return; } } if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI) && unlikely(rq->req_stat != RS_SUCCESS)) { esas2r_check_req_rsp_sense(a, rq); esas2r_log_request_failure(a, rq); } (*rq->comp_cb)(a, rq); }
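/*
 * Editor's illustrative sketch (not upstream driver code): the response walk
 * in esas2r_get_outbound_responses() above is a classic get/put ring.  The
 * firmware mirrors its put pointer into *a->outbound_copy and the driver
 * advances a->last_read toward it, wrapping at a->list_size.  The standalone
 * loop below shows only that index arithmetic; the names are illustrative.
 */
static unsigned int example_consume_outbound_ring(unsigned int get,
						  unsigned int put,
						  unsigned int list_size,
						  void (*consume)(unsigned int))
{
	while (get != put) {
		/* advance first, wrapping to the start of the list */
		if (++get >= list_size)
			get = 0;

		/* process the response entry at the new get index */
		consume(get);
	}

	/* the caller stores this back as its new last_read value */
	return get;
}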
linux-master
drivers/scsi/esas2r/esas2r_int.c
/* * linux/drivers/scsi/esas2r/esas2r_io.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected])mpt3sas/mpt3sas_trigger_diag. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include "esas2r.h" void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_target *t = NULL; struct esas2r_request *startrq = rq; unsigned long flags; if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) || test_bit(AF_POWER_DOWN, &a->flags))) { if (rq->vrq->scsi.function == VDA_FUNC_SCSI) rq->req_stat = RS_SEL2; else rq->req_stat = RS_DEGRADED; } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { t = a->targetdb + rq->target_id; if (unlikely(t >= a->targetdb_end || !(t->flags & TF_USED))) { rq->req_stat = RS_SEL; } else { /* copy in the target ID. */ rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id); /* * Test if we want to report RS_SEL for missing target. * Note that if AF_DISC_PENDING is set than this will * go on the defer queue. 
*/ if (unlikely(t->target_state != TS_PRESENT && !test_bit(AF_DISC_PENDING, &a->flags))) rq->req_stat = RS_SEL; } } if (unlikely(rq->req_stat != RS_PENDING)) { esas2r_complete_request(a, rq); return; } esas2r_trace("rq=%p", rq); esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle); if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { esas2r_trace("rq->target_id=%d", rq->target_id); esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags); } spin_lock_irqsave(&a->queue_lock, flags); if (likely(list_empty(&a->defer_list) && !test_bit(AF_CHPRST_PENDING, &a->flags) && !test_bit(AF_FLASHING, &a->flags) && !test_bit(AF_DISC_PENDING, &a->flags))) esas2r_local_start_request(a, startrq); else list_add_tail(&startrq->req_list, &a->defer_list); spin_unlock_irqrestore(&a->queue_lock, flags); } /* * Starts the specified request. all requests have RS_PENDING set when this * routine is called. The caller is usually esas2r_start_request, but * esas2r_do_deferred_processes will start request that are deferred. * * The caller must ensure that requests can be started. * * esas2r_start_request will defer a request if there are already requests * waiting or there is a chip reset pending. once the reset condition clears, * esas2r_do_deferred_processes will call this function to start the request. * * When a request is started, it is placed on the active list and queued to * the controller. */ void esas2r_local_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) { esas2r_trace_enter(); esas2r_trace("rq=%p", rq); esas2r_trace("rq->vrq:%p", rq->vrq); esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr); if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) set_bit(AF_FLASHING, &a->flags); list_add_tail(&rq->req_list, &a->active_list); esas2r_start_vda_request(a, rq); esas2r_trace_exit(); return; } void esas2r_start_vda_request(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_inbound_list_source_entry *element; u32 dw; rq->req_stat = RS_STARTED; /* * Calculate the inbound list entry location and the current state of * toggle bit. */ a->last_write++; if (a->last_write >= a->list_size) { a->last_write = 0; /* update the toggle bit */ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) clear_bit(AF_COMM_LIST_TOGGLE, &a->flags); else set_bit(AF_COMM_LIST_TOGGLE, &a->flags); } element = (struct esas2r_inbound_list_source_entry *)a->inbound_list_md. virt_addr + a->last_write; /* Set the VDA request size if it was never modified */ if (rq->vda_req_sz == RQ_SIZE_DEFAULT) rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32)); element->address = cpu_to_le64(rq->vrq_md->phys_addr); element->length = cpu_to_le32(rq->vda_req_sz); /* Update the write pointer */ dw = a->last_write; if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) dw |= MU_ILW_TOGGLE; esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); esas2r_trace("dw:%x", dw); esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz); esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw); } /* * Build the scatter/gather list for an I/O request according to the * specifications placed in the s/g context. The caller must initialize * context prior to the initial call by calling esas2r_sgc_init(). 
*/ bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, struct esas2r_sg_context *sgc) { struct esas2r_request *rq = sgc->first_req; union atto_vda_req *vrq = rq->vrq; while (sgc->length) { u32 rem = 0; u64 addr; u32 len; len = (*sgc->get_phys_addr)(sgc, &addr); if (unlikely(len == 0)) return false; /* if current length is more than what's left, stop there */ if (unlikely(len > sgc->length)) len = sgc->length; another_entry: /* limit to a round number less than the maximum length */ if (len > SGE_LEN_MAX) { /* * Save the remainder of the split. Whenever we limit * an entry we come back around to build entries out * of the leftover. We do this to prevent multiple * calls to the get_phys_addr() function for an SGE * that is too large. */ rem = len - SGE_LEN_MAX; len = SGE_LEN_MAX; } /* See if we need to allocate a new SGL */ if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) { u8 sgelen; struct esas2r_mem_desc *sgl; /* * If no SGls are available, return failure. The * caller can call us later with the current context * to pick up here. */ sgl = esas2r_alloc_sgl(a); if (unlikely(sgl == NULL)) return false; /* Calculate the length of the last SGE filled in */ sgelen = (u8)((u8 *)sgc->sge.a64.curr - (u8 *)sgc->sge.a64.last); /* * Copy the last SGE filled in to the first entry of * the new SGL to make room for the chain entry. */ memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen); /* Figure out the new curr pointer in the new segment */ sgc->sge.a64.curr = (struct atto_vda_sge *)((u8 *)sgl->virt_addr + sgelen); /* Set the limit pointer and build the chain entry */ sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)sgl->virt_addr + sgl_page_size - sizeof(struct atto_vda_sge)); sgc->sge.a64.last->length = cpu_to_le32( SGE_CHAIN | SGE_ADDR_64); sgc->sge.a64.last->address = cpu_to_le64(sgl->phys_addr); /* * Now, if there was a previous chain entry, then * update it to contain the length of this segment * and size of this chain. otherwise this is the * first SGL, so set the chain_offset in the request. */ if (sgc->sge.a64.chain) { sgc->sge.a64.chain->length |= cpu_to_le32( ((u8 *)(sgc->sge.a64. last + 1) - (u8 *)rq->sg_table-> virt_addr) + sizeof(struct atto_vda_sge) * LOBIT(SGE_CHAIN_SZ)); } else { vrq->scsi.chain_offset = (u8) ((u8 *)sgc-> sge.a64.last - (u8 *)vrq); /* * This is the first SGL, so set the * chain_offset and the VDA request size in * the request. */ rq->vda_req_sz = (vrq->scsi.chain_offset + sizeof(struct atto_vda_sge) + 3) / sizeof(u32); } /* * Remember this so when we get a new SGL filled in we * can update the length of this chain entry. */ sgc->sge.a64.chain = sgc->sge.a64.last; /* Now link the new SGL onto the primary request. */ list_add(&sgl->next_desc, &rq->sg_table_head); } /* Update last one filled in */ sgc->sge.a64.last = sgc->sge.a64.curr; /* Build the new SGE and update the S/G context */ sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len); sgc->sge.a64.curr->address = cpu_to_le32(addr); sgc->sge.a64.curr++; sgc->cur_offset += len; sgc->length -= len; /* * Check if we previously split an entry. If so we have to * pick up where we left off. */ if (rem) { addr += len; len = rem; rem = 0; goto another_entry; } } /* Mark the end of the SGL */ sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST); /* * If there was a previous chain entry, update the length to indicate * the length of this last segment. 
*/ if (sgc->sge.a64.chain) { sgc->sge.a64.chain->length |= cpu_to_le32( ((u8 *)(sgc->sge.a64.curr) - (u8 *)rq->sg_table->virt_addr)); } else { u16 reqsize; /* * The entire VDA request was not used so lets * set the size of the VDA request to be DMA'd */ reqsize = ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq) + sizeof(struct atto_vda_sge) + 3) / sizeof(u32); /* * Only update the request size if it is bigger than what is * already there. We can come in here twice for some management * commands. */ if (reqsize > rq->vda_req_sz) rq->vda_req_sz = reqsize; } return true; } /* * Create PRD list for each I-block consumed by the command. This routine * determines how much data is required from each I-block being consumed * by the command. The first and last I-blocks can be partials and all of * the I-blocks in between are for a full I-block of data. * * The interleave size is used to determine the number of bytes in the 1st * I-block and the remaining I-blocks are what remeains. */ static bool esas2r_build_prd_iblk(struct esas2r_adapter *a, struct esas2r_sg_context *sgc) { struct esas2r_request *rq = sgc->first_req; u64 addr; u32 len; struct esas2r_mem_desc *sgl; u32 numchain = 1; u32 rem = 0; while (sgc->length) { /* Get the next address/length pair */ len = (*sgc->get_phys_addr)(sgc, &addr); if (unlikely(len == 0)) return false; /* If current length is more than what's left, stop there */ if (unlikely(len > sgc->length)) len = sgc->length; another_entry: /* Limit to a round number less than the maximum length */ if (len > PRD_LEN_MAX) { /* * Save the remainder of the split. whenever we limit * an entry we come back around to build entries out * of the leftover. We do this to prevent multiple * calls to the get_phys_addr() function for an SGE * that is too large. */ rem = len - PRD_LEN_MAX; len = PRD_LEN_MAX; } /* See if we need to allocate a new SGL */ if (sgc->sge.prd.sge_cnt == 0) { if (len == sgc->length) { /* * We only have 1 PRD entry left. * It can be placed where the chain * entry would have gone */ /* Build the simple SGE */ sgc->sge.prd.curr->ctl_len = cpu_to_le32( PRD_DATA | len); sgc->sge.prd.curr->address = cpu_to_le64(addr); /* Adjust length related fields */ sgc->cur_offset += len; sgc->length -= len; /* We use the reserved chain entry for data */ numchain = 0; break; } if (sgc->sge.prd.chain) { /* * Fill # of entries of current SGL in previous * chain the length of this current SGL may not * full. */ sgc->sge.prd.chain->ctl_len |= cpu_to_le32( sgc->sge.prd.sgl_max_cnt); } /* * If no SGls are available, return failure. The * caller can call us later with the current context * to pick up here. */ sgl = esas2r_alloc_sgl(a); if (unlikely(sgl == NULL)) return false; /* * Link the new SGL onto the chain * They are in reverse order */ list_add(&sgl->next_desc, &rq->sg_table_head); /* * An SGL was just filled in and we are starting * a new SGL. Prime the chain of the ending SGL with * info that points to the new SGL. The length gets * filled in when the new SGL is filled or ended */ sgc->sge.prd.chain = sgc->sge.prd.curr; sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN); sgc->sge.prd.chain->address = cpu_to_le64(sgl->phys_addr); /* * Start a new segment. 
* Take one away and save for chain SGE */ sgc->sge.prd.curr = (struct atto_physical_region_description *)sgl -> virt_addr; sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1; } sgc->sge.prd.sge_cnt--; /* Build the simple SGE */ sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len); sgc->sge.prd.curr->address = cpu_to_le64(addr); /* Used another element. Point to the next one */ sgc->sge.prd.curr++; /* Adjust length related fields */ sgc->cur_offset += len; sgc->length -= len; /* * Check if we previously split an entry. If so we have to * pick up where we left off. */ if (rem) { addr += len; len = rem; rem = 0; goto another_entry; } } if (!list_empty(&rq->sg_table_head)) { if (sgc->sge.prd.chain) { sgc->sge.prd.chain->ctl_len |= cpu_to_le32(sgc->sge.prd.sgl_max_cnt - sgc->sge.prd.sge_cnt - numchain); } } return true; } bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, struct esas2r_sg_context *sgc) { struct esas2r_request *rq = sgc->first_req; u32 len = sgc->length; struct esas2r_target *t = a->targetdb + rq->target_id; u8 is_i_o = 0; u16 reqsize; struct atto_physical_region_description *curr_iblk_chn; u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0]; /* * extract LBA from command so we can determine * the I-Block boundary */ if (rq->vrq->scsi.function == VDA_FUNC_SCSI && t->target_state == TS_PRESENT && !(t->flags & TF_PASS_THRU)) { u32 lbalo = 0; switch (rq->vrq->scsi.cdb[0]) { case READ_16: case WRITE_16: { lbalo = MAKEDWORD(MAKEWORD(cdb[9], cdb[8]), MAKEWORD(cdb[7], cdb[6])); is_i_o = 1; break; } case READ_12: case WRITE_12: case READ_10: case WRITE_10: { lbalo = MAKEDWORD(MAKEWORD(cdb[5], cdb[4]), MAKEWORD(cdb[3], cdb[2])); is_i_o = 1; break; } case READ_6: case WRITE_6: { lbalo = MAKEDWORD(MAKEWORD(cdb[3], cdb[2]), MAKEWORD(cdb[1] & 0x1F, 0)); is_i_o = 1; break; } default: break; } if (is_i_o) { u32 startlba; rq->vrq->scsi.iblk_cnt_prd = 0; /* Determine size of 1st I-block PRD list */ startlba = t->inter_block - (lbalo & (t->inter_block - 1)); sgc->length = startlba * t->block_size; /* Chk if the 1st iblk chain starts at base of Iblock */ if ((lbalo & (t->inter_block - 1)) == 0) rq->flags |= RF_1ST_IBLK_BASE; if (sgc->length > len) sgc->length = len; } else { sgc->length = len; } } else { sgc->length = len; } /* get our starting chain address */ curr_iblk_chn = (struct atto_physical_region_description *)sgc->sge.a64.curr; sgc->sge.prd.sgl_max_cnt = sgl_page_size / sizeof(struct atto_physical_region_description); /* create all of the I-block PRD lists */ while (len) { sgc->sge.prd.sge_cnt = 0; sgc->sge.prd.chain = NULL; sgc->sge.prd.curr = curr_iblk_chn; /* increment to next I-Block */ len -= sgc->length; /* go build the next I-Block PRD list */ if (unlikely(!esas2r_build_prd_iblk(a, sgc))) return false; curr_iblk_chn++; if (is_i_o) { rq->vrq->scsi.iblk_cnt_prd++; if (len > t->inter_byte) sgc->length = t->inter_byte; else sgc->length = len; } } /* figure out the size used of the VDA request */ reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq)) / sizeof(u32); /* * only update the request size if it is bigger than what is * already there. we can come in here twice for some management * commands. */ if (reqsize > rq->vda_req_sz) rq->vda_req_sz = reqsize; return true; } static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime) { u32 delta = currtime - a->chip_init_time; if (delta <= ESAS2R_CHPRST_WAIT_TIME) { /* Wait before accessing registers */ } else if (delta >= ESAS2R_CHPRST_TIME) { /* * The last reset failed so try again. 
Reset * processing will give up after three tries. */ esas2r_local_reset_adapter(a); } else { /* We can now see if the firmware is ready */ u32 doorbell; doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) { esas2r_force_interrupt(a); } else { u32 ver = (doorbell & DRBL_FW_VER_MSK); /* Driver supports API version 0 and 1 */ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (ver == DRBL_FW_VER_0) { set_bit(AF_CHPRST_DETECTED, &a->flags); set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; } else if (ver == DRBL_FW_VER_1) { set_bit(AF_CHPRST_DETECTED, &a->flags); clear_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 1024; a->build_sgl = esas2r_build_sg_list_prd; } else { esas2r_local_reset_adapter(a); } } } } /* This function must be called once per timer tick */ void esas2r_timer_tick(struct esas2r_adapter *a) { u32 currtime = jiffies_to_msecs(jiffies); u32 deltatime = currtime - a->last_tick_time; a->last_tick_time = currtime; /* count down the uptime */ if (a->chip_uptime && !test_bit(AF_CHPRST_PENDING, &a->flags) && !test_bit(AF_DISC_PENDING, &a->flags)) { if (deltatime >= a->chip_uptime) a->chip_uptime = 0; else a->chip_uptime -= deltatime; } if (test_bit(AF_CHPRST_PENDING, &a->flags)) { if (!test_bit(AF_CHPRST_NEEDED, &a->flags) && !test_bit(AF_CHPRST_DETECTED, &a->flags)) esas2r_handle_pending_reset(a, currtime); } else { if (test_bit(AF_DISC_PENDING, &a->flags)) esas2r_disc_check_complete(a); if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) { if (test_bit(AF_HEARTBEAT, &a->flags)) { if ((currtime - a->heartbeat_time) >= ESAS2R_HEARTBEAT_TIME) { clear_bit(AF_HEARTBEAT, &a->flags); esas2r_hdebug("heartbeat failed"); esas2r_log(ESAS2R_LOG_CRIT, "heartbeat failed"); esas2r_bugon(); esas2r_local_reset_adapter(a); } } else { set_bit(AF_HEARTBEAT, &a->flags); a->heartbeat_time = currtime; esas2r_force_interrupt(a); } } } if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); } /* * Send the specified task management function to the target and LUN * specified in rqaux. In addition, immediately abort any commands that * are queued but not sent to the device according to the rules specified * by the task management function. */ bool esas2r_send_task_mgmt(struct esas2r_adapter *a, struct esas2r_request *rqaux, u8 task_mgt_func) { u16 targetid = rqaux->target_id; u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags); bool ret = false; struct esas2r_request *rq; struct list_head *next, *element; unsigned long flags; LIST_HEAD(comp_list); esas2r_trace_enter(); esas2r_trace("rqaux:%p", rqaux); esas2r_trace("task_mgt_func:%x", task_mgt_func); spin_lock_irqsave(&a->queue_lock, flags); /* search the defer queue looking for requests for the device */ list_for_each_safe(element, next, &a->defer_list) { rq = list_entry(element, struct esas2r_request, req_list); if (rq->vrq->scsi.function == VDA_FUNC_SCSI && rq->target_id == targetid && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun || task_mgt_func == 0x20)) { /* target reset */ /* Found a request affected by the task management */ if (rq->req_stat == RS_PENDING) { /* * The request is pending or waiting. We can * safely complete the request now.
*/ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) list_add_tail(&rq->comp_list, &comp_list); } } } /* Send the task management request to the firmware */ rqaux->sense_len = 0; rqaux->vrq->scsi.length = 0; rqaux->target_id = targetid; rqaux->vrq->scsi.flags |= cpu_to_le32(lun); memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb)); rqaux->vrq->scsi.flags |= cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); if (test_bit(AF_FLASHING, &a->flags)) { /* Assume success. if there are active requests, return busy */ rqaux->req_stat = RS_SUCCESS; list_for_each_safe(element, next, &a->active_list) { rq = list_entry(element, struct esas2r_request, req_list); if (rq->vrq->scsi.function == VDA_FUNC_SCSI && rq->target_id == targetid && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun || task_mgt_func == 0x20)) /* target reset */ rqaux->req_stat = RS_BUSY; } ret = true; } spin_unlock_irqrestore(&a->queue_lock, flags); if (!test_bit(AF_FLASHING, &a->flags)) esas2r_start_request(a, rqaux); esas2r_comp_list_drain(a, &comp_list); if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); esas2r_trace_exit(); return ret; } void esas2r_reset_bus(struct esas2r_adapter *a) { esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); if (!test_bit(AF_DEGRADED_MODE, &a->flags) && !test_bit(AF_CHPRST_PENDING, &a->flags) && !test_bit(AF_DISC_PENDING, &a->flags)) { set_bit(AF_BUSRST_NEEDED, &a->flags); set_bit(AF_BUSRST_PENDING, &a->flags); set_bit(AF_OS_RESET, &a->flags); esas2r_schedule_tasklet(a); } } bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, u8 status) { esas2r_trace_enter(); esas2r_trace("rq:%p", rq); list_del_init(&rq->req_list); if (rq->timeout > RQ_MAX_TIMEOUT) { /* * The request timed out, but we could not abort it because a * chip reset occurred. Return busy status. */ rq->req_stat = RS_BUSY; esas2r_trace_exit(); return true; } rq->req_stat = status; esas2r_trace_exit(); return true; }
linux-master
drivers/scsi/esas2r/esas2r_io.c
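/*
 * Hedged, illustrative sketch (not part of the esas2r_io.c sources above): it
 * mirrors the arithmetic esas2r_build_sg_list_prd() uses to size the first
 * I-block PRD list from the command's starting LBA, assuming the interleave
 * (inter_block) is a power of two, as the "lbalo & (t->inter_block - 1)"
 * masking implies. The name first_iblk_bytes() and the sample values are
 * hypothetical and exist only for this standalone example.
 */
#include <stdint.h>
#include <stdio.h>

/* Bytes of the transfer that land in the first (possibly partial) I-block. */
static uint32_t first_iblk_bytes(uint32_t lba, uint32_t inter_block,
				 uint32_t block_size, uint32_t total_len)
{
	/* LBAs remaining before the next I-block boundary. */
	uint32_t startlba = inter_block - (lba & (inter_block - 1));
	uint32_t len = startlba * block_size;

	return len > total_len ? total_len : len;
}

int main(void)
{
	/* e.g. 128-sector interleave, 512-byte blocks, I/O starting at LBA 100 */
	printf("first I-block bytes: %u\n",
	       (unsigned)first_iblk_bytes(100, 128, 512, 1u << 20));
	return 0;
}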
/* * linux/drivers/scsi/esas2r/esas2r_disc.c * esas2r device discovery routines * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include "esas2r.h" /* Miscellaneous internal discovery routines */ static void esas2r_disc_abort(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_continue(struct esas2r_adapter *a, struct esas2r_request *rq); static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a); static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr); static bool esas2r_disc_start_request(struct esas2r_adapter *a, struct esas2r_request *rq); /* Internal discovery routines that process the states */ static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, struct esas2r_request *rq); static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_dev_add(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_part_info(struct esas2r_adapter *a, struct esas2r_request *rq); static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, struct esas2r_request *rq); static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, struct esas2r_request *rq); static void 
esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, struct esas2r_request *rq); static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, struct esas2r_request *rq); static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq); void esas2r_disc_initialize(struct esas2r_adapter *a) { struct esas2r_sas_nvram *nvr = a->nvram; esas2r_trace_enter(); clear_bit(AF_DISC_IN_PROG, &a->flags); clear_bit(AF2_DEV_SCAN, &a->flags2); clear_bit(AF2_DEV_CNT_OK, &a->flags2); a->disc_start_time = jiffies_to_msecs(jiffies); a->disc_wait_time = nvr->dev_wait_time * 1000; a->disc_wait_cnt = nvr->dev_wait_count; if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS) a->disc_wait_cnt = ESAS2R_MAX_TARGETS; /* * If we are doing chip reset or power management processing, always * wait for devices. use the NVRAM device count if it is greater than * previously discovered devices. */ esas2r_hdebug("starting discovery..."); a->general_req.interrupt_cx = NULL; if (test_bit(AF_CHPRST_DETECTED, &a->flags) || test_bit(AF_POWER_MGT, &a->flags)) { if (a->prev_dev_cnt == 0) { /* Don't bother waiting if there is nothing to wait * for. */ a->disc_wait_time = 0; } else { /* * Set the device wait count to what was previously * found. We don't care if the user only configured * a time because we know the exact count to wait for. * There is no need to honor the user's wishes to * always wait the full time. */ a->disc_wait_cnt = a->prev_dev_cnt; /* * bump the minimum wait time to 15 seconds since the * default is 3 (system boot or the boot driver usually * buys us more time). */ if (a->disc_wait_time < 15000) a->disc_wait_time = 15000; } } esas2r_trace("disc wait count: %d", a->disc_wait_cnt); esas2r_trace("disc wait time: %d", a->disc_wait_time); if (a->disc_wait_time == 0) esas2r_disc_check_complete(a); esas2r_trace_exit(); } void esas2r_disc_start_waiting(struct esas2r_adapter *a) { unsigned long flags; spin_lock_irqsave(&a->mem_lock, flags); if (a->disc_ctx.disc_evt) esas2r_disc_start_port(a); spin_unlock_irqrestore(&a->mem_lock, flags); } void esas2r_disc_check_for_work(struct esas2r_adapter *a) { struct esas2r_request *rq = &a->general_req; /* service any pending interrupts first */ esas2r_polled_interrupt(a); /* * now, interrupt processing may have queued up a discovery event. go * see if we have one to start. we couldn't start it in the ISR since * polled discovery would cause a deadlock. */ esas2r_disc_start_waiting(a); if (rq->interrupt_cx == NULL) return; if (rq->req_stat == RS_STARTED && rq->timeout <= RQ_MAX_TIMEOUT) { /* wait for the current discovery request to complete. */ esas2r_wait_request(a, rq); if (rq->req_stat == RS_TIMEOUT) { esas2r_disc_abort(a, rq); esas2r_local_reset_adapter(a); return; } } if (rq->req_stat == RS_PENDING || rq->req_stat == RS_STARTED) return; esas2r_disc_continue(a, rq); } void esas2r_disc_check_complete(struct esas2r_adapter *a) { unsigned long flags; esas2r_trace_enter(); /* check to see if we should be waiting for devices */ if (a->disc_wait_time) { u32 currtime = jiffies_to_msecs(jiffies); u32 time = currtime - a->disc_start_time; /* * Wait until the device wait time is exhausted or the device * wait count is satisfied. */ if (time < a->disc_wait_time && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt || a->disc_wait_cnt == 0)) { /* After three seconds of waiting, schedule a scan. 
*/ if (time >= 3000 && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { spin_lock_irqsave(&a->mem_lock, flags); esas2r_disc_queue_event(a, DCDE_DEV_SCAN); spin_unlock_irqrestore(&a->mem_lock, flags); } esas2r_trace_exit(); return; } /* * We are done waiting...we think. Adjust the wait time to * consume events after the count is met. */ if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2)) a->disc_wait_time = time + 3000; /* If we haven't done a full scan yet, do it now. */ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { spin_lock_irqsave(&a->mem_lock, flags); esas2r_disc_queue_event(a, DCDE_DEV_SCAN); spin_unlock_irqrestore(&a->mem_lock, flags); esas2r_trace_exit(); return; } /* * Now, if there is still time left to consume events, continue * waiting. */ if (time < a->disc_wait_time) { esas2r_trace_exit(); return; } } else { if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { spin_lock_irqsave(&a->mem_lock, flags); esas2r_disc_queue_event(a, DCDE_DEV_SCAN); spin_unlock_irqrestore(&a->mem_lock, flags); } } /* We want to stop waiting for devices. */ a->disc_wait_time = 0; if (test_bit(AF_DISC_POLLED, &a->flags) && test_bit(AF_DISC_IN_PROG, &a->flags)) { /* * Polled discovery is still pending so continue the active * discovery until it is done. At that point, we will stop * polled discovery and transition to interrupt driven * discovery. */ } else { /* * Done waiting for devices. Note that we get here immediately * after deferred waiting completes because that is interrupt * driven; i.e. There is no transition. */ esas2r_disc_fix_curr_requests(a); clear_bit(AF_DISC_PENDING, &a->flags); /* * We have deferred target state changes until now because we * don't want to report any removals (due to the first arrival) * until the device wait time expires. */ set_bit(AF_PORT_CHANGE, &a->flags); } esas2r_trace_exit(); } void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt) { struct esas2r_disc_context *dc = &a->disc_ctx; esas2r_trace_enter(); esas2r_trace("disc_event: %d", disc_evt); /* Initialize the discovery context */ dc->disc_evt |= disc_evt; /* * Don't start discovery before or during polled discovery. if we did, * we would have a deadlock if we are in the ISR already. */ if (!test_bit(AF_CHPRST_PENDING, &a->flags) && !test_bit(AF_DISC_POLLED, &a->flags)) esas2r_disc_start_port(a); esas2r_trace_exit(); } bool esas2r_disc_start_port(struct esas2r_adapter *a) { struct esas2r_request *rq = &a->general_req; struct esas2r_disc_context *dc = &a->disc_ctx; bool ret; esas2r_trace_enter(); if (test_bit(AF_DISC_IN_PROG, &a->flags)) { esas2r_trace_exit(); return false; } /* If there is a discovery waiting, process it. */ if (dc->disc_evt) { if (test_bit(AF_DISC_POLLED, &a->flags) && a->disc_wait_time == 0) { /* * We are doing polled discovery, but we no longer want * to wait for devices. Stop polled discovery and * transition to interrupt driven discovery. */ esas2r_trace_exit(); return false; } } else { /* Discovery is complete. 
*/ esas2r_hdebug("disc done"); set_bit(AF_PORT_CHANGE, &a->flags); esas2r_trace_exit(); return false; } /* Handle the discovery context */ esas2r_trace("disc_evt: %d", dc->disc_evt); set_bit(AF_DISC_IN_PROG, &a->flags); dc->flags = 0; if (test_bit(AF_DISC_POLLED, &a->flags)) dc->flags |= DCF_POLLED; rq->interrupt_cx = dc; rq->req_stat = RS_SUCCESS; /* Decode the event code */ if (dc->disc_evt & DCDE_DEV_SCAN) { dc->disc_evt &= ~DCDE_DEV_SCAN; dc->flags |= DCF_DEV_SCAN; dc->state = DCS_BLOCK_DEV_SCAN; } else if (dc->disc_evt & DCDE_DEV_CHANGE) { dc->disc_evt &= ~DCDE_DEV_CHANGE; dc->flags |= DCF_DEV_CHANGE; dc->state = DCS_DEV_RMV; } /* Continue interrupt driven discovery */ if (!test_bit(AF_DISC_POLLED, &a->flags)) ret = esas2r_disc_continue(a, rq); else ret = true; esas2r_trace_exit(); return ret; } static bool esas2r_disc_continue(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; bool rslt; /* Device discovery/removal */ while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) { rslt = false; switch (dc->state) { case DCS_DEV_RMV: rslt = esas2r_disc_dev_remove(a, rq); break; case DCS_DEV_ADD: rslt = esas2r_disc_dev_add(a, rq); break; case DCS_BLOCK_DEV_SCAN: rslt = esas2r_disc_block_dev_scan(a, rq); break; case DCS_RAID_GRP_INFO: rslt = esas2r_disc_raid_grp_info(a, rq); break; case DCS_PART_INFO: rslt = esas2r_disc_part_info(a, rq); break; case DCS_PT_DEV_INFO: rslt = esas2r_disc_passthru_dev_info(a, rq); break; case DCS_PT_DEV_ADDR: rslt = esas2r_disc_passthru_dev_addr(a, rq); break; case DCS_DISC_DONE: dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN); break; default: esas2r_bugon(); dc->state = DCS_DISC_DONE; break; } if (rslt) return true; } /* Discovery is done...for now. */ rq->interrupt_cx = NULL; if (!test_bit(AF_DISC_PENDING, &a->flags)) esas2r_disc_fix_curr_requests(a); clear_bit(AF_DISC_IN_PROG, &a->flags); /* Start the next discovery. */ return esas2r_disc_start_port(a); } static bool esas2r_disc_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) { unsigned long flags; /* Set the timeout to a minimum value. */ if (rq->timeout < ESAS2R_DEFAULT_TMO) rq->timeout = ESAS2R_DEFAULT_TMO; /* * Override the request type to distinguish discovery requests. If we * end up deferring the request, esas2r_disc_local_start_request() * will be called to restart it. 
*/ rq->req_type = RT_DISC_REQ; spin_lock_irqsave(&a->queue_lock, flags); if (!test_bit(AF_CHPRST_PENDING, &a->flags) && !test_bit(AF_FLASHING, &a->flags)) esas2r_disc_local_start_request(a, rq); else list_add_tail(&rq->req_list, &a->defer_list); spin_unlock_irqrestore(&a->queue_lock, flags); return true; } void esas2r_disc_local_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) { esas2r_trace_enter(); list_add_tail(&rq->req_list, &a->active_list); esas2r_start_vda_request(a, rq); esas2r_trace_exit(); return; } static void esas2r_disc_abort(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; esas2r_trace_enter(); /* abort the current discovery */ dc->state = DCS_DISC_DONE; esas2r_trace_exit(); } static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; bool rslt; esas2r_trace_enter(); esas2r_rq_init_request(rq, a); esas2r_build_mgt_req(a, rq, VDAMGT_DEV_SCAN, 0, 0, 0, NULL); rq->comp_cb = esas2r_disc_block_dev_scan_cb; rq->timeout = 30000; rq->interrupt_cx = dc; rslt = esas2r_disc_start_request(a, rq); esas2r_trace_exit(); return rslt; } static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; unsigned long flags; esas2r_trace_enter(); spin_lock_irqsave(&a->mem_lock, flags); if (rq->req_stat == RS_SUCCESS) dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; dc->state = DCS_RAID_GRP_INFO; dc->raid_grp_ix = 0; esas2r_rq_destroy_request(rq, a); /* continue discovery if it's interrupt driven */ if (!(dc->flags & DCF_POLLED)) esas2r_disc_continue(a, rq); spin_unlock_irqrestore(&a->mem_lock, flags); esas2r_trace_exit(); } static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; bool rslt; struct atto_vda_grp_info *grpinfo; esas2r_trace_enter(); esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix); if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) { dc->state = DCS_DISC_DONE; esas2r_trace_exit(); return false; } esas2r_rq_init_request(rq, a); grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; memset(grpinfo, 0, sizeof(struct atto_vda_grp_info)); esas2r_build_mgt_req(a, rq, VDAMGT_GRP_INFO, dc->scan_gen, 0, sizeof(struct atto_vda_grp_info), NULL); grpinfo->grp_index = dc->raid_grp_ix; rq->comp_cb = esas2r_disc_raid_grp_info_cb; rq->interrupt_cx = dc; rslt = esas2r_disc_start_request(a, rq); esas2r_trace_exit(); return rslt; } static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; unsigned long flags; struct atto_vda_grp_info *grpinfo; esas2r_trace_enter(); spin_lock_irqsave(&a->mem_lock, flags); if (rq->req_stat == RS_SCAN_GEN) { dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; dc->raid_grp_ix = 0; goto done; } if (rq->req_stat == RS_SUCCESS) { grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; if (grpinfo->status != VDA_GRP_STAT_ONLINE && grpinfo->status != VDA_GRP_STAT_DEGRADED) { /* go to the next group. 
*/ dc->raid_grp_ix++; } else { memcpy(&dc->raid_grp_name[0], &grpinfo->grp_name[0], sizeof(grpinfo->grp_name)); dc->interleave = le32_to_cpu(grpinfo->interleave); dc->block_size = le32_to_cpu(grpinfo->block_size); dc->state = DCS_PART_INFO; dc->part_num = 0; } } else { if (!(rq->req_stat == RS_GRP_INVALID)) { esas2r_log(ESAS2R_LOG_WARN, "A request for RAID group info failed - " "returned with %x", rq->req_stat); } dc->dev_ix = 0; dc->state = DCS_PT_DEV_INFO; } done: esas2r_rq_destroy_request(rq, a); /* continue discovery if it's interrupt driven */ if (!(dc->flags & DCF_POLLED)) esas2r_disc_continue(a, rq); spin_unlock_irqrestore(&a->mem_lock, flags); esas2r_trace_exit(); } static bool esas2r_disc_part_info(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; bool rslt; struct atto_vdapart_info *partinfo; esas2r_trace_enter(); esas2r_trace("part_num: %d", dc->part_num); if (dc->part_num >= VDA_MAX_PARTITIONS) { dc->state = DCS_RAID_GRP_INFO; dc->raid_grp_ix++; esas2r_trace_exit(); return false; } esas2r_rq_init_request(rq, a); partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; memset(partinfo, 0, sizeof(struct atto_vdapart_info)); esas2r_build_mgt_req(a, rq, VDAMGT_PART_INFO, dc->scan_gen, 0, sizeof(struct atto_vdapart_info), NULL); partinfo->part_no = dc->part_num; memcpy(&partinfo->grp_name[0], &dc->raid_grp_name[0], sizeof(partinfo->grp_name)); rq->comp_cb = esas2r_disc_part_info_cb; rq->interrupt_cx = dc; rslt = esas2r_disc_start_request(a, rq); esas2r_trace_exit(); return rslt; } static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; unsigned long flags; struct atto_vdapart_info *partinfo; esas2r_trace_enter(); spin_lock_irqsave(&a->mem_lock, flags); if (rq->req_stat == RS_SCAN_GEN) { dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; dc->raid_grp_ix = 0; dc->state = DCS_RAID_GRP_INFO; } else if (rq->req_stat == RS_SUCCESS) { partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; dc->part_num = partinfo->part_no; dc->curr_virt_id = le16_to_cpu(partinfo->target_id); esas2r_targ_db_add_raid(a, dc); dc->part_num++; } else { if (!(rq->req_stat == RS_PART_LAST)) { esas2r_log(ESAS2R_LOG_WARN, "A request for RAID group partition info " "failed - status:%d", rq->req_stat); } dc->state = DCS_RAID_GRP_INFO; dc->raid_grp_ix++; } esas2r_rq_destroy_request(rq, a); /* continue discovery if it's interrupt driven */ if (!(dc->flags & DCF_POLLED)) esas2r_disc_continue(a, rq); spin_unlock_irqrestore(&a->mem_lock, flags); esas2r_trace_exit(); } static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; bool rslt; struct atto_vda_devinfo *devinfo; esas2r_trace_enter(); esas2r_trace("dev_ix: %d", dc->dev_ix); esas2r_rq_init_request(rq, a); devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; memset(devinfo, 0, sizeof(struct atto_vda_devinfo)); esas2r_build_mgt_req(a, rq, VDAMGT_DEV_PT_INFO, dc->scan_gen, dc->dev_ix, sizeof(struct atto_vda_devinfo), NULL); rq->comp_cb = esas2r_disc_passthru_dev_info_cb; rq->interrupt_cx = dc; rslt = esas2r_disc_start_request(a, rq); esas2r_trace_exit(); return rslt; } static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; 
unsigned long flags; struct atto_vda_devinfo *devinfo; esas2r_trace_enter(); spin_lock_irqsave(&a->mem_lock, flags); if (rq->req_stat == RS_SCAN_GEN) { dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; dc->dev_ix = 0; dc->state = DCS_PT_DEV_INFO; } else if (rq->req_stat == RS_SUCCESS) { devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index); dc->curr_virt_id = le16_to_cpu(devinfo->target_id); if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) { dc->curr_phys_id = le16_to_cpu(devinfo->phys_target_id); dc->dev_addr_type = ATTO_GDA_AT_PORT; dc->state = DCS_PT_DEV_ADDR; esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); } else { dc->dev_ix++; } } else { if (!(rq->req_stat == RS_DEV_INVALID)) { esas2r_log(ESAS2R_LOG_WARN, "A request for device information failed - " "status:%d", rq->req_stat); } dc->state = DCS_DISC_DONE; } esas2r_rq_destroy_request(rq, a); /* continue discovery if it's interrupt driven */ if (!(dc->flags & DCF_POLLED)) esas2r_disc_continue(a, rq); spin_unlock_irqrestore(&a->mem_lock, flags); esas2r_trace_exit(); } static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; bool rslt; struct atto_ioctl *hi; struct esas2r_sg_context sgc; esas2r_trace_enter(); esas2r_rq_init_request(rq, a); /* format the request. */ sgc.cur_offset = NULL; sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr; sgc.length = offsetof(struct atto_ioctl, data) + sizeof(struct atto_hba_get_device_address); esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge); esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA); if (!esas2r_build_sg_list(a, rq, &sgc)) { esas2r_rq_destroy_request(rq, a); esas2r_trace_exit(); return false; } rq->comp_cb = esas2r_disc_passthru_dev_addr_cb; rq->interrupt_cx = dc; /* format the IOCTL data. */ hi = (struct atto_ioctl *)a->disc_buffer; memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN); hi->version = ATTO_VER_GET_DEV_ADDR0; hi->function = ATTO_FUNC_GET_DEV_ADDR; hi->flags = HBAF_TUNNEL; hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id); hi->data.get_dev_addr.addr_type = dc->dev_addr_type; /* start it up. */ rslt = esas2r_disc_start_request(a, rq); esas2r_trace_exit(); return rslt; } static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; struct esas2r_target *t = NULL; unsigned long flags; struct atto_ioctl *hi; u16 addrlen; esas2r_trace_enter(); spin_lock_irqsave(&a->mem_lock, flags); hi = (struct atto_ioctl *)a->disc_buffer; if (rq->req_stat == RS_SUCCESS && hi->status == ATTO_STS_SUCCESS) { addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len); if (dc->dev_addr_type == ATTO_GDA_AT_PORT) { if (addrlen == sizeof(u64)) memcpy(&dc->sas_addr, &hi->data.get_dev_addr.address[0], addrlen); else memset(&dc->sas_addr, 0, sizeof(dc->sas_addr)); /* Get the unique identifier. */ dc->dev_addr_type = ATTO_GDA_AT_UNIQUE; goto next_dev_addr; } else { /* Add the pass through target. */ if (HIBYTE(addrlen) == 0) { t = esas2r_targ_db_add_pthru(a, dc, &hi->data. get_dev_addr. address[0], (u8)hi->data. get_dev_addr. 
addr_len); if (t) memcpy(&t->sas_addr, &dc->sas_addr, sizeof(t->sas_addr)); } else { /* getting the back end data failed */ esas2r_log(ESAS2R_LOG_WARN, "an error occurred retrieving the " "back end data (%s:%d)", __func__, __LINE__); } } } else { /* getting the back end data failed */ esas2r_log(ESAS2R_LOG_WARN, "an error occurred retrieving the back end data - " "rq->req_stat:%d hi->status:%d", rq->req_stat, hi->status); } /* proceed to the next device. */ if (dc->flags & DCF_DEV_SCAN) { dc->dev_ix++; dc->state = DCS_PT_DEV_INFO; } else if (dc->flags & DCF_DEV_CHANGE) { dc->curr_targ++; dc->state = DCS_DEV_ADD; } else { esas2r_bugon(); } next_dev_addr: esas2r_rq_destroy_request(rq, a); /* continue discovery if it's interrupt driven */ if (!(dc->flags & DCF_POLLED)) esas2r_disc_continue(a, rq); spin_unlock_irqrestore(&a->mem_lock, flags); esas2r_trace_exit(); } static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr) { struct esas2r_adapter *a = sgc->adapter; if (sgc->length > ESAS2R_DISC_BUF_LEN) { esas2r_bugon(); } *addr = a->uncached_phys + (u64)((u8 *)a->disc_buffer - a->uncached); return sgc->length; } static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; struct esas2r_target *t; struct esas2r_target *t2; esas2r_trace_enter(); /* process removals. */ for (t = a->targetdb; t < a->targetdb_end; t++) { if (t->new_target_state != TS_NOT_PRESENT) continue; t->new_target_state = TS_INVALID; /* remove the right target! */ t2 = esas2r_targ_db_find_by_virt_id(a, esas2r_targ_get_id(t, a)); if (t2) esas2r_targ_db_remove(a, t2); } /* removals complete. process arrivals. */ dc->state = DCS_DEV_ADD; dc->curr_targ = a->targetdb; esas2r_trace_exit(); return false; } static bool esas2r_disc_dev_add(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_disc_context *dc = (struct esas2r_disc_context *)rq->interrupt_cx; struct esas2r_target *t = dc->curr_targ; if (t >= a->targetdb_end) { /* done processing state changes. */ dc->state = DCS_DISC_DONE; } else if (t->new_target_state == TS_PRESENT) { struct atto_vda_ae_lu *luevt = &t->lu_event; esas2r_trace_enter(); /* clear this now in case more events come in. */ t->new_target_state = TS_INVALID; /* setup the discovery context for adding this device. */ dc->curr_virt_id = esas2r_targ_get_id(t, a); if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) { dc->block_size = luevt->id.tgtlun_raid.dwblock_size; dc->interleave = luevt->id.tgtlun_raid.dwinterleave; } else { dc->block_size = 0; dc->interleave = 0; } /* determine the device type being added. */ if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) { if (luevt->dwevent & VDAAE_LU_PHYS_ID) { dc->state = DCS_PT_DEV_ADDR; dc->dev_addr_type = ATTO_GDA_AT_PORT; dc->curr_phys_id = luevt->wphys_target_id; } else { esas2r_log(ESAS2R_LOG_WARN, "luevt->dwevent does not have the " "VDAAE_LU_PHYS_ID bit set (%s:%d)", __func__, __LINE__); } } else { dc->raid_grp_name[0] = 0; esas2r_targ_db_add_raid(a, dc); } esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); esas2r_trace("dwevent: %d", luevt->dwevent); esas2r_trace_exit(); } if (dc->state == DCS_DEV_ADD) { /* go to the next device. */ dc->curr_targ++; } return false; } /* * When discovery is done, find all requests on defer queue and * test if they need to be modified. 
If a target is no longer present * then complete the request with RS_SEL. Otherwise, update the * target_id since after a hibernate it can be a different value. * VDA does not make passthrough target IDs persistent. */ static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a) { unsigned long flags; struct esas2r_target *t; struct esas2r_request *rq; struct list_head *element; /* update virt_targ_id in any outstanding esas2r_requests */ spin_lock_irqsave(&a->queue_lock, flags); list_for_each(element, &a->defer_list) { rq = list_entry(element, struct esas2r_request, req_list); if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { t = a->targetdb + rq->target_id; if (t->target_state == TS_PRESENT) rq->vrq->scsi.target_id = le16_to_cpu( t->virt_targ_id); else rq->req_stat = RS_SEL; } } spin_unlock_irqrestore(&a->queue_lock, flags); }
linux-master
drivers/scsi/esas2r/esas2r_disc.c
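/*
 * Hedged, illustrative sketch (not part of the esas2r_disc.c sources above):
 * it models the decision in esas2r_disc_check_complete() to keep waiting for
 * devices while the wait window is still open and fewer targets than the
 * configured count have arrived (a wait count of zero means "wait the full
 * time"). The name keep_waiting() and the sample values are hypothetical
 * stand-ins for the adapter state.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool keep_waiting(uint32_t elapsed_ms, uint32_t wait_ms,
			 uint32_t tgt_cnt, uint32_t wait_cnt)
{
	return elapsed_ms < wait_ms &&
	       (tgt_cnt < wait_cnt || wait_cnt == 0);
}

int main(void)
{
	/* 10 s into a 15 s window with 3 of 4 expected targets found */
	printf("keep waiting: %d\n", keep_waiting(10000, 15000, 3, 4));
	return 0;
}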
/* * linux/drivers/scsi/esas2r/esas2r_flash.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include "esas2r.h" /* local macro defs */ #define esas2r_nvramcalc_cksum(n) \ (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \ SASNVR_CKSUM_SEED)) #define esas2r_nvramcalc_xor_cksum(n) \ (esas2r_calc_byte_xor_cksum((u8 *)(n), \ sizeof(struct esas2r_sas_nvram), 0)) #define ESAS2R_FS_DRVR_VER 2 static struct esas2r_sas_nvram default_sas_nvram = { { 'E', 'S', 'A', 'S' }, /* signature */ SASNVR_VERSION, /* version */ 0, /* checksum */ 31, /* max_lun_for_target */ SASNVR_PCILAT_MAX, /* pci_latency */ SASNVR1_BOOT_DRVR, /* options1 */ SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */ | SASNVR2_SW_MUX_CTRL, SASNVR_COAL_DIS, /* int_coalescing */ SASNVR_CMDTHR_NONE, /* cmd_throttle */ 3, /* dev_wait_time */ 1, /* dev_wait_count */ 0, /* spin_up_delay */ 0, /* ssp_align_rate */ { 0x50, 0x01, 0x08, 0x60, /* sas_addr */ 0x00, 0x00, 0x00, 0x00 }, { SASNVR_SPEED_AUTO }, /* phy_speed */ { SASNVR_MUX_DISABLED }, /* SAS multiplexing */ { 0 }, /* phy_flags */ SASNVR_SORT_SAS_ADDR, /* sort_type */ 3, /* dpm_reqcmd_lmt */ 3, /* dpm_stndby_time */ 0, /* dpm_active_time */ { 0 }, /* phy_target_id */ SASNVR_VSMH_DISABLED, /* virt_ses_mode */ SASNVR_RWM_DEFAULT, /* read_write_mode */ 0, /* link down timeout */ { 0 } /* reserved */ }; static u8 cmd_to_fls_func[] = { 0xFF, VDA_FLASH_READ, VDA_FLASH_BEGINW, VDA_FLASH_WRITE, VDA_FLASH_COMMIT, VDA_FLASH_CANCEL }; static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed) { u32 cksum = seed; u8 *p = (u8 *)&cksum; while (len) { if (((uintptr_t)addr & 3) == 0) break; cksum = cksum ^ *addr; addr++; len--; } while (len >= sizeof(u32)) { cksum = cksum ^ *(u32 *)addr; addr += 4; len -= 4; } while (len--) { cksum = cksum ^ *addr; addr++; } return p[0] ^ p[1] ^ p[2] ^ p[3]; } static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed) { u8 *p = (u8 *)addr; u8 cksum = seed; while (len--) cksum = cksum + p[len]; return cksum; } /* Interrupt callback to process FM API write requests. */ static void esas2r_fmapi_callback(struct esas2r_adapter *a, struct esas2r_request *rq) { struct atto_vda_flash_req *vrq = &rq->vrq->flash; struct esas2r_flash_context *fc = (struct esas2r_flash_context *)rq->interrupt_cx; if (rq->req_stat == RS_SUCCESS) { /* Last request was successful. See what to do now. */ switch (vrq->sub_func) { case VDA_FLASH_BEGINW: if (fc->sgc.cur_offset == NULL) goto commit; vrq->sub_func = VDA_FLASH_WRITE; rq->req_stat = RS_PENDING; break; case VDA_FLASH_WRITE: commit: vrq->sub_func = VDA_FLASH_COMMIT; rq->req_stat = RS_PENDING; rq->interrupt_cb = fc->interrupt_cb; break; default: break; } } if (rq->req_stat != RS_PENDING) /* * All done. call the real callback to complete the FM API * request. We should only get here if a BEGINW or WRITE * operation failed. */ (*fc->interrupt_cb)(a, rq); } /* * Build a flash request based on the flash context. The request status * is filled in on an error. */ static void build_flash_msg(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_flash_context *fc = (struct esas2r_flash_context *)rq->interrupt_cx; struct esas2r_sg_context *sgc = &fc->sgc; u8 cksum = 0; /* calculate the checksum */ if (fc->func == VDA_FLASH_BEGINW) { if (sgc->cur_offset) cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset, sgc->length, 0); rq->interrupt_cb = esas2r_fmapi_callback; } else { rq->interrupt_cb = fc->interrupt_cb; } esas2r_build_flash_req(a, rq, fc->func, cksum, fc->flsh_addr, sgc->length); esas2r_rq_free_sg_lists(rq, a); /* * remember the length we asked for. 
we have to keep track of * the current amount done so we know how much to compare when * doing the verification phase. */ fc->curr_len = fc->sgc.length; if (sgc->cur_offset) { /* setup the S/G context to build the S/G table */ esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]); if (!esas2r_build_sg_list(a, rq, sgc)) { rq->req_stat = RS_BUSY; return; } } else { fc->sgc.length = 0; } /* update the flsh_addr to the next one to write to */ fc->flsh_addr += fc->curr_len; } /* determine the method to process the flash request */ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq) { /* * assume we have more to do. if we return with the status set to * RS_PENDING, FM API tasks will continue. */ rq->req_stat = RS_PENDING; if (test_bit(AF_DEGRADED_MODE, &a->flags)) /* not supported for now */; else build_flash_msg(a, rq); return rq->req_stat == RS_PENDING; } /* boot image fixer uppers called before downloading the image. */ static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi) { struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS]; struct esas2r_pc_image *pi; struct esas2r_boot_header *bh; pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset); bh = (struct esas2r_boot_header *)((u8 *)pi + le16_to_cpu(pi->header_offset)); bh->device_id = cpu_to_le16(a->pcid->device); /* Recalculate the checksum in the PNP header if there */ if (pi->pnp_offset) { u8 *pnp_header_bytes = ((u8 *)pi + le16_to_cpu(pi->pnp_offset)); /* Identifier - dword that starts at byte 10 */ *((u32 *)&pnp_header_bytes[10]) = cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor, a->pcid->subsystem_device)); /* Checksum - byte 9 */ pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes, 32, 0); } /* Recalculate the checksum needed by the PC */ pi->checksum = pi->checksum - esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0); } static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi) { struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI]; u32 len = ch->length; u32 offset = ch->image_offset; struct esas2r_efi_image *ei; struct esas2r_boot_header *bh; while (len) { u32 thislen; ei = (struct esas2r_efi_image *)((u8 *)fi + offset); bh = (struct esas2r_boot_header *)((u8 *)ei + le16_to_cpu( ei->header_offset)); bh->device_id = cpu_to_le16(a->pcid->device); thislen = (u32)le16_to_cpu(bh->image_length) * 512; if (thislen > len) break; len -= thislen; offset += thislen; } } /* Complete a FM API request with the specified status. */ static bool complete_fmapi_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 fi_stat) { struct esas2r_flash_context *fc = (struct esas2r_flash_context *)rq->interrupt_cx; struct esas2r_flash_img *fi = fc->fi; fi->status = fi_stat; fi->driver_error = rq->req_stat; rq->interrupt_cb = NULL; rq->req_stat = RS_SUCCESS; if (fi_stat != FI_STAT_IMG_VER) memset(fc->scratch, 0, FM_BUF_SZ); esas2r_enable_heartbeat(a); clear_bit(AF_FLASH_LOCK, &a->flags); return false; } /* Process each phase of the flash download process. */ static void fw_download_proc(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_flash_context *fc = (struct esas2r_flash_context *)rq->interrupt_cx; struct esas2r_flash_img *fi = fc->fi; struct esas2r_component_header *ch; u32 len; u8 *p, *q; /* If the previous operation failed, just return. */ if (rq->req_stat != RS_SUCCESS) goto error; /* * If an upload just completed and the compare length is non-zero, * then we just read back part of the image we just wrote. 
verify the * section and continue reading until the entire image is verified. */ if (fc->func == VDA_FLASH_READ && fc->cmp_len) { ch = &fi->cmp_hdr[fc->comp_typ]; p = fc->scratch; q = (u8 *)fi /* start of the whole gob */ + ch->image_offset /* start of the current image */ + ch->length /* end of the current image */ - fc->cmp_len; /* where we are now */ /* * NOTE - curr_len is the exact count of bytes for the read * even when the end is read and its not a full buffer */ for (len = fc->curr_len; len; len--) if (*p++ != *q++) goto error; fc->cmp_len -= fc->curr_len; /* # left to compare */ /* Update fc and determine the length for the next upload */ if (fc->cmp_len > FM_BUF_SZ) fc->sgc.length = FM_BUF_SZ; else fc->sgc.length = fc->cmp_len; fc->sgc.cur_offset = fc->sgc_offset + ((u8 *)fc->scratch - (u8 *)fi); } /* * This code uses a 'while' statement since the next component may * have a length = zero. This can happen since some components are * not required. At the end of this 'while' we set up the length * for the next request and therefore sgc.length can be = 0. */ while (fc->sgc.length == 0) { ch = &fi->cmp_hdr[fc->comp_typ]; switch (fc->task) { case FMTSK_ERASE_BOOT: /* the BIOS image is written next */ ch = &fi->cmp_hdr[CH_IT_BIOS]; if (ch->length == 0) goto no_bios; fc->task = FMTSK_WRTBIOS; fc->func = VDA_FLASH_BEGINW; fc->comp_typ = CH_IT_BIOS; fc->flsh_addr = FLS_OFFSET_BOOT; fc->sgc.length = ch->length; fc->sgc.cur_offset = fc->sgc_offset + ch->image_offset; break; case FMTSK_WRTBIOS: /* * The BIOS image has been written - read it and * verify it */ fc->task = FMTSK_READBIOS; fc->func = VDA_FLASH_READ; fc->flsh_addr = FLS_OFFSET_BOOT; fc->cmp_len = ch->length; fc->sgc.length = FM_BUF_SZ; fc->sgc.cur_offset = fc->sgc_offset + ((u8 *)fc->scratch - (u8 *)fi); break; case FMTSK_READBIOS: no_bios: /* * Mark the component header status for the image * completed */ ch->status = CH_STAT_SUCCESS; /* The MAC image is written next */ ch = &fi->cmp_hdr[CH_IT_MAC]; if (ch->length == 0) goto no_mac; fc->task = FMTSK_WRTMAC; fc->func = VDA_FLASH_BEGINW; fc->comp_typ = CH_IT_MAC; fc->flsh_addr = FLS_OFFSET_BOOT + fi->cmp_hdr[CH_IT_BIOS].length; fc->sgc.length = ch->length; fc->sgc.cur_offset = fc->sgc_offset + ch->image_offset; break; case FMTSK_WRTMAC: /* The MAC image has been written - read and verify */ fc->task = FMTSK_READMAC; fc->func = VDA_FLASH_READ; fc->flsh_addr -= ch->length; fc->cmp_len = ch->length; fc->sgc.length = FM_BUF_SZ; fc->sgc.cur_offset = fc->sgc_offset + ((u8 *)fc->scratch - (u8 *)fi); break; case FMTSK_READMAC: no_mac: /* * Mark the component header status for the image * completed */ ch->status = CH_STAT_SUCCESS; /* The EFI image is written next */ ch = &fi->cmp_hdr[CH_IT_EFI]; if (ch->length == 0) goto no_efi; fc->task = FMTSK_WRTEFI; fc->func = VDA_FLASH_BEGINW; fc->comp_typ = CH_IT_EFI; fc->flsh_addr = FLS_OFFSET_BOOT + fi->cmp_hdr[CH_IT_BIOS].length + fi->cmp_hdr[CH_IT_MAC].length; fc->sgc.length = ch->length; fc->sgc.cur_offset = fc->sgc_offset + ch->image_offset; break; case FMTSK_WRTEFI: /* The EFI image has been written - read and verify */ fc->task = FMTSK_READEFI; fc->func = VDA_FLASH_READ; fc->flsh_addr -= ch->length; fc->cmp_len = ch->length; fc->sgc.length = FM_BUF_SZ; fc->sgc.cur_offset = fc->sgc_offset + ((u8 *)fc->scratch - (u8 *)fi); break; case FMTSK_READEFI: no_efi: /* * Mark the component header status for the image * completed */ ch->status = CH_STAT_SUCCESS; /* The CFG image is written next */ ch = &fi->cmp_hdr[CH_IT_CFG]; if (ch->length == 0) goto 
no_cfg; fc->task = FMTSK_WRTCFG; fc->func = VDA_FLASH_BEGINW; fc->comp_typ = CH_IT_CFG; fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; fc->sgc.length = ch->length; fc->sgc.cur_offset = fc->sgc_offset + ch->image_offset; break; case FMTSK_WRTCFG: /* The CFG image has been written - read and verify */ fc->task = FMTSK_READCFG; fc->func = VDA_FLASH_READ; fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; fc->cmp_len = ch->length; fc->sgc.length = FM_BUF_SZ; fc->sgc.cur_offset = fc->sgc_offset + ((u8 *)fc->scratch - (u8 *)fi); break; case FMTSK_READCFG: no_cfg: /* * Mark the component header status for the image * completed */ ch->status = CH_STAT_SUCCESS; /* * The download is complete. If in degraded mode, * attempt a chip reset. */ if (test_bit(AF_DEGRADED_MODE, &a->flags)) esas2r_local_reset_adapter(a); a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; esas2r_print_flash_rev(a); /* Update the type of boot image on the card */ memcpy(a->image_type, fi->rel_version, sizeof(fi->rel_version)); complete_fmapi_req(a, rq, FI_STAT_SUCCESS); return; } /* If verifying, don't try reading more than what's there */ if (fc->func == VDA_FLASH_READ && fc->sgc.length > fc->cmp_len) fc->sgc.length = fc->cmp_len; } /* Build the request to perform the next action */ if (!load_image(a, rq)) { error: if (fc->comp_typ < fi->num_comps) { ch = &fi->cmp_hdr[fc->comp_typ]; ch->status = CH_STAT_FAILED; } complete_fmapi_req(a, rq, FI_STAT_FAILED); } } /* Determine the flash image adaptyp for this adapter */ static u8 get_fi_adap_type(struct esas2r_adapter *a) { u8 type; /* use the device ID to get the correct adap_typ for this HBA */ switch (a->pcid->device) { case ATTO_DID_INTEL_IOP348: type = FI_AT_SUN_LAKE; break; case ATTO_DID_MV_88RC9580: case ATTO_DID_MV_88RC9580TS: case ATTO_DID_MV_88RC9580TSE: case ATTO_DID_MV_88RC9580TL: type = FI_AT_MV_9580; break; default: type = FI_AT_UNKNWN; break; } return type; } /* Size of config + copyright + flash_ver images, 0 for failure. 
*/ static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver) { u16 *pw = (u16 *)cfg - 1; u32 sz = 0; u32 len = length; if (len == 0) len = FM_BUF_SZ; if (flash_ver) *flash_ver = 0; while (true) { u16 type; u16 size; type = le16_to_cpu(*pw--); size = le16_to_cpu(*pw--); if (type != FBT_CPYR && type != FBT_SETUP && type != FBT_FLASH_VER) break; if (type == FBT_FLASH_VER && flash_ver) *flash_ver = le32_to_cpu(*(u32 *)(pw - 1)); sz += size + (2 * sizeof(u16)); pw -= size / sizeof(u16); if (sz > len - (2 * sizeof(u16))) break; } /* See if we are comparing the size to the specified length */ if (length && sz != length) return 0; return sz; } /* Verify that the boot image is valid */ static u8 chk_boot(u8 *boot_img, u32 length) { struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img; u16 hdroffset = le16_to_cpu(bi->header_offset); struct esas2r_boot_header *bh; if (bi->signature != le16_to_cpu(0xaa55) || (long)hdroffset > (long)(65536L - sizeof(struct esas2r_boot_header)) || (hdroffset & 3) || (hdroffset < sizeof(struct esas2r_boot_image)) || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length)) return 0xff; bh = (struct esas2r_boot_header *)((char *)bi + hdroffset); if (bh->signature[0] != 'P' || bh->signature[1] != 'C' || bh->signature[2] != 'I' || bh->signature[3] != 'R' || le16_to_cpu(bh->struct_length) < (u16)sizeof(struct esas2r_boot_header) || bh->class_code[2] != 0x01 || bh->class_code[1] != 0x04 || bh->class_code[0] != 0x00 || (bh->code_type != CODE_TYPE_PC && bh->code_type != CODE_TYPE_OPEN && bh->code_type != CODE_TYPE_EFI)) return 0xff; return bh->code_type; } /* The sum of all the WORDS of the image */ static u16 calc_fi_checksum(struct esas2r_flash_context *fc) { struct esas2r_flash_img *fi = fc->fi; u16 cksum; u32 len; u16 *pw; for (len = (fi->length - fc->fi_hdr_len) / 2, pw = (u16 *)((u8 *)fi + fc->fi_hdr_len), cksum = 0; len; len--, pw++) cksum = cksum + le16_to_cpu(*pw); return cksum; } /* * Verify the flash image structure. The following verifications will * be performed: * 1) verify the fi_version is correct * 2) verify the checksum of the entire image. * 3) validate the adap_typ, action and length fields. * 4) validate each component header. check the img_type and * length fields * 5) validate each component image. validate signatures and * local checksums */ static bool verify_fi(struct esas2r_adapter *a, struct esas2r_flash_context *fc) { struct esas2r_flash_img *fi = fc->fi; u8 type; bool imgerr; u16 i; u32 len; struct esas2r_component_header *ch; /* Verify the length - the length must be even since we do a word checksum */ len = fi->length; if ((len & 1) || len < fc->fi_hdr_len) { fi->status = FI_STAT_LENGTH; return false; } /* Get adapter type and verify type in flash image */ type = get_fi_adap_type(a); if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) { fi->status = FI_STAT_ADAPTYP; return false; } /* * Loop through each component and verify the img_type and length * fields. Keep a running count of the sizes so we can verify the * total size against the additive size. */ imgerr = false; for (i = 0, len = 0, ch = fi->cmp_hdr; i < fi->num_comps; i++, ch++) { bool cmperr = false; /* * Verify that the component header has the same index as the * image type.
The headers must be ordered correctly */ if (i != ch->img_type) { imgerr = true; ch->status = CH_STAT_INVALID; continue; } switch (ch->img_type) { case CH_IT_BIOS: type = CODE_TYPE_PC; break; case CH_IT_MAC: type = CODE_TYPE_OPEN; break; case CH_IT_EFI: type = CODE_TYPE_EFI; break; } switch (ch->img_type) { case CH_IT_FW: case CH_IT_NVR: break; case CH_IT_BIOS: case CH_IT_MAC: case CH_IT_EFI: if (ch->length & 0x1ff) cmperr = true; /* Test if component image is present */ if (ch->length == 0) break; /* Image is present - verify the image */ if (chk_boot((u8 *)fi + ch->image_offset, ch->length) != type) cmperr = true; break; case CH_IT_CFG: /* Test if component image is present */ if (ch->length == 0) { cmperr = true; break; } /* Image is present - verify the image */ if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length, ch->length, NULL)) cmperr = true; break; default: fi->status = FI_STAT_UNKNOWN; return false; } if (cmperr) { imgerr = true; ch->status = CH_STAT_INVALID; } else { ch->status = CH_STAT_PENDING; len += ch->length; } } if (imgerr) { fi->status = FI_STAT_MISSING; return false; } /* Compare fi->length to the sum of ch->length fields */ if (len != fi->length - fc->fi_hdr_len) { fi->status = FI_STAT_LENGTH; return false; } /* Compute the checksum - it should come out zero */ if (fi->checksum != calc_fi_checksum(fc)) { fi->status = FI_STAT_CHKSUM; return false; } return true; } /* Fill in the FS IOCTL response data from a completed request. */ static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq) { struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)rq->interrupt_cx; if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) esas2r_enable_heartbeat(a); fs->driver_error = rq->req_stat; if (fs->driver_error == RS_SUCCESS) fs->status = ATTO_STS_SUCCESS; else fs->status = ATTO_STS_FAILED; } /* Prepare an FS IOCTL request to be sent to the firmware. 
*/ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, struct esas2r_ioctl_fs *fs, struct esas2r_request *rq, struct esas2r_sg_context *sgc) { u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func); struct esas2r_ioctlfs_command *fsc = &fs->command; u8 func = 0; u32 datalen; fs->status = ATTO_STS_FAILED; fs->driver_error = RS_PENDING; if (fs->version > ESAS2R_FS_VER) { fs->status = ATTO_STS_INV_VERSION; return false; } if (fsc->command >= cmdcnt) { fs->status = ATTO_STS_INV_FUNC; return false; } func = cmd_to_fls_func[fsc->command]; if (func == 0xFF) { fs->status = ATTO_STS_INV_FUNC; return false; } if (fsc->command != ESAS2R_FS_CMD_CANCEL) { if ((a->pcid->device != ATTO_DID_MV_88RC9580 || fs->adap_type != ESAS2R_FS_AT_ESASRAID2) && (a->pcid->device != ATTO_DID_MV_88RC9580TS || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2) && (a->pcid->device != ATTO_DID_MV_88RC9580TSE || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E) && (a->pcid->device != ATTO_DID_MV_88RC9580TL || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) { fs->status = ATTO_STS_INV_ADAPTER; return false; } if (fs->driver_ver > ESAS2R_FS_DRVR_VER) { fs->status = ATTO_STS_INV_DRVR_VER; return false; } } if (test_bit(AF_DEGRADED_MODE, &a->flags)) { fs->status = ATTO_STS_DEGRADED; return false; } rq->interrupt_cb = esas2r_complete_fs_ioctl; rq->interrupt_cx = fs; datalen = le32_to_cpu(fsc->length); esas2r_build_flash_req(a, rq, func, fsc->checksum, le32_to_cpu(fsc->flash_addr), datalen); if (func == VDA_FLASH_WRITE || func == VDA_FLASH_READ) { if (datalen == 0) { fs->status = ATTO_STS_INV_FUNC; return false; } esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge); sgc->length = datalen; if (!esas2r_build_sg_list(a, rq, sgc)) { fs->status = ATTO_STS_OUT_OF_RSRC; return false; } } if (func == VDA_FLASH_COMMIT) esas2r_disable_heartbeat(a); esas2r_start_request(a, rq); return true; } static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function) { u32 starttime; u32 timeout; u32 intstat; u32 doorbell; /* Disable chip interrupts awhile */ if (function == DRBL_FLASH_REQ) esas2r_disable_chip_interrupts(a); /* Issue the request to the firmware */ esas2r_write_register_dword(a, MU_DOORBELL_IN, function); /* Now wait for the firmware to process it */ starttime = jiffies_to_msecs(jiffies); if (test_bit(AF_CHPRST_PENDING, &a->flags) || test_bit(AF_DISC_PENDING, &a->flags)) timeout = 40000; else timeout = 5000; while (true) { intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); if (intstat & MU_INTSTAT_DRBL) { /* Got a doorbell interrupt. Check for the function */ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (doorbell & function) break; } schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > timeout) { /* * Timeout. If we were requesting flash access, * indicate we are done so the firmware knows we gave * up. If this was a REQ, we also need to re-enable * chip interrupts.
*/ if (function == DRBL_FLASH_REQ) { esas2r_hdebug("flash access timeout"); esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FLASH_DONE); esas2r_enable_chip_interrupts(a); } else { esas2r_hdebug("flash release timeout"); } return false; } } /* if we're done, re-enable chip interrupts */ if (function == DRBL_FLASH_DONE) esas2r_enable_chip_interrupts(a); return true; } #define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE) bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from, u32 size) { u8 *end = (u8 *)to; /* Try to acquire access to the flash */ if (!esas2r_flash_access(a, DRBL_FLASH_REQ)) return false; while (size) { u32 len; u32 offset; u32 iatvr; if (test_bit(AF2_SERIAL_FLASH, &a->flags2)) iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); else iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); esas2r_map_data_window(a, iatvr); offset = from & (WINDOW_SIZE - 1); len = size; if (len > WINDOW_SIZE - offset) len = WINDOW_SIZE - offset; from += len; size -= len; while (len--) { *end++ = esas2r_read_data_byte(a, offset); offset++; } } /* Release flash access */ esas2r_flash_access(a, DRBL_FLASH_DONE); return true; } bool esas2r_read_flash_rev(struct esas2r_adapter *a) { u8 bytes[256]; u16 *pw; u16 *pwstart; u16 type; u16 size; u32 sz; sz = sizeof(bytes); pw = (u16 *)(bytes + sz); pwstart = (u16 *)bytes + 2; if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz)) goto invalid_rev; while (pw >= pwstart) { pw--; type = le16_to_cpu(*pw); pw--; size = le16_to_cpu(*pw); pw -= size / 2; if (type == FBT_CPYR || type == FBT_SETUP || pw < pwstart) continue; if (type == FBT_FLASH_VER) a->flash_ver = le32_to_cpu(*(u32 *)pw); break; } invalid_rev: return esas2r_print_flash_rev(a); } bool esas2r_print_flash_rev(struct esas2r_adapter *a) { u16 year = LOWORD(a->flash_ver); u8 day = LOBYTE(HIWORD(a->flash_ver)); u8 month = HIBYTE(HIWORD(a->flash_ver)); if (day == 0 || month == 0 || day > 31 || month > 12 || year < 2006 || year > 9999) { strcpy(a->flash_rev, "not found"); a->flash_ver = 0; return false; } sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year); esas2r_hdebug("flash version: %s", a->flash_rev); return true; } /* * Find the type of boot image type that is currently in the flash. * The chip only has a 64 KB PCI-e expansion ROM * size so only one image can be flashed at a time. */ bool esas2r_read_image_type(struct esas2r_adapter *a) { u8 bytes[256]; struct esas2r_boot_image *bi; struct esas2r_boot_header *bh; u32 sz; u32 len; u32 offset; /* Start at the base of the boot images and look for a valid image */ sz = sizeof(bytes); len = FLS_LENGTH_BOOT; offset = 0; while (true) { if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT + offset, sz)) goto invalid_rev; bi = (struct esas2r_boot_image *)bytes; bh = (struct esas2r_boot_header *)((u8 *)bi + le16_to_cpu( bi->header_offset)); if (bi->signature != cpu_to_le16(0xAA55)) goto invalid_rev; if (bh->code_type == CODE_TYPE_PC) { strcpy(a->image_type, "BIOS"); return true; } else if (bh->code_type == CODE_TYPE_EFI) { struct esas2r_efi_image *ei; /* * So we have an EFI image. There are several types * so see which architecture we have. 
*/ ei = (struct esas2r_efi_image *)bytes; switch (le16_to_cpu(ei->machine_type)) { case EFI_MACHINE_IA32: strcpy(a->image_type, "EFI 32-bit"); return true; case EFI_MACHINE_IA64: strcpy(a->image_type, "EFI itanium"); return true; case EFI_MACHINE_X64: strcpy(a->image_type, "EFI 64-bit"); return true; case EFI_MACHINE_EBC: strcpy(a->image_type, "EFI EBC"); return true; default: goto invalid_rev; } } else { u32 thislen; /* jump to the next image */ thislen = (u32)le16_to_cpu(bh->image_length) * 512; if (thislen == 0 || thislen + offset > len || bh->indicator == INDICATOR_LAST) break; offset += thislen; } } invalid_rev: strcpy(a->image_type, "no boot images"); return false; } /* * Read and validate current NVRAM parameters by accessing * physical NVRAM directly. if currently stored parameters are * invalid, use the defaults. */ bool esas2r_nvram_read_direct(struct esas2r_adapter *a) { bool result; if (down_interruptible(&a->nvram_semaphore)) return false; if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR, sizeof(struct esas2r_sas_nvram))) { esas2r_hdebug("NVRAM read failed, using defaults"); up(&a->nvram_semaphore); return false; } result = esas2r_nvram_validate(a); up(&a->nvram_semaphore); return result; } /* Interrupt callback to process NVRAM completions. */ static void esas2r_nvram_callback(struct esas2r_adapter *a, struct esas2r_request *rq) { struct atto_vda_flash_req *vrq = &rq->vrq->flash; if (rq->req_stat == RS_SUCCESS) { /* last request was successful. see what to do now. */ switch (vrq->sub_func) { case VDA_FLASH_BEGINW: vrq->sub_func = VDA_FLASH_WRITE; rq->req_stat = RS_PENDING; break; case VDA_FLASH_WRITE: vrq->sub_func = VDA_FLASH_COMMIT; rq->req_stat = RS_PENDING; break; case VDA_FLASH_READ: esas2r_nvram_validate(a); break; case VDA_FLASH_COMMIT: default: break; } } if (rq->req_stat != RS_PENDING) { /* update the NVRAM state */ if (rq->req_stat == RS_SUCCESS) set_bit(AF_NVR_VALID, &a->flags); else clear_bit(AF_NVR_VALID, &a->flags); esas2r_enable_heartbeat(a); up(&a->nvram_semaphore); } } /* * Write the contents of nvram to the adapter's physical NVRAM. * The cached copy of the NVRAM is also updated. 
*/ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *nvram) { struct esas2r_sas_nvram *n = nvram; u8 sas_address_bytes[8]; u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; struct atto_vda_flash_req *vrq = &rq->vrq->flash; if (test_bit(AF_DEGRADED_MODE, &a->flags)) return false; if (down_interruptible(&a->nvram_semaphore)) return false; if (n == NULL) n = a->nvram; /* check the validity of the settings */ if (n->version > SASNVR_VERSION) { up(&a->nvram_semaphore); return false; } memcpy(&sas_address_bytes[0], n->sas_addr, 8); if (sas_address_bytes[0] != 0x50 || sas_address_bytes[1] != 0x01 || sas_address_bytes[2] != 0x08 || (sas_address_bytes[3] & 0xF0) != 0x60 || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) { up(&a->nvram_semaphore); return false; } if (n->spin_up_delay > SASNVR_SPINUP_MAX) n->spin_up_delay = SASNVR_SPINUP_MAX; n->version = SASNVR_VERSION; n->checksum = n->checksum - esas2r_nvramcalc_cksum(n); memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram)); /* write the NVRAM */ n = a->nvram; esas2r_disable_heartbeat(a); esas2r_build_flash_req(a, rq, VDA_FLASH_BEGINW, esas2r_nvramcalc_xor_cksum(n), FLS_OFFSET_NVR, sizeof(struct esas2r_sas_nvram)); if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { vrq->data.sge[0].length = cpu_to_le32(SGE_LAST | sizeof(struct esas2r_sas_nvram)); vrq->data.sge[0].address = cpu_to_le64( a->uncached_phys + (u64)((u8 *)n - a->uncached)); } else { vrq->data.prde[0].ctl_len = cpu_to_le32(sizeof(struct esas2r_sas_nvram)); vrq->data.prde[0].address = cpu_to_le64( a->uncached_phys + (u64)((u8 *)n - a->uncached)); } rq->interrupt_cb = esas2r_nvram_callback; esas2r_start_request(a, rq); return true; } /* Validate the cached NVRAM. if the NVRAM is invalid, load the defaults. */ bool esas2r_nvram_validate(struct esas2r_adapter *a) { struct esas2r_sas_nvram *n = a->nvram; bool rslt = false; if (n->signature[0] != 'E' || n->signature[1] != 'S' || n->signature[2] != 'A' || n->signature[3] != 'S') { esas2r_hdebug("invalid NVRAM signature"); } else if (esas2r_nvramcalc_cksum(n)) { esas2r_hdebug("invalid NVRAM checksum"); } else if (n->version > SASNVR_VERSION) { esas2r_hdebug("invalid NVRAM version"); } else { set_bit(AF_NVR_VALID, &a->flags); rslt = true; } if (rslt == false) { esas2r_hdebug("using defaults"); esas2r_nvram_set_defaults(a); } return rslt; } /* * Set the cached NVRAM to defaults. note that this function sets the default * NVRAM when it has been determined that the physical NVRAM is invalid. * In this case, the SAS address is fabricated. */ void esas2r_nvram_set_defaults(struct esas2r_adapter *a) { struct esas2r_sas_nvram *n = a->nvram; u32 time = jiffies_to_msecs(jiffies); clear_bit(AF_NVR_VALID, &a->flags); *n = default_sas_nvram; n->sas_addr[3] |= 0x0F; n->sas_addr[4] = HIBYTE(LOWORD(time)); n->sas_addr[5] = LOBYTE(LOWORD(time)); n->sas_addr[6] = a->pcid->bus->number; n->sas_addr[7] = a->pcid->devfn; } void esas2r_nvram_get_defaults(struct esas2r_adapter *a, struct esas2r_sas_nvram *nvram) { u8 sas_addr[8]; /* * in case we are copying the defaults into the adapter, copy the SAS * address out first. 
*/ memcpy(&sas_addr[0], a->nvram->sas_addr, 8); *nvram = default_sas_nvram; memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); } bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) { struct esas2r_flash_context *fc = &a->flash_context; u8 j; struct esas2r_component_header *ch; if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) { /* flag was already set */ fi->status = FI_STAT_BUSY; return false; } memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context)); sgc = &fc->sgc; fc->fi = fi; fc->sgc_offset = sgc->cur_offset; rq->req_stat = RS_SUCCESS; rq->interrupt_cx = fc; switch (fi->fi_version) { case FI_VERSION_1: fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf; fc->num_comps = FI_NUM_COMPS_V1; fc->fi_hdr_len = sizeof(struct esas2r_flash_img); break; default: return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); } if (test_bit(AF_DEGRADED_MODE, &a->flags)) return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); switch (fi->action) { case FI_ACT_DOWN: /* Download the components */ /* Verify the format of the flash image */ if (!verify_fi(a, fc)) return complete_fmapi_req(a, rq, fi->status); /* Adjust the BIOS fields that are dependent on the HBA */ ch = &fi->cmp_hdr[CH_IT_BIOS]; if (ch->length) fix_bios(a, fi); /* Adjust the EFI fields that are dependent on the HBA */ ch = &fi->cmp_hdr[CH_IT_EFI]; if (ch->length) fix_efi(a, fi); /* * Since the image was just modified, compute the checksum on * the modified image. First update the CRC for the composite * expansion ROM image. */ fi->checksum = calc_fi_checksum(fc); /* Disable the heartbeat */ esas2r_disable_heartbeat(a); /* Now start up the download sequence */ fc->task = FMTSK_ERASE_BOOT; fc->func = VDA_FLASH_BEGINW; fc->comp_typ = CH_IT_CFG; fc->flsh_addr = FLS_OFFSET_BOOT; fc->sgc.length = FLS_LENGTH_BOOT; fc->sgc.cur_offset = NULL; /* Setup the callback address */ fc->interrupt_cb = fw_download_proc; break; case FI_ACT_UPSZ: /* Get upload sizes */ fi->adap_typ = get_fi_adap_type(a); fi->flags = 0; fi->num_comps = fc->num_comps; fi->length = fc->fi_hdr_len; /* Report the type of boot image in the rel_version string */ memcpy(fi->rel_version, a->image_type, sizeof(fi->rel_version)); /* Build the component headers */ for (j = 0, ch = fi->cmp_hdr; j < fi->num_comps; j++, ch++) { ch->img_type = j; ch->status = CH_STAT_PENDING; ch->length = 0; ch->version = 0xffffffff; ch->image_offset = 0; ch->pad[0] = 0; ch->pad[1] = 0; } if (a->flash_ver != 0) { fi->cmp_hdr[CH_IT_BIOS].version = fi->cmp_hdr[CH_IT_MAC].version = fi->cmp_hdr[CH_IT_EFI].version = fi->cmp_hdr[CH_IT_CFG].version = a->flash_ver; fi->cmp_hdr[CH_IT_BIOS].status = fi->cmp_hdr[CH_IT_MAC].status = fi->cmp_hdr[CH_IT_EFI].status = fi->cmp_hdr[CH_IT_CFG].status = CH_STAT_SUCCESS; return complete_fmapi_req(a, rq, FI_STAT_SUCCESS); } fallthrough; case FI_ACT_UP: /* Upload the components */ default: return complete_fmapi_req(a, rq, FI_STAT_INVALID); } /* * If we make it here, fc has been setup to do the first task. Call * load_image to format the request, start it, and get out. The * interrupt code will call the callback when the first message is * complete. */ if (!load_image(a, rq)) return complete_fmapi_req(a, rq, FI_STAT_FAILED); esas2r_start_request(a, rq); return true; }
linux-master
drivers/scsi/esas2r/esas2r_flash.c
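The flash read path above (esas2r_read_flash_block) copies through a sliding data window: the window base is the read address rounded down to a window boundary (the driver's `from & -WINDOW_SIZE` idiom) and the in-window offset is `from & (WINDOW_SIZE - 1)`, with each copy clamped so it never crosses the window edge. The stand-alone C sketch below is not driver code; it only reproduces that arithmetic, with a made-up window size, to show how one read gets split into per-window chunks.

#include <stdio.h>
#include <stdint.h>

/* Made-up window size for illustration only; the driver uses MW_DATA_WINDOW_SIZE. */
#define EXAMPLE_WINDOW_SIZE 0x1000u

int main(void)
{
	uint32_t from = 0x00012f40;	/* arbitrary flash offset */
	uint32_t size = 0x2300;		/* arbitrary read length */

	while (size) {
		/* Round down to the window boundary; for a power-of-two size
		 * this matches the driver's (from & -WINDOW_SIZE) idiom. */
		uint32_t base = from & ~(EXAMPLE_WINDOW_SIZE - 1);
		uint32_t offset = from & (EXAMPLE_WINDOW_SIZE - 1);
		uint32_t len = size;

		/* Clamp the copy so it never crosses the window edge. */
		if (len > EXAMPLE_WINDOW_SIZE - offset)
			len = EXAMPLE_WINDOW_SIZE - offset;

		printf("map window 0x%08x, copy 0x%04x bytes from offset 0x%03x\n",
		       (unsigned)base, (unsigned)len, (unsigned)offset);

		from += len;
		size -= len;
	}

	return 0;
}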
/* * linux/drivers/scsi/esas2r/esas2r_targdb.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include "esas2r.h" void esas2r_targ_db_initialize(struct esas2r_adapter *a) { struct esas2r_target *t; for (t = a->targetdb; t < a->targetdb_end; t++) { memset(t, 0, sizeof(struct esas2r_target)); t->target_state = TS_NOT_PRESENT; t->buffered_target_state = TS_NOT_PRESENT; t->new_target_state = TS_INVALID; } } void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify) { struct esas2r_target *t; unsigned long flags; for (t = a->targetdb; t < a->targetdb_end; t++) { if (t->target_state != TS_PRESENT) continue; spin_lock_irqsave(&a->mem_lock, flags); esas2r_targ_db_remove(a, t); spin_unlock_irqrestore(&a->mem_lock, flags); if (notify) { esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a)); esas2r_target_state_changed(a, esas2r_targ_get_id(t, a), TS_NOT_PRESENT); } } } void esas2r_targ_db_report_changes(struct esas2r_adapter *a) { struct esas2r_target *t; unsigned long flags; esas2r_trace_enter(); if (test_bit(AF_DISC_PENDING, &a->flags)) { esas2r_trace_exit(); return; } for (t = a->targetdb; t < a->targetdb_end; t++) { u8 state = TS_INVALID; spin_lock_irqsave(&a->mem_lock, flags); if (t->buffered_target_state != t->target_state) state = t->buffered_target_state = t->target_state; spin_unlock_irqrestore(&a->mem_lock, flags); if (state != TS_INVALID) { esas2r_trace("targ_db_report_changes:%d", esas2r_targ_get_id( t, a)); esas2r_trace("state:%d", state); esas2r_target_state_changed(a, esas2r_targ_get_id(t, a), state); } } esas2r_trace_exit(); } struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, struct esas2r_disc_context * dc) { struct esas2r_target *t; esas2r_trace_enter(); if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { esas2r_bugon(); esas2r_trace_exit(); return NULL; } t = a->targetdb + dc->curr_virt_id; if (t->target_state == TS_PRESENT) { esas2r_trace_exit(); return NULL; } esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name, esas2r_targ_get_id( t, a)); if (dc->interleave == 0 || dc->block_size == 0) { /* these are invalid values, don't create the target entry. */ esas2r_hdebug("invalid RAID group dimensions"); esas2r_trace_exit(); return NULL; } t->block_size = dc->block_size; t->inter_byte = dc->interleave; t->inter_block = dc->interleave / dc->block_size; t->virt_targ_id = dc->curr_virt_id; t->phys_targ_id = ESAS2R_TARG_ID_INV; t->flags &= ~TF_PASS_THRU; t->flags |= TF_USED; t->identifier_len = 0; t->target_state = TS_PRESENT; return t; } struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, struct esas2r_disc_context *dc, u8 *ident, u8 ident_len) { struct esas2r_target *t; esas2r_trace_enter(); if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { esas2r_bugon(); esas2r_trace_exit(); return NULL; } /* see if we found this device before. 
*/ t = esas2r_targ_db_find_by_ident(a, ident, ident_len); if (t == NULL) { t = a->targetdb + dc->curr_virt_id; if (ident_len > sizeof(t->identifier) || t->target_state == TS_PRESENT) { esas2r_trace_exit(); return NULL; } } esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a), dc->curr_virt_id, dc->curr_phys_id); t->block_size = 0; t->inter_byte = 0; t->inter_block = 0; t->virt_targ_id = dc->curr_virt_id; t->phys_targ_id = dc->curr_phys_id; t->identifier_len = ident_len; memcpy(t->identifier, ident, ident_len); t->flags |= TF_PASS_THRU | TF_USED; t->target_state = TS_PRESENT; return t; } void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t) { esas2r_trace_enter(); t->target_state = TS_NOT_PRESENT; esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a)); esas2r_trace_exit(); } struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, u64 *sas_addr) { struct esas2r_target *t; for (t = a->targetdb; t < a->targetdb_end; t++) if (t->sas_addr == *sas_addr) return t; return NULL; } struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, void *identifier, u8 ident_len) { struct esas2r_target *t; for (t = a->targetdb; t < a->targetdb_end; t++) { if (ident_len == t->identifier_len && memcmp(&t->identifier[0], identifier, ident_len) == 0) return t; } return NULL; } u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id) { u16 id = target_id + 1; while (id < ESAS2R_MAX_TARGETS) { struct esas2r_target *t = a->targetdb + id; if (t->target_state == TS_PRESENT) break; id++; } return id; } struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, u16 virt_id) { struct esas2r_target *t; for (t = a->targetdb; t < a->targetdb_end; t++) { if (t->target_state != TS_PRESENT) continue; if (t->virt_targ_id == virt_id) return t; } return NULL; } u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a) { u16 devcnt = 0; struct esas2r_target *t; unsigned long flags; spin_lock_irqsave(&a->mem_lock, flags); for (t = a->targetdb; t < a->targetdb_end; t++) if (t->target_state == TS_PRESENT) devcnt++; spin_unlock_irqrestore(&a->mem_lock, flags); return devcnt; }
linux-master
drivers/scsi/esas2r/esas2r_targdb.c
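The target database above is a fixed array indexed by virtual target id, and lookups such as esas2r_targ_db_find_next_present() are plain linear scans over that array. The short user-space sketch below only illustrates that access pattern; the struct, field names and sizes are invented for the example and are not the driver's data structures.

#include <stdio.h>
#include <string.h>

#define EXAMPLE_MAX_TARGETS 16

/* Invented, minimal stand-in for a target database entry. */
struct example_target {
	int present;
};

static struct example_target targetdb[EXAMPLE_MAX_TARGETS];

/* Same shape as esas2r_targ_db_find_next_present(): return the first id
 * after 'target_id' whose entry is present, or EXAMPLE_MAX_TARGETS if none. */
static unsigned int find_next_present(unsigned int target_id)
{
	unsigned int id = target_id + 1;

	while (id < EXAMPLE_MAX_TARGETS && !targetdb[id].present)
		id++;

	return id;
}

int main(void)
{
	memset(targetdb, 0, sizeof(targetdb));
	targetdb[3].present = 1;
	targetdb[9].present = 1;

	printf("next present after id 0: %u\n", find_next_present(0)); /* 3 */
	printf("next present after id 3: %u\n", find_next_present(3)); /* 9 */
	printf("next present after id 9: %u\n", find_next_present(9)); /* 16 = none */

	return 0;
}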
/* * linux/drivers/scsi/esas2r/esas2r_log.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include "esas2r.h" /* * this module within the driver is tasked with providing logging functionality. * the event_log_level module parameter controls the level of messages that are * written to the system log. the default level of messages that are written * are critical and warning messages. if other types of messages are desired, * one simply needs to load the module with the correct value for the * event_log_level module parameter. for example: * * insmod <module> event_log_level=1 * * will load the module and only critical events will be written by this module * to the system log. if critical, warning, and information-level messages are * desired, the correct value for the event_log_level module parameter * would be as follows: * * insmod <module> event_log_level=3 */ #define EVENT_LOG_BUFF_SIZE 1024 static long event_log_level = ESAS2R_LOG_DFLT; module_param(event_log_level, long, S_IRUGO | S_IRUSR); MODULE_PARM_DESC(event_log_level, "Specifies the level of events to report to the system log. Critical and warning level events are logged by default."); /* A shared buffer to use for formatting messages. */ static char event_buffer[EVENT_LOG_BUFF_SIZE]; /* A lock to protect the shared buffer used for formatting messages. */ static DEFINE_SPINLOCK(event_buffer_lock); /* * translates an esas2r-defined logging event level to a kernel logging level. 
* * @param [in] level the esas2r-defined logging event level to translate * * @return the corresponding kernel logging level. */ static const char *translate_esas2r_event_level_to_kernel(const long level) { switch (level) { case ESAS2R_LOG_CRIT: return KERN_CRIT; case ESAS2R_LOG_WARN: return KERN_WARNING; case ESAS2R_LOG_INFO: return KERN_INFO; case ESAS2R_LOG_DEBG: case ESAS2R_LOG_TRCE: default: return KERN_DEBUG; } } #pragma GCC diagnostic push #ifndef __clang__ #pragma GCC diagnostic ignored "-Wsuggest-attribute=format" #endif /* * the master logging function. this function will format the message as * outlined by the formatting string, the input device information and the * substitution arguments and output the resulting string to the system log. * * @param [in] level the event log level of the message * @param [in] dev the device information * @param [in] format the formatting string for the message * @param [in] args the substition arguments to the formatting string * * @return 0 on success, or -1 if an error occurred. */ static int esas2r_log_master(const long level, const struct device *dev, const char *format, va_list args) { if (level <= event_log_level) { unsigned long flags = 0; int retval = 0; char *buffer = event_buffer; size_t buflen = EVENT_LOG_BUFF_SIZE; const char *fmt_nodev = "%s%s: "; const char *fmt_dev = "%s%s [%s, %s, %s]"; const char *slevel = translate_esas2r_event_level_to_kernel(level); spin_lock_irqsave(&event_buffer_lock, flags); memset(buffer, 0, buflen); /* * format the level onto the beginning of the string and do * some pointer arithmetic to move the pointer to the point * where the actual message can be inserted. */ if (dev == NULL) { snprintf(buffer, buflen, fmt_nodev, slevel, ESAS2R_DRVR_NAME); } else { snprintf(buffer, buflen, fmt_dev, slevel, ESAS2R_DRVR_NAME, (dev->driver ? dev->driver->name : "unknown"), (dev->bus ? dev->bus->name : "unknown"), dev_name(dev)); } buffer += strlen(event_buffer); buflen -= strlen(event_buffer); retval = vsnprintf(buffer, buflen, format, args); if (retval < 0) { spin_unlock_irqrestore(&event_buffer_lock, flags); return -1; } /* * Put a line break at the end of the formatted string so that * we don't wind up with run-on messages. */ printk("%s\n", event_buffer); spin_unlock_irqrestore(&event_buffer_lock, flags); } return 0; } #pragma GCC diagnostic pop /* * formats and logs a message to the system log. * * @param [in] level the event level of the message * @param [in] format the formating string for the message * @param [in] ... the substitution arguments to the formatting string * * @return 0 on success, or -1 if an error occurred. */ int esas2r_log(const long level, const char *format, ...) { int retval = 0; va_list args; va_start(args, format); retval = esas2r_log_master(level, NULL, format, args); va_end(args); return retval; } /* * formats and logs a message to the system log. this message will include * device information. * * @param [in] level the event level of the message * @param [in] dev the device information * @param [in] format the formatting string for the message * @param [in] ... the substitution arguments to the formatting string * * @return 0 on success, or -1 if an error occurred. */ int esas2r_log_dev(const long level, const struct device *dev, const char *format, ...) { int retval = 0; va_list args; va_start(args, format); retval = esas2r_log_master(level, dev, format, args); va_end(args); return retval; } /* * formats and logs a message to the system log. 
the logged content is a hex dump
 * of the supplied buffer rather than a formatted string.
 *
 * @param [in] level the event level of the dump
 * @param [in] buf   the buffer to dump
 * @param [in] len   the number of bytes to dump
 *
 * @return always 1; the dump is skipped when the level is filtered out by
 *         event_log_level.
 */
int esas2r_log_hexdump(const long level, const void *buf, size_t len)
{
	if (level <= event_log_level) {
		print_hex_dump(translate_esas2r_event_level_to_kernel(level),
			       "", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
	}

	return 1;
}
linux-master
drivers/scsi/esas2r/esas2r_log.c
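The logging module above gates every message on the event_log_level module parameter (lower values are more severe) and prefixes the text with a kernel log level before printing it. The user-space sketch below mirrors only that gate-then-prefix flow; the level names, numeric values and function names are invented for the example and are not the driver's API.

#include <stdarg.h>
#include <stdio.h>

/* Invented severity levels for the example; smaller means more severe. */
enum { EX_LOG_CRIT = 1, EX_LOG_WARN = 2, EX_LOG_INFO = 3, EX_LOG_DEBG = 4 };

static int example_log_level = EX_LOG_WARN;	/* default: critical + warning */

static const char *level_prefix(int level)
{
	switch (level) {
	case EX_LOG_CRIT: return "CRIT";
	case EX_LOG_WARN: return "WARN";
	case EX_LOG_INFO: return "INFO";
	default:          return "DEBG";
	}
}

/* Same shape as esas2r_log(): drop anything less severe than the configured
 * level, otherwise prefix and print the formatted message. */
static int example_log(int level, const char *fmt, ...)
{
	va_list args;

	if (level > example_log_level)
		return 0;

	printf("%s example: ", level_prefix(level));
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
	printf("\n");

	return 0;
}

int main(void)
{
	example_log(EX_LOG_WARN, "adapter %d degraded", 0);	/* printed */
	example_log(EX_LOG_INFO, "discovery finished");		/* filtered out */

	return 0;
}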
/* * linux/drivers/scsi/esas2r/esas2r_main.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include "esas2r.h" MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver"); MODULE_AUTHOR("ATTO Technology, Inc."); MODULE_LICENSE("GPL"); MODULE_VERSION(ESAS2R_VERSION_STR); /* global definitions */ static int found_adapters; struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS]; #define ESAS2R_VDA_EVENT_PORT1 54414 #define ESAS2R_VDA_EVENT_PORT2 54415 #define ESAS2R_VDA_EVENT_SOCK_COUNT 2 static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj) { struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *host = class_to_shost(dev); return (struct esas2r_adapter *)host->hostdata; } static ssize_t read_fw(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); return esas2r_read_fw(a, buf, off, count); } static ssize_t write_fw(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); return esas2r_write_fw(a, buf, off, count); } static ssize_t read_fs(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); return esas2r_read_fs(a, buf, off, count); } static ssize_t write_fs(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); int length = min(sizeof(struct esas2r_ioctl_fs), count); int result = 0; result = esas2r_write_fs(a, buf, off, count); if (result < 0) result = 0; return length; } static ssize_t read_vda(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); return esas2r_read_vda(a, buf, off, count); } static ssize_t write_vda(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); return esas2r_write_vda(a, buf, off, count); } static ssize_t read_live_nvram(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE); memcpy(buf, a->nvram, length); return length; } static ssize_t write_live_nvram(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); struct esas2r_request *rq; int result = -EFAULT; rq = esas2r_alloc_request(a); if (rq == NULL) return -ENOMEM; if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf)) result = count; esas2r_free_request(a, rq); return result; } static ssize_t read_default_nvram(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf); return sizeof(struct esas2r_sas_nvram); } static ssize_t read_hw(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE); if (!a->local_atto_ioctl) return -ENOMEM; if 
(handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS) return -ENOMEM; memcpy(buf, a->local_atto_ioctl, length); return length; } static ssize_t write_hw(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); int length = min(sizeof(struct atto_ioctl), count); if (!a->local_atto_ioctl) { a->local_atto_ioctl = kmalloc(sizeof(struct atto_ioctl), GFP_KERNEL); if (a->local_atto_ioctl == NULL) { esas2r_log(ESAS2R_LOG_WARN, "write_hw kzalloc failed for %zu bytes", sizeof(struct atto_ioctl)); return -ENOMEM; } } memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl)); memcpy(a->local_atto_ioctl, buf, length); return length; } #define ESAS2R_RW_BIN_ATTR(_name) \ struct bin_attribute bin_attr_ ## _name = { \ .attr = \ { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \ .size = 0, \ .read = read_ ## _name, \ .write = write_ ## _name } ESAS2R_RW_BIN_ATTR(fw); ESAS2R_RW_BIN_ATTR(fs); ESAS2R_RW_BIN_ATTR(vda); ESAS2R_RW_BIN_ATTR(hw); ESAS2R_RW_BIN_ATTR(live_nvram); struct bin_attribute bin_attr_default_nvram = { .attr = { .name = "default_nvram", .mode = S_IRUGO }, .size = 0, .read = read_default_nvram, .write = NULL }; static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .show_info = esas2r_show_info, .name = ESAS2R_LONGNAME, .info = esas2r_info, .ioctl = esas2r_ioctl, .queuecommand = esas2r_queuecommand, .eh_abort_handler = esas2r_eh_abort, .eh_device_reset_handler = esas2r_device_reset, .eh_bus_reset_handler = esas2r_bus_reset, .eh_host_reset_handler = esas2r_host_reset, .eh_target_reset_handler = esas2r_target_reset, .can_queue = 128, .this_id = -1, .sg_tablesize = SG_CHUNK_SIZE, .cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN, .proc_name = ESAS2R_DRVR_NAME, .change_queue_depth = scsi_change_queue_depth, .max_sectors = 0xFFFF, }; int sgl_page_size = 512; module_param(sgl_page_size, int, 0); MODULE_PARM_DESC(sgl_page_size, "Scatter/gather list (SGL) page size in number of S/G " "entries. If your application is doing a lot of very large " "transfers, you may want to increase the SGL page size. " "Default 512."); int num_sg_lists = 1024; module_param(num_sg_lists, int, 0); MODULE_PARM_DESC(num_sg_lists, "Number of scatter/gather lists. Default 1024."); int sg_tablesize = SG_CHUNK_SIZE; module_param(sg_tablesize, int, 0); MODULE_PARM_DESC(sg_tablesize, "Maximum number of entries in a scatter/gather table."); int num_requests = 256; module_param(num_requests, int, 0); MODULE_PARM_DESC(num_requests, "Number of requests. Default 256."); int num_ae_requests = 4; module_param(num_ae_requests, int, 0); MODULE_PARM_DESC(num_ae_requests, "Number of VDA asynchronous event requests. Default 4."); int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN; module_param(cmd_per_lun, int, 0); MODULE_PARM_DESC(cmd_per_lun, "Maximum number of commands per LUN. Default " DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) "."); int can_queue = 128; module_param(can_queue, int, 0); MODULE_PARM_DESC(can_queue, "Maximum number of commands per adapter. Default 128."); int esas2r_max_sectors = 0xFFFF; module_param(esas2r_max_sectors, int, 0); MODULE_PARM_DESC(esas2r_max_sectors, "Maximum number of disk sectors in a single data transfer. " "Default 65535 (largest possible setting)."); int interrupt_mode = 1; module_param(interrupt_mode, int, 0); MODULE_PARM_DESC(interrupt_mode, "Defines the interrupt mode to use. 0 for legacy" ", 1 for MSI. 
Default is MSI (1)."); static const struct pci_device_id esas2r_pci_table[] = { { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049, 0, 0, 0 }, { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A, 0, 0, 0 }, { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B, 0, 0, 0 }, { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C, 0, 0, 0 }, { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D, 0, 0, 0 }, { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, esas2r_pci_table); static int esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id); static void esas2r_remove(struct pci_dev *pcid); static struct pci_driver esas2r_pci_driver = { .name = ESAS2R_DRVR_NAME, .id_table = esas2r_pci_table, .probe = esas2r_probe, .remove = esas2r_remove, .driver.pm = &esas2r_pm_ops, }; static int esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id) { struct Scsi_Host *host = NULL; struct esas2r_adapter *a; int err; size_t host_alloc_size = sizeof(struct esas2r_adapter) + ((num_requests) + 1) * sizeof(struct esas2r_request); esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev), "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x", pcid->vendor, pcid->device, pcid->subsystem_vendor, pcid->subsystem_device); esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), "before pci_enable_device() " "enable_cnt: %d", pcid->enable_cnt.counter); err = pci_enable_device(pcid); if (err != 0) { esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev), "pci_enable_device() FAIL (%d)", err); return -ENODEV; } esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), "pci_enable_device() OK"); esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), "after pci_enable_device() enable_cnt: %d", pcid->enable_cnt.counter); host = scsi_host_alloc(&driver_template, host_alloc_size); if (host == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL"); return -ENODEV; } memset(host->hostdata, 0, host_alloc_size); a = (struct esas2r_adapter *)host->hostdata; esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host); /* override max LUN and max target id */ host->max_id = ESAS2R_MAX_ID + 1; host->max_lun = 255; /* we can handle 16-byte CDbs */ host->max_cmd_len = 16; host->can_queue = can_queue; host->cmd_per_lun = cmd_per_lun; host->this_id = host->max_id + 1; host->max_channel = 0; host->unique_id = found_adapters; host->sg_tablesize = sg_tablesize; host->max_sectors = esas2r_max_sectors; /* set to bus master for BIOses that don't do it for us */ esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called"); pci_set_master(pcid); if (!esas2r_init_adapter(host, pcid, found_adapters)) { esas2r_log(ESAS2R_LOG_CRIT, "unable to initialize device at PCI bus %x:%x", pcid->bus->number, pcid->devfn); esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), "scsi_host_put() called"); scsi_host_put(host); return 0; } esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid, host->hostdata); pci_set_drvdata(pcid, host); esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called"); err = scsi_add_host(host, &pcid->dev); if (err) { esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err); esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev), "scsi_add_host() FAIL"); esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), "scsi_host_put() called"); scsi_host_put(host); esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), "pci_set_drvdata(%p, NULL) called", pcid); pci_set_drvdata(pcid, NULL); return -ENODEV; } esas2r_fw_event_on(a); esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), "scsi_scan_host() called"); 
scsi_scan_host(host); /* Add sysfs binary files */ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw)) esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), "Failed to create sysfs binary file: fw"); else a->sysfs_fw_created = 1; if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs)) esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), "Failed to create sysfs binary file: fs"); else a->sysfs_fs_created = 1; if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda)) esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), "Failed to create sysfs binary file: vda"); else a->sysfs_vda_created = 1; if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw)) esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), "Failed to create sysfs binary file: hw"); else a->sysfs_hw_created = 1; if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram)) esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), "Failed to create sysfs binary file: live_nvram"); else a->sysfs_live_nvram_created = 1; if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_default_nvram)) esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), "Failed to create sysfs binary file: default_nvram"); else a->sysfs_default_nvram_created = 1; found_adapters++; return 0; } static void esas2r_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_remove(%p) called; " "host:%p", pdev, host); esas2r_kill_adapter(a->index); found_adapters--; } static int __init esas2r_init(void) { int i; esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); /* verify valid parameters */ if (can_queue < 1) { esas2r_log(ESAS2R_LOG_WARN, "warning: can_queue must be at least 1, value " "forced."); can_queue = 1; } else if (can_queue > 2048) { esas2r_log(ESAS2R_LOG_WARN, "warning: can_queue must be no larger than 2048, " "value forced."); can_queue = 2048; } if (cmd_per_lun < 1) { esas2r_log(ESAS2R_LOG_WARN, "warning: cmd_per_lun must be at least 1, value " "forced."); cmd_per_lun = 1; } else if (cmd_per_lun > 2048) { esas2r_log(ESAS2R_LOG_WARN, "warning: cmd_per_lun must be no larger than " "2048, value forced."); cmd_per_lun = 2048; } if (sg_tablesize < 32) { esas2r_log(ESAS2R_LOG_WARN, "warning: sg_tablesize must be at least 32, " "value forced."); sg_tablesize = 32; } if (esas2r_max_sectors < 1) { esas2r_log(ESAS2R_LOG_WARN, "warning: esas2r_max_sectors must be at least " "1, value forced."); esas2r_max_sectors = 1; } else if (esas2r_max_sectors > 0xffff) { esas2r_log(ESAS2R_LOG_WARN, "warning: esas2r_max_sectors must be no larger " "than 0xffff, value forced."); esas2r_max_sectors = 0xffff; } sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1); if (sgl_page_size < SGL_PG_SZ_MIN) sgl_page_size = SGL_PG_SZ_MIN; else if (sgl_page_size > SGL_PG_SZ_MAX) sgl_page_size = SGL_PG_SZ_MAX; if (num_sg_lists < NUM_SGL_MIN) num_sg_lists = NUM_SGL_MIN; else if (num_sg_lists > NUM_SGL_MAX) num_sg_lists = NUM_SGL_MAX; if (num_requests < NUM_REQ_MIN) num_requests = NUM_REQ_MIN; else if (num_requests > NUM_REQ_MAX) num_requests = NUM_REQ_MAX; if (num_ae_requests < NUM_AE_MIN) num_ae_requests = NUM_AE_MIN; else if (num_ae_requests > NUM_AE_MAX) num_ae_requests = NUM_AE_MAX; /* set up other globals */ for (i = 0; i < MAX_ADAPTERS; i++) esas2r_adapters[i] = NULL; return pci_register_driver(&esas2r_pci_driver); } /* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */ static const struct 
file_operations esas2r_proc_fops = { .compat_ioctl = compat_ptr_ioctl, .unlocked_ioctl = esas2r_proc_ioctl, }; static const struct proc_ops esas2r_proc_ops = { .proc_lseek = default_llseek, .proc_ioctl = esas2r_proc_ioctl, #ifdef CONFIG_COMPAT .proc_compat_ioctl = compat_ptr_ioctl, #endif }; static struct Scsi_Host *esas2r_proc_host; static int esas2r_proc_major; long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { return esas2r_ioctl_handler(esas2r_proc_host->hostdata, cmd, (void __user *)arg); } static void __exit esas2r_exit(void) { esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); if (esas2r_proc_major > 0) { struct proc_dir_entry *proc_dir; esas2r_log(ESAS2R_LOG_INFO, "unregister proc"); proc_dir = scsi_template_proc_dir(esas2r_proc_host->hostt); if (proc_dir) remove_proc_entry(ATTONODE_NAME, proc_dir); unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME); esas2r_proc_major = 0; } esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called"); pci_unregister_driver(&esas2r_pci_driver); } int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) { struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; struct esas2r_target *t; int dev_count = 0; esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no); seq_printf(m, ESAS2R_LONGNAME "\n" "Driver version: "ESAS2R_VERSION_STR "\n" "Flash version: %s\n" "Firmware version: %s\n" "Copyright "ESAS2R_COPYRIGHT_YEARS "\n" "http://www.attotech.com\n" "\n", a->flash_rev, a->fw_rev[0] ? a->fw_rev : "(none)"); seq_printf(m, "Adapter information:\n" "--------------------\n" "Model: %s\n" "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n", esas2r_get_model_name(a), a->nvram->sas_addr[0], a->nvram->sas_addr[1], a->nvram->sas_addr[2], a->nvram->sas_addr[3], a->nvram->sas_addr[4], a->nvram->sas_addr[5], a->nvram->sas_addr[6], a->nvram->sas_addr[7]); seq_puts(m, "\n" "Discovered devices:\n" "\n" " # Target ID\n" "---------------\n"); for (t = a->targetdb; t < a->targetdb_end; t++) if (t->buffered_target_state == TS_PRESENT) { seq_printf(m, " %3d %3d\n", ++dev_count, (u16)(uintptr_t)(t - a->targetdb)); } if (dev_count == 0) seq_puts(m, "none\n"); seq_putc(m, '\n'); return 0; } const char *esas2r_info(struct Scsi_Host *sh) { struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; static char esas2r_info_str[512]; esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), "esas2r_info() called"); /* * if we haven't done so already, register as a char driver * and stick a node under "/proc/scsi/esas2r/ATTOnode" */ if (esas2r_proc_major <= 0) { esas2r_proc_host = sh; esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME, &esas2r_proc_fops); esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev), "register_chrdev (major %d)", esas2r_proc_major); if (esas2r_proc_major > 0) { struct proc_dir_entry *proc_dir; struct proc_dir_entry *pde = NULL; proc_dir = scsi_template_proc_dir(sh->hostt); if (proc_dir) pde = proc_create(ATTONODE_NAME, 0, proc_dir, &esas2r_proc_ops); if (!pde) { esas2r_log_dev(ESAS2R_LOG_WARN, &(sh->shost_gendev), "failed to create_proc_entry"); esas2r_proc_major = -1; } } } sprintf(esas2r_info_str, ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)" " driver version: "ESAS2R_VERSION_STR " firmware version: " "%s\n", a->pcid->bus->number, a->pcid->devfn, a->pcid->irq, a->fw_rev[0] ? 
a->fw_rev : "(none)"); return esas2r_info_str; } /* Callback for building a request scatter/gather list */ static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr) { u32 len; if (likely(sgc->cur_offset == sgc->exp_offset)) { /* * the normal case: caller used all bytes from previous call, so * expected offset is the same as the current offset. */ if (sgc->sgel_count < sgc->num_sgel) { /* retrieve next segment, except for first time */ if (sgc->exp_offset > (u8 *)0) { /* advance current segment */ sgc->cur_sgel = sg_next(sgc->cur_sgel); ++(sgc->sgel_count); } len = sg_dma_len(sgc->cur_sgel); (*addr) = sg_dma_address(sgc->cur_sgel); /* save the total # bytes returned to caller so far */ sgc->exp_offset += len; } else { len = 0; } } else if (sgc->cur_offset < sgc->exp_offset) { /* * caller did not use all bytes from previous call. need to * compute the address based on current segment. */ len = sg_dma_len(sgc->cur_sgel); (*addr) = sg_dma_address(sgc->cur_sgel); sgc->exp_offset -= len; /* calculate PA based on prev segment address and offsets */ *addr = *addr + (sgc->cur_offset - sgc->exp_offset); sgc->exp_offset += len; /* re-calculate length based on offset */ len = lower_32_bits( sgc->exp_offset - sgc->cur_offset); } else { /* if ( sgc->cur_offset > sgc->exp_offset ) */ /* * we don't expect the caller to skip ahead. * cur_offset will never exceed the len we return */ len = 0; } return len; } int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { struct esas2r_adapter *a = (struct esas2r_adapter *)cmd->device->host->hostdata; struct esas2r_request *rq; struct esas2r_sg_context sgc; unsigned bufflen; /* Assume success, if it fails we will fix the result later. */ cmd->result = DID_OK << 16; if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) { cmd->result = DID_NO_CONNECT << 16; scsi_done(cmd); return 0; } rq = esas2r_alloc_request(a); if (unlikely(rq == NULL)) { esas2r_debug("esas2r_alloc_request failed"); return SCSI_MLQUEUE_HOST_BUSY; } rq->cmd = cmd; bufflen = scsi_bufflen(cmd); if (likely(bufflen != 0)) { if (cmd->sc_data_direction == DMA_TO_DEVICE) rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); else if (cmd->sc_data_direction == DMA_FROM_DEVICE) rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD); } memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len); rq->vrq->scsi.length = cpu_to_le32(bufflen); rq->target_id = cmd->device->id; rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun); rq->sense_buf = cmd->sense_buffer; rq->sense_len = SCSI_SENSE_BUFFERSIZE; esas2r_sgc_init(&sgc, a, rq, NULL); sgc.length = bufflen; sgc.cur_offset = NULL; sgc.cur_sgel = scsi_sglist(cmd); sgc.exp_offset = NULL; sgc.num_sgel = scsi_dma_map(cmd); sgc.sgel_count = 0; if (unlikely(sgc.num_sgel < 0)) { esas2r_free_request(a, rq); return SCSI_MLQUEUE_HOST_BUSY; } sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc; if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) { scsi_dma_unmap(cmd); esas2r_free_request(a, rq); return SCSI_MLQUEUE_HOST_BUSY; } esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id, (int)cmd->device->lun); esas2r_start_request(a, rq); return 0; } static void complete_task_management_request(struct esas2r_adapter *a, struct esas2r_request *rq) { (*rq->task_management_status_ptr) = rq->req_stat; esas2r_free_request(a, rq); } /* * Searches the specified queue for the specified queue for the command * to abort. 
* * Return 0 on failure, 1 if command was not found, 2 if command was found */ static int esas2r_check_active_queue(struct esas2r_adapter *a, struct esas2r_request **abort_request, struct scsi_cmnd *cmd, struct list_head *queue) { bool found = false; struct esas2r_request *ar = *abort_request; struct esas2r_request *rq; struct list_head *element, *next; list_for_each_safe(element, next, queue) { rq = list_entry(element, struct esas2r_request, req_list); if (rq->cmd == cmd) { /* Found the request. See what to do with it. */ if (queue == &a->active_list) { /* * We are searching the active queue, which * means that we need to send an abort request * to the firmware. */ ar = esas2r_alloc_request(a); if (ar == NULL) { esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev), "unable to allocate an abort request for cmd %p", cmd); return 0; /* Failure */ } /* * Task management request must be formatted * with a lock held. */ ar->sense_len = 0; ar->vrq->scsi.length = 0; ar->target_id = rq->target_id; ar->vrq->scsi.flags |= cpu_to_le32( (u8)le32_to_cpu(rq->vrq->scsi.flags)); memset(ar->vrq->scsi.cdb, 0, sizeof(ar->vrq->scsi.cdb)); ar->vrq->scsi.flags |= cpu_to_le32( FCP_CMND_TRM); ar->vrq->scsi.u.abort_handle = rq->vrq->scsi.handle; } else { /* * The request is pending but not active on * the firmware. Just free it now and we'll * report the successful abort below. */ list_del_init(&rq->req_list); esas2r_free_request(a, rq); } found = true; break; } } if (!found) return 1; /* Not found */ return 2; /* found */ } int esas2r_eh_abort(struct scsi_cmnd *cmd) { struct esas2r_adapter *a = (struct esas2r_adapter *)cmd->device->host->hostdata; struct esas2r_request *abort_request = NULL; unsigned long flags; struct list_head *queue; int result; esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd); if (test_bit(AF_DEGRADED_MODE, &a->flags)) { cmd->result = DID_ABORT << 16; scsi_set_resid(cmd, 0); scsi_done(cmd); return SUCCESS; } spin_lock_irqsave(&a->queue_lock, flags); /* * Run through the defer and active queues looking for the request * to abort. */ queue = &a->defer_list; check_active_queue: result = esas2r_check_active_queue(a, &abort_request, cmd, queue); if (!result) { spin_unlock_irqrestore(&a->queue_lock, flags); return FAILED; } else if (result == 2 && (queue == &a->defer_list)) { queue = &a->active_list; goto check_active_queue; } spin_unlock_irqrestore(&a->queue_lock, flags); if (abort_request) { u8 task_management_status = RS_PENDING; /* * the request is already active, so we need to tell * the firmware to abort it and wait for the response. */ abort_request->comp_cb = complete_task_management_request; abort_request->task_management_status_ptr = &task_management_status; esas2r_start_request(a, abort_request); if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); while (task_management_status == RS_PENDING) msleep(10); /* * Once we get here, the original request will have been * completed by the firmware and the abort request will have * been cleaned up. we're done! */ return SUCCESS; } /* * If we get here, either we found the inactive request and * freed it, or we didn't find it at all. Either way, success! 
*/ cmd->result = DID_ABORT << 16; scsi_set_resid(cmd, 0); scsi_done(cmd); return SUCCESS; } static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset) { struct esas2r_adapter *a = (struct esas2r_adapter *)cmd->device->host->hostdata; if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; if (host_reset) esas2r_reset_adapter(a); else esas2r_reset_bus(a); /* above call sets the AF_OS_RESET flag. wait for it to clear. */ while (test_bit(AF_OS_RESET, &a->flags)) { msleep(10); if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; } if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; return SUCCESS; } int esas2r_host_reset(struct scsi_cmnd *cmd) { esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd); return esas2r_host_bus_reset(cmd, true); } int esas2r_bus_reset(struct scsi_cmnd *cmd) { esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd); return esas2r_host_bus_reset(cmd, false); } static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset) { struct esas2r_adapter *a = (struct esas2r_adapter *)cmd->device->host->hostdata; struct esas2r_request *rq; u8 task_management_status = RS_PENDING; bool completed; if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; retry: rq = esas2r_alloc_request(a); if (rq == NULL) { if (target_reset) { esas2r_log(ESAS2R_LOG_CRIT, "unable to allocate a request for a " "target reset (%d)!", cmd->device->id); } else { esas2r_log(ESAS2R_LOG_CRIT, "unable to allocate a request for a " "device reset (%d:%llu)!", cmd->device->id, cmd->device->lun); } return FAILED; } rq->target_id = cmd->device->id; rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun); rq->req_stat = RS_PENDING; rq->comp_cb = complete_task_management_request; rq->task_management_status_ptr = &task_management_status; if (target_reset) { esas2r_debug("issuing target reset (%p) to id %d", rq, cmd->device->id); completed = esas2r_send_task_mgmt(a, rq, 0x20); } else { esas2r_debug("issuing device reset (%p) to id %d lun %d", rq, cmd->device->id, cmd->device->lun); completed = esas2r_send_task_mgmt(a, rq, 0x10); } if (completed) { /* Task management cmd completed right away, need to free it. */ esas2r_free_request(a, rq); } else { /* * Wait for firmware to complete the request. Completion * callback will free it. */ while (task_management_status == RS_PENDING) msleep(10); } if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; if (task_management_status == RS_BUSY) { /* * Busy, probably because we are flashing. Wait a bit and * try again. 
*/ msleep(100); goto retry; } return SUCCESS; } int esas2r_device_reset(struct scsi_cmnd *cmd) { esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd); return esas2r_dev_targ_reset(cmd, false); } int esas2r_target_reset(struct scsi_cmnd *cmd) { esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd); return esas2r_dev_targ_reset(cmd, true); } void esas2r_log_request_failure(struct esas2r_adapter *a, struct esas2r_request *rq) { u8 reqstatus = rq->req_stat; if (reqstatus == RS_SUCCESS) return; if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { if (reqstatus == RS_SCSI_ERROR) { if (rq->func_rsp.scsi_rsp.sense_len >= 13) { esas2r_log(ESAS2R_LOG_WARN, "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x", rq->sense_buf[2], rq->sense_buf[12], rq->sense_buf[13], rq->vrq->scsi.cdb[0]); } else { esas2r_log(ESAS2R_LOG_WARN, "request failure - SCSI error CDB:%x\n", rq->vrq->scsi.cdb[0]); } } else if ((rq->vrq->scsi.cdb[0] != INQUIRY && rq->vrq->scsi.cdb[0] != REPORT_LUNS) || (reqstatus != RS_SEL && reqstatus != RS_SEL2)) { if ((reqstatus == RS_UNDERRUN) && (rq->vrq->scsi.cdb[0] == INQUIRY)) { /* Don't log inquiry underruns */ } else { esas2r_log(ESAS2R_LOG_WARN, "request failure - cdb:%x reqstatus:%d target:%d", rq->vrq->scsi.cdb[0], reqstatus, rq->target_id); } } } } void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq) { u32 starttime; u32 timeout; starttime = jiffies_to_msecs(jiffies); timeout = rq->timeout ? rq->timeout : 5000; while (true) { esas2r_polled_interrupt(a); if (rq->req_stat != RS_STARTED) break; schedule_timeout_interruptible(msecs_to_jiffies(100)); if ((jiffies_to_msecs(jiffies) - starttime) > timeout) { esas2r_hdebug("request TMO"); esas2r_bugon(); rq->req_stat = RS_TIMEOUT; esas2r_local_reset_adapter(a); return; } } } u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo) { u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1); u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE; if (a->window_base != base) { esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP, base | MVRPW1R_ENABLE); esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP); a->window_base = base; } return offset; } /* Read a block of data from chip memory */ bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from, u32 size) { u8 *end = (u8 *)to; while (size) { u32 len; u32 offset; u32 iatvr; iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE); esas2r_map_data_window(a, iatvr); offset = from & (MW_DATA_WINDOW_SIZE - 1); len = size; if (len > MW_DATA_WINDOW_SIZE - offset) len = MW_DATA_WINDOW_SIZE - offset; from += len; size -= len; while (len--) { *end++ = esas2r_read_data_byte(a, offset); offset++; } } return true; } void esas2r_nuxi_mgt_data(u8 function, void *data) { struct atto_vda_grp_info *g; struct atto_vda_devinfo *d; struct atto_vdapart_info *p; struct atto_vda_dh_info *h; struct atto_vda_metrics_info *m; struct atto_vda_schedule_info *s; struct atto_vda_buzzer_info *b; u8 i; switch (function) { case VDAMGT_BUZZER_INFO: case VDAMGT_BUZZER_SET: b = (struct atto_vda_buzzer_info *)data; b->duration = le32_to_cpu(b->duration); break; case VDAMGT_SCHEDULE_INFO: case VDAMGT_SCHEDULE_EVENT: s = (struct atto_vda_schedule_info *)data; s->id = le32_to_cpu(s->id); break; case VDAMGT_DEV_INFO: case VDAMGT_DEV_CLEAN: case VDAMGT_DEV_PT_INFO: case VDAMGT_DEV_FEATURES: case VDAMGT_DEV_PT_FEATURES: case VDAMGT_DEV_OPERATION: d = (struct atto_vda_devinfo *)data; d->capacity = le64_to_cpu(d->capacity); d->block_size = le32_to_cpu(d->block_size); d->ses_dev_index = 
le16_to_cpu(d->ses_dev_index); d->target_id = le16_to_cpu(d->target_id); d->lun = le16_to_cpu(d->lun); d->features = le16_to_cpu(d->features); break; case VDAMGT_GRP_INFO: case VDAMGT_GRP_CREATE: case VDAMGT_GRP_DELETE: case VDAMGT_ADD_STORAGE: case VDAMGT_MEMBER_ADD: case VDAMGT_GRP_COMMIT: case VDAMGT_GRP_REBUILD: case VDAMGT_GRP_COMMIT_INIT: case VDAMGT_QUICK_RAID: case VDAMGT_GRP_FEATURES: case VDAMGT_GRP_COMMIT_INIT_AUTOMAP: case VDAMGT_QUICK_RAID_INIT_AUTOMAP: case VDAMGT_SPARE_LIST: case VDAMGT_SPARE_ADD: case VDAMGT_SPARE_REMOVE: case VDAMGT_LOCAL_SPARE_ADD: case VDAMGT_GRP_OPERATION: g = (struct atto_vda_grp_info *)data; g->capacity = le64_to_cpu(g->capacity); g->block_size = le32_to_cpu(g->block_size); g->interleave = le32_to_cpu(g->interleave); g->features = le16_to_cpu(g->features); for (i = 0; i < 32; i++) g->members[i] = le16_to_cpu(g->members[i]); break; case VDAMGT_PART_INFO: case VDAMGT_PART_MAP: case VDAMGT_PART_UNMAP: case VDAMGT_PART_AUTOMAP: case VDAMGT_PART_SPLIT: case VDAMGT_PART_MERGE: p = (struct atto_vdapart_info *)data; p->part_size = le64_to_cpu(p->part_size); p->start_lba = le32_to_cpu(p->start_lba); p->block_size = le32_to_cpu(p->block_size); p->target_id = le16_to_cpu(p->target_id); break; case VDAMGT_DEV_HEALTH_REQ: h = (struct atto_vda_dh_info *)data; h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt); h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt); break; case VDAMGT_DEV_METRICS: m = (struct atto_vda_metrics_info *)data; for (i = 0; i < 32; i++) m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]); break; default: break; } } void esas2r_nuxi_cfg_data(u8 function, void *data) { struct atto_vda_cfg_init *ci; switch (function) { case VDA_CFG_INIT: case VDA_CFG_GET_INIT: case VDA_CFG_GET_INIT2: ci = (struct atto_vda_cfg_init *)data; ci->date_time.year = le16_to_cpu(ci->date_time.year); ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size); ci->vda_version = le32_to_cpu(ci->vda_version); ci->epoch_time = le32_to_cpu(ci->epoch_time); ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel); ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend); break; default: break; } } void esas2r_nuxi_ae_data(union atto_vda_ae *ae) { struct atto_vda_ae_raid *r = &ae->raid; struct atto_vda_ae_lu *l = &ae->lu; switch (ae->hdr.bytype) { case VDAAE_HDR_TYPE_RAID: r->dwflags = le32_to_cpu(r->dwflags); break; case VDAAE_HDR_TYPE_LU: l->dwevent = le32_to_cpu(l->dwevent); l->wphys_target_id = le16_to_cpu(l->wphys_target_id); l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id); if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) { l->id.tgtlun_raid.dwinterleave = le32_to_cpu(l->id.tgtlun_raid.dwinterleave); l->id.tgtlun_raid.dwblock_size = le32_to_cpu(l->id.tgtlun_raid.dwblock_size); } break; case VDAAE_HDR_TYPE_DISK: default: break; } } void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq) { unsigned long flags; esas2r_rq_destroy_request(rq, a); spin_lock_irqsave(&a->request_lock, flags); list_add(&rq->comp_list, &a->avail_request); spin_unlock_irqrestore(&a->request_lock, flags); } struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a) { struct esas2r_request *rq; unsigned long flags; spin_lock_irqsave(&a->request_lock, flags); if (unlikely(list_empty(&a->avail_request))) { spin_unlock_irqrestore(&a->request_lock, flags); return NULL; } rq = list_first_entry(&a->avail_request, struct esas2r_request, comp_list); list_del(&rq->comp_list); spin_unlock_irqrestore(&a->request_lock, flags); 
esas2r_rq_init_request(rq, a); return rq; } void esas2r_complete_request_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { esas2r_debug("completing request %p\n", rq); scsi_dma_unmap(rq->cmd); if (unlikely(rq->req_stat != RS_SUCCESS)) { esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id, rq->req_stat, rq->func_rsp.scsi_rsp.scsi_stat, rq->cmd); rq->cmd->result = ((esas2r_req_status_to_error(rq->req_stat) << 16) | rq->func_rsp.scsi_rsp.scsi_stat); if (rq->req_stat == RS_UNDERRUN) scsi_set_resid(rq->cmd, le32_to_cpu(rq->func_rsp.scsi_rsp. residual_length)); else scsi_set_resid(rq->cmd, 0); } scsi_done(rq->cmd); esas2r_free_request(a, rq); } /* Run tasklet to handle stuff outside of interrupt context. */ void esas2r_adapter_tasklet(unsigned long context) { struct esas2r_adapter *a = (struct esas2r_adapter *)context; if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) { clear_bit(AF2_TIMER_TICK, &a->flags2); esas2r_timer_tick(a); } if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) { clear_bit(AF2_INT_PENDING, &a->flags2); esas2r_adapter_interrupt(a); } if (esas2r_is_tasklet_pending(a)) esas2r_do_tasklet_tasks(a); if (esas2r_is_tasklet_pending(a) || (test_bit(AF2_INT_PENDING, &a->flags2)) || (test_bit(AF2_TIMER_TICK, &a->flags2))) { clear_bit(AF_TASKLET_SCHEDULED, &a->flags); esas2r_schedule_tasklet(a); } else { clear_bit(AF_TASKLET_SCHEDULED, &a->flags); } } static void esas2r_timer_callback(struct timer_list *t); void esas2r_kickoff_timer(struct esas2r_adapter *a) { timer_setup(&a->timer, esas2r_timer_callback, 0); a->timer.expires = jiffies + msecs_to_jiffies(100); add_timer(&a->timer); } static void esas2r_timer_callback(struct timer_list *t) { struct esas2r_adapter *a = from_timer(a, t, timer); set_bit(AF2_TIMER_TICK, &a->flags2); esas2r_schedule_tasklet(a); esas2r_kickoff_timer(a); } /* * Firmware events need to be handled outside of interrupt context * so we schedule a delayed_work to handle them. 
*/ static void esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event) { unsigned long flags; struct esas2r_adapter *a = fw_event->a; spin_lock_irqsave(&a->fw_event_lock, flags); list_del(&fw_event->list); kfree(fw_event); spin_unlock_irqrestore(&a->fw_event_lock, flags); } void esas2r_fw_event_off(struct esas2r_adapter *a) { unsigned long flags; spin_lock_irqsave(&a->fw_event_lock, flags); a->fw_events_off = 1; spin_unlock_irqrestore(&a->fw_event_lock, flags); } void esas2r_fw_event_on(struct esas2r_adapter *a) { unsigned long flags; spin_lock_irqsave(&a->fw_event_lock, flags); a->fw_events_off = 0; spin_unlock_irqrestore(&a->fw_event_lock, flags); } static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id) { int ret; struct scsi_device *scsi_dev; scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0); if (scsi_dev) { esas2r_log_dev( ESAS2R_LOG_WARN, &(scsi_dev-> sdev_gendev), "scsi device already exists at id %d", target_id); scsi_device_put(scsi_dev); } else { esas2r_log_dev( ESAS2R_LOG_INFO, &(a->host-> shost_gendev), "scsi_add_device() called for 0:%d:0", target_id); ret = scsi_add_device(a->host, 0, target_id, 0); if (ret) { esas2r_log_dev( ESAS2R_LOG_CRIT, &(a->host-> shost_gendev), "scsi_add_device failed with %d for id %d", ret, target_id); } } } static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id) { struct scsi_device *scsi_dev; scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0); if (scsi_dev) { scsi_device_set_state(scsi_dev, SDEV_OFFLINE); esas2r_log_dev( ESAS2R_LOG_INFO, &(scsi_dev-> sdev_gendev), "scsi_remove_device() called for 0:%d:0", target_id); scsi_remove_device(scsi_dev); esas2r_log_dev( ESAS2R_LOG_INFO, &(scsi_dev-> sdev_gendev), "scsi_device_put() called"); scsi_device_put(scsi_dev); } else { esas2r_log_dev( ESAS2R_LOG_WARN, &(a->host->shost_gendev), "no target found at id %d", target_id); } } /* * Sends a firmware asynchronous event to anyone who happens to be * listening on the defined ATTO VDA event ports. */ static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event) { struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data; char *type; switch (ae->vda_ae.hdr.bytype) { case VDAAE_HDR_TYPE_RAID: type = "RAID group state change"; break; case VDAAE_HDR_TYPE_LU: type = "Mapped destination LU change"; break; case VDAAE_HDR_TYPE_DISK: type = "Physical disk inventory change"; break; case VDAAE_HDR_TYPE_RESET: type = "Firmware reset"; break; case VDAAE_HDR_TYPE_LOG_INFO: type = "Event Log message (INFO level)"; break; case VDAAE_HDR_TYPE_LOG_WARN: type = "Event Log message (WARN level)"; break; case VDAAE_HDR_TYPE_LOG_CRIT: type = "Event Log message (CRIT level)"; break; case VDAAE_HDR_TYPE_LOG_FAIL: type = "Event Log message (FAIL level)"; break; case VDAAE_HDR_TYPE_NVC: type = "NVCache change"; break; case VDAAE_HDR_TYPE_TLG_INFO: type = "Time stamped log message (INFO level)"; break; case VDAAE_HDR_TYPE_TLG_WARN: type = "Time stamped log message (WARN level)"; break; case VDAAE_HDR_TYPE_TLG_CRIT: type = "Time stamped log message (CRIT level)"; break; case VDAAE_HDR_TYPE_PWRMGT: type = "Power management"; break; case VDAAE_HDR_TYPE_MUTE: type = "Mute button pressed"; break; case VDAAE_HDR_TYPE_DEV: type = "Device attribute change"; break; default: type = "Unknown"; break; } esas2r_log(ESAS2R_LOG_WARN, "An async event of type \"%s\" was received from the firmware. 
The event contents are:", type); esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae, ae->vda_ae.hdr.bylength); } static void esas2r_firmware_event_work(struct work_struct *work) { struct esas2r_fw_event_work *fw_event = container_of(work, struct esas2r_fw_event_work, work.work); struct esas2r_adapter *a = fw_event->a; u16 target_id = *(u16 *)&fw_event->data[0]; if (a->fw_events_off) goto done; switch (fw_event->type) { case fw_event_null: break; /* do nothing */ case fw_event_lun_change: esas2r_remove_device(a, target_id); esas2r_add_device(a, target_id); break; case fw_event_present: esas2r_add_device(a, target_id); break; case fw_event_not_present: esas2r_remove_device(a, target_id); break; case fw_event_vda_ae: esas2r_send_ae_event(fw_event); break; } done: esas2r_free_fw_event(fw_event); } void esas2r_queue_fw_event(struct esas2r_adapter *a, enum fw_event_type type, void *data, int data_sz) { struct esas2r_fw_event_work *fw_event; unsigned long flags; fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC); if (!fw_event) { esas2r_log(ESAS2R_LOG_WARN, "esas2r_queue_fw_event failed to alloc"); return; } if (type == fw_event_vda_ae) { struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data; ae->signature = ESAS2R_VDA_EVENT_SIG; ae->bus_number = a->pcid->bus->number; ae->devfn = a->pcid->devfn; memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae)); } else { memcpy(fw_event->data, data, data_sz); } fw_event->type = type; fw_event->a = a; spin_lock_irqsave(&a->fw_event_lock, flags); list_add_tail(&fw_event->list, &a->fw_event_list); INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work); queue_delayed_work_on( smp_processor_id(), a->fw_event_q, &fw_event->work, msecs_to_jiffies(1)); spin_unlock_irqrestore(&a->fw_event_lock, flags); } void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id, u8 state) { if (state == TS_LUN_CHANGE) esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id, sizeof(targ_id)); else if (state == TS_PRESENT) esas2r_queue_fw_event(a, fw_event_present, &targ_id, sizeof(targ_id)); else if (state == TS_NOT_PRESENT) esas2r_queue_fw_event(a, fw_event_not_present, &targ_id, sizeof(targ_id)); } /* Translate status to a Linux SCSI mid-layer error code */ int esas2r_req_status_to_error(u8 req_stat) { switch (req_stat) { case RS_OVERRUN: case RS_UNDERRUN: case RS_SUCCESS: /* * NOTE: SCSI mid-layer wants a good status for a SCSI error, because * it will check the scsi_stat value in the completion anyway. */ case RS_SCSI_ERROR: return DID_OK; case RS_SEL: case RS_SEL2: return DID_NO_CONNECT; case RS_RESET: return DID_RESET; case RS_ABORTED: return DID_ABORT; case RS_BUSY: return DID_BUS_BUSY; } /* everything else is just an error. */ return DID_ERROR; } module_init(esas2r_init); module_exit(esas2r_exit);
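/*
 * Editor's sketch (not part of esas2r_main.c): a minimal, self-contained
 * illustration of the deferral pattern used above by
 * esas2r_queue_fw_event() and esas2r_firmware_event_work() -- queue an
 * event node under a spinlock from (possibly) atomic context, then let a
 * workqueue handle it in process context.  All demo_* names are
 * hypothetical, system_wq stands in for the driver's private fw_event_q,
 * and module/cleanup plumbing is omitted.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_event {
	struct list_head list;
	struct delayed_work work;
	int code;
};

static LIST_HEAD(demo_event_list);
static DEFINE_SPINLOCK(demo_event_lock);

/* Runs in process context; mirrors esas2r_firmware_event_work(). */
static void demo_event_work(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, struct demo_event,
					     work.work);
	unsigned long flags;

	pr_info("demo: handling event %d outside interrupt context\n",
		ev->code);

	/* Unlink and free the node, as esas2r_free_fw_event() does. */
	spin_lock_irqsave(&demo_event_lock, flags);
	list_del(&ev->list);
	spin_unlock_irqrestore(&demo_event_lock, flags);
	kfree(ev);
}

/* May be called from atomic context; mirrors esas2r_queue_fw_event(). */
static void demo_queue_event(int code)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
	unsigned long flags;

	if (!ev)
		return;

	ev->code = code;
	INIT_DELAYED_WORK(&ev->work, demo_event_work);

	spin_lock_irqsave(&demo_event_lock, flags);
	list_add_tail(&ev->list, &demo_event_list);
	queue_delayed_work(system_wq, &ev->work, msecs_to_jiffies(1));
	spin_unlock_irqrestore(&demo_event_lock, flags);
}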
linux-master
drivers/scsi/esas2r/esas2r_main.c
/* * linux/drivers/scsi/esas2r/esas2r_ioctl.c * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include "esas2r.h" /* * Buffered ioctl handlers. A buffered ioctl is one which requires that we * allocate a DMA-able memory area to communicate with the firmware. In * order to prevent continually allocating and freeing consistent memory, * we will allocate a global buffer the first time we need it and re-use * it for subsequent ioctl calls that require it. 
*/ u8 *esas2r_buffered_ioctl; dma_addr_t esas2r_buffered_ioctl_addr; u32 esas2r_buffered_ioctl_size; struct pci_dev *esas2r_buffered_ioctl_pcid; static DEFINE_SEMAPHORE(buffered_ioctl_semaphore, 1); typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *, struct esas2r_request *, struct esas2r_sg_context *, void *); typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *, struct esas2r_request *, void *); struct esas2r_buffered_ioctl { struct esas2r_adapter *a; void *ioctl; u32 length; u32 control_code; u32 offset; BUFFERED_IOCTL_CALLBACK callback; void *context; BUFFERED_IOCTL_DONE_CALLBACK done_callback; void *done_context; }; static void complete_fm_api_req(struct esas2r_adapter *a, struct esas2r_request *rq) { a->fm_api_command_done = 1; wake_up_interruptible(&a->fm_api_waiter); } /* Callbacks for building scatter/gather lists for FM API requests */ static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr) { struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; int offset = sgc->cur_offset - a->save_offset; (*addr) = a->firmware.phys + offset; return a->firmware.orig_len - offset; } static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr) { struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; int offset = sgc->cur_offset - a->save_offset; (*addr) = a->firmware.header_buff_phys + offset; return sizeof(struct esas2r_flash_img) - offset; } /* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi) { struct esas2r_request *rq; if (mutex_lock_interruptible(&a->fm_api_mutex)) { fi->status = FI_STAT_BUSY; return; } rq = esas2r_alloc_request(a); if (rq == NULL) { fi->status = FI_STAT_BUSY; goto free_sem; } if (fi == &a->firmware.header) { a->firmware.header_buff = dma_alloc_coherent(&a->pcid->dev, (size_t)sizeof( struct esas2r_flash_img), (dma_addr_t *)&a-> firmware. header_buff_phys, GFP_KERNEL); if (a->firmware.header_buff == NULL) { esas2r_debug("failed to allocate header buffer!"); fi->status = FI_STAT_BUSY; goto free_req; } memcpy(a->firmware.header_buff, fi, sizeof(struct esas2r_flash_img)); a->save_offset = a->firmware.header_buff; a->fm_api_sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fm_api_header; } else { a->save_offset = (u8 *)fi; a->fm_api_sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fm_api; } rq->comp_cb = complete_fm_api_req; a->fm_api_command_done = 0; a->fm_api_sgc.cur_offset = a->save_offset; if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq, &a->fm_api_sgc)) goto all_done; /* Now wait around for it to complete. 
*/ while (!a->fm_api_command_done) wait_event_interruptible(a->fm_api_waiter, a->fm_api_command_done); all_done: if (fi == &a->firmware.header) { memcpy(fi, a->firmware.header_buff, sizeof(struct esas2r_flash_img)); dma_free_coherent(&a->pcid->dev, (size_t)sizeof(struct esas2r_flash_img), a->firmware.header_buff, (dma_addr_t)a->firmware.header_buff_phys); } free_req: esas2r_free_request(a, (struct esas2r_request *)rq); free_sem: mutex_unlock(&a->fm_api_mutex); return; } static void complete_nvr_req(struct esas2r_adapter *a, struct esas2r_request *rq) { a->nvram_command_done = 1; wake_up_interruptible(&a->nvram_waiter); } /* Callback for building scatter/gather lists for buffered ioctls */ static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc, u64 *addr) { int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl; (*addr) = esas2r_buffered_ioctl_addr + offset; return esas2r_buffered_ioctl_size - offset; } static void complete_buffered_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq) { a->buffered_ioctl_done = 1; wake_up_interruptible(&a->buffered_ioctl_waiter); } static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi) { struct esas2r_adapter *a = bi->a; struct esas2r_request *rq; struct esas2r_sg_context sgc; u8 result = IOCTL_SUCCESS; if (down_interruptible(&buffered_ioctl_semaphore)) return IOCTL_OUT_OF_RESOURCES; /* allocate a buffer or use the existing buffer. */ if (esas2r_buffered_ioctl) { if (esas2r_buffered_ioctl_size < bi->length) { /* free the too-small buffer and get a new one */ dma_free_coherent(&a->pcid->dev, (size_t)esas2r_buffered_ioctl_size, esas2r_buffered_ioctl, esas2r_buffered_ioctl_addr); goto allocate_buffer; } } else { allocate_buffer: esas2r_buffered_ioctl_size = bi->length; esas2r_buffered_ioctl_pcid = a->pcid; esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev, (size_t) esas2r_buffered_ioctl_size, & esas2r_buffered_ioctl_addr, GFP_KERNEL); } if (!esas2r_buffered_ioctl) { esas2r_log(ESAS2R_LOG_CRIT, "could not allocate %d bytes of consistent memory " "for a buffered ioctl!", bi->length); esas2r_debug("buffered ioctl alloc failure"); result = IOCTL_OUT_OF_RESOURCES; goto exit_cleanly; } memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length); rq = esas2r_alloc_request(a); if (rq == NULL) { esas2r_log(ESAS2R_LOG_CRIT, "could not allocate an internal request"); result = IOCTL_OUT_OF_RESOURCES; esas2r_debug("buffered ioctl - no requests"); goto exit_cleanly; } a->buffered_ioctl_done = 0; rq->comp_cb = complete_buffered_ioctl_req; sgc.cur_offset = esas2r_buffered_ioctl + bi->offset; sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl; sgc.length = esas2r_buffered_ioctl_size; if (!(*bi->callback)(a, rq, &sgc, bi->context)) { /* completed immediately, no need to wait */ a->buffered_ioctl_done = 0; goto free_andexit_cleanly; } /* now wait around for it to complete. 
*/ while (!a->buffered_ioctl_done) wait_event_interruptible(a->buffered_ioctl_waiter, a->buffered_ioctl_done); free_andexit_cleanly: if (result == IOCTL_SUCCESS && bi->done_callback) (*bi->done_callback)(a, rq, bi->done_context); esas2r_free_request(a, rq); exit_cleanly: if (result == IOCTL_SUCCESS) memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length); up(&buffered_ioctl_semaphore); return result; } /* SMP ioctl support */ static int smp_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) { struct atto_ioctl_smp *si = (struct atto_ioctl_smp *)esas2r_buffered_ioctl; esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP); if (!esas2r_build_sg_list(a, rq, sgc)) { si->status = ATTO_STS_OUT_OF_RSRC; return false; } esas2r_start_request(a, rq); return true; } static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si) { struct esas2r_buffered_ioctl bi; memset(&bi, 0, sizeof(bi)); bi.a = a; bi.ioctl = si; bi.length = sizeof(struct atto_ioctl_smp) + le32_to_cpu(si->req_length) + le32_to_cpu(si->rsp_length); bi.offset = 0; bi.callback = smp_ioctl_callback; return handle_buffered_ioctl(&bi); } /* CSMI ioctl support */ static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id); rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun); /* Now call the original completion callback. */ (*rq->aux_req_cb)(a, rq); } /* Tunnel a CSMI IOCTL to the back end driver for processing. */ static bool csmi_ioctl_tunnel(struct esas2r_adapter *a, union atto_ioctl_csmi *ci, struct esas2r_request *rq, struct esas2r_sg_context *sgc, u32 ctrl_code, u16 target_id) { struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; if (test_bit(AF_DEGRADED_MODE, &a->flags)) return false; esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI); ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code); ioctl->csmi.target_id = cpu_to_le16(target_id); ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags); /* * Always usurp the completion callback since the interrupt callback * mechanism may be used. 
*/ rq->aux_req_cx = ci; rq->aux_req_cb = rq->comp_cb; rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb; if (!esas2r_build_sg_list(a, rq, sgc)) return false; esas2r_start_request(a, rq); return true; } static bool check_lun(struct scsi_lun lun) { bool result; result = ((lun.scsi_lun[7] == 0) && (lun.scsi_lun[6] == 0) && (lun.scsi_lun[5] == 0) && (lun.scsi_lun[4] == 0) && (lun.scsi_lun[3] == 0) && (lun.scsi_lun[2] == 0) && /* Byte 1 is intentionally skipped */ (lun.scsi_lun[0] == 0)); return result; } static int csmi_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) { struct atto_csmi *ci = (struct atto_csmi *)context; union atto_ioctl_csmi *ioctl_csmi = (union atto_ioctl_csmi *)esas2r_buffered_ioctl; u8 path = 0; u8 tid = 0; u8 lun = 0; u32 sts = CSMI_STS_SUCCESS; struct esas2r_target *t; unsigned long flags; if (ci->control_code == CSMI_CC_GET_DEV_ADDR) { struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr; path = gda->path_id; tid = gda->target_id; lun = gda->lun; } else if (ci->control_code == CSMI_CC_TASK_MGT) { struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt; path = tm->path_id; tid = tm->target_id; lun = tm->lun; } if (path > 0) { rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( CSMI_STS_INV_PARAM); return false; } rq->target_id = tid; rq->vrq->scsi.flags |= cpu_to_le32(lun); switch (ci->control_code) { case CSMI_CC_GET_DRVR_INFO: { struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info; strcpy(gdi->description, esas2r_get_model_name(a)); gdi->csmi_major_rev = CSMI_MAJOR_REV; gdi->csmi_minor_rev = CSMI_MINOR_REV; break; } case CSMI_CC_GET_CNTLR_CFG: { struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg; gcc->base_io_addr = 0; pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2, &gcc->base_memaddr_lo); pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3, &gcc->base_memaddr_hi); gcc->board_id = MAKEDWORD(a->pcid->subsystem_device, a->pcid->subsystem_vendor); gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN; gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA; gcc->io_bus_type = CSMI_BUS_TYPE_PCI; gcc->pci_addr.bus_num = a->pcid->bus->number; gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn); gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn); memset(gcc->serial_num, 0, sizeof(gcc->serial_num)); gcc->major_rev = LOBYTE(LOWORD(a->fw_version)); gcc->minor_rev = HIBYTE(LOWORD(a->fw_version)); gcc->build_rev = LOBYTE(HIWORD(a->fw_version)); gcc->release_rev = HIBYTE(HIWORD(a->fw_version)); gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver)); gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); gcc->bios_build_rev = LOWORD(a->flash_ver); if (test_bit(AF2_THUNDERLINK, &a->flags2)) gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA | CSMI_CNTLRF_SATA_HBA; else gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID | CSMI_CNTLRF_SATA_RAID; gcc->rrom_major_rev = 0; gcc->rrom_minor_rev = 0; gcc->rrom_build_rev = 0; gcc->rrom_release_rev = 0; gcc->rrom_biosmajor_rev = 0; gcc->rrom_biosminor_rev = 0; gcc->rrom_biosbuild_rev = 0; gcc->rrom_biosrelease_rev = 0; break; } case CSMI_CC_GET_CNTLR_STS: { struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; if (test_bit(AF_DEGRADED_MODE, &a->flags)) gcs->status = CSMI_CNTLR_STS_FAILED; else gcs->status = CSMI_CNTLR_STS_GOOD; gcs->offline_reason = CSMI_OFFLINE_NO_REASON; break; } case CSMI_CC_FW_DOWNLOAD: case CSMI_CC_GET_RAID_INFO: case CSMI_CC_GET_RAID_CFG: sts = CSMI_STS_BAD_CTRL_CODE; break; case CSMI_CC_SMP_PASSTHRU: case CSMI_CC_SSP_PASSTHRU: case CSMI_CC_STP_PASSTHRU: case CSMI_CC_GET_PHY_INFO: case 
CSMI_CC_SET_PHY_INFO: case CSMI_CC_GET_LINK_ERRORS: case CSMI_CC_GET_SATA_SIG: case CSMI_CC_GET_CONN_INFO: case CSMI_CC_PHY_CTRL: if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, ci->control_code, ESAS2R_TARG_ID_INV)) { sts = CSMI_STS_FAILED; break; } return true; case CSMI_CC_GET_SCSI_ADDR: { struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; struct scsi_lun lun; memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun)); if (!check_lun(lun)) { sts = CSMI_STS_NO_SCSI_ADDR; break; } /* make sure the device is present */ spin_lock_irqsave(&a->mem_lock, flags); t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr); spin_unlock_irqrestore(&a->mem_lock, flags); if (t == NULL) { sts = CSMI_STS_NO_SCSI_ADDR; break; } gsa->host_index = 0xFF; gsa->lun = gsa->sas_lun[1]; rq->target_id = esas2r_targ_get_id(t, a); break; } case CSMI_CC_GET_DEV_ADDR: { struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr; /* make sure the target is present */ t = a->targetdb + rq->target_id; if (t >= a->targetdb_end || t->target_state != TS_PRESENT || t->sas_addr == 0) { sts = CSMI_STS_NO_DEV_ADDR; break; } /* fill in the result */ *(u64 *)gda->sas_addr = t->sas_addr; memset(gda->sas_lun, 0, sizeof(gda->sas_lun)); gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags); break; } case CSMI_CC_TASK_MGT: /* make sure the target is present */ t = a->targetdb + rq->target_id; if (t >= a->targetdb_end || t->target_state != TS_PRESENT || !(t->flags & TF_PASS_THRU)) { sts = CSMI_STS_NO_DEV_ADDR; break; } if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, ci->control_code, t->phys_targ_id)) { sts = CSMI_STS_FAILED; break; } return true; default: sts = CSMI_STS_BAD_CTRL_CODE; break; } rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts); return false; } static void csmi_ioctl_done_callback(struct esas2r_adapter *a, struct esas2r_request *rq, void *context) { struct atto_csmi *ci = (struct atto_csmi *)context; union atto_ioctl_csmi *ioctl_csmi = (union atto_ioctl_csmi *)esas2r_buffered_ioctl; switch (ci->control_code) { case CSMI_CC_GET_DRVR_INFO: { struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info; strcpy(gdi->name, ESAS2R_VERSION_STR); gdi->major_rev = ESAS2R_MAJOR_REV; gdi->minor_rev = ESAS2R_MINOR_REV; gdi->build_rev = 0; gdi->release_rev = 0; break; } case CSMI_CC_GET_SCSI_ADDR: { struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) == CSMI_STS_SUCCESS) { gsa->target_id = rq->target_id; gsa->path_id = 0; } break; } } ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status); } static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci) { struct esas2r_buffered_ioctl bi; memset(&bi, 0, sizeof(bi)); bi.a = a; bi.ioctl = &ci->data; bi.length = sizeof(union atto_ioctl_csmi); bi.offset = 0; bi.callback = csmi_ioctl_callback; bi.context = ci; bi.done_callback = csmi_ioctl_done_callback; bi.done_context = ci; return handle_buffered_ioctl(&bi); } /* ATTO HBA ioctl support */ /* Tunnel an ATTO HBA IOCTL to the back end driver for processing. 
*/ static bool hba_ioctl_tunnel(struct esas2r_adapter *a, struct atto_ioctl *hi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) { esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA); if (!esas2r_build_sg_list(a, rq, sgc)) { hi->status = ATTO_STS_OUT_OF_RSRC; return false; } esas2r_start_request(a, rq); return true; } static void scsi_passthru_comp_cb(struct esas2r_adapter *a, struct esas2r_request *rq) { struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx; struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; u8 sts = ATTO_SPT_RS_FAILED; spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat; spt->sense_length = rq->sense_len; spt->residual_length = le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length); switch (rq->req_stat) { case RS_SUCCESS: case RS_SCSI_ERROR: sts = ATTO_SPT_RS_SUCCESS; break; case RS_UNDERRUN: sts = ATTO_SPT_RS_UNDERRUN; break; case RS_OVERRUN: sts = ATTO_SPT_RS_OVERRUN; break; case RS_SEL: case RS_SEL2: sts = ATTO_SPT_RS_NO_DEVICE; break; case RS_NO_LUN: sts = ATTO_SPT_RS_NO_LUN; break; case RS_TIMEOUT: sts = ATTO_SPT_RS_TIMEOUT; break; case RS_DEGRADED: sts = ATTO_SPT_RS_DEGRADED; break; case RS_BUSY: sts = ATTO_SPT_RS_BUSY; break; case RS_ABORTED: sts = ATTO_SPT_RS_ABORTED; break; case RS_RESET: sts = ATTO_SPT_RS_BUS_RESET; break; } spt->req_status = sts; /* Update the target ID to the next one present. */ spt->target_id = esas2r_targ_db_find_next_present(a, (u16)spt->target_id); /* Done, call the completion callback. */ (*rq->aux_req_cb)(a, rq); } static int hba_ioctl_callback(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc, void *context) { struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl; hi->status = ATTO_STS_SUCCESS; switch (hi->function) { case ATTO_FUNC_GET_ADAP_INFO: { u8 *class_code = (u8 *)&a->pcid->class; struct atto_hba_get_adapter_info *gai = &hi->data.get_adap_info; if (hi->flags & HBAF_TUNNEL) { hi->status = ATTO_STS_UNSUPPORTED; break; } if (hi->version > ATTO_VER_GET_ADAP_INFO0) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_GET_ADAP_INFO0; break; } memset(gai, 0, sizeof(*gai)); gai->pci.vendor_id = a->pcid->vendor; gai->pci.device_id = a->pcid->device; gai->pci.ss_vendor_id = a->pcid->subsystem_vendor; gai->pci.ss_device_id = a->pcid->subsystem_device; gai->pci.class_code[0] = class_code[0]; gai->pci.class_code[1] = class_code[1]; gai->pci.class_code[2] = class_code[2]; gai->pci.rev_id = a->pcid->revision; gai->pci.bus_num = a->pcid->bus->number; gai->pci.dev_num = PCI_SLOT(a->pcid->devfn); gai->pci.func_num = PCI_FUNC(a->pcid->devfn); if (pci_is_pcie(a->pcid)) { u16 stat; u32 caps; pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA, &stat); pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP, &caps); gai->pci.link_speed_curr = (u8)(stat & PCI_EXP_LNKSTA_CLS); gai->pci.link_speed_max = (u8)(caps & PCI_EXP_LNKCAP_SLS); gai->pci.link_width_curr = (u8)((stat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); gai->pci.link_width_max = (u8)((caps & PCI_EXP_LNKCAP_MLW) >> 4); } gai->pci.msi_vector_cnt = 1; if (a->pcid->msix_enabled) gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX; else if (a->pcid->msi_enabled) gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI; else gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY; gai->adap_type = ATTO_GAI_AT_ESASRAID2; if (test_bit(AF2_THUNDERLINK, &a->flags2)) gai->adap_type = ATTO_GAI_AT_TLSASHBA; if (test_bit(AF_DEGRADED_MODE, &a->flags)) gai->adap_flags |= ATTO_GAI_AF_DEGRADED; 
gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | ATTO_GAI_AF_DEVADDR_SUPP; if (a->pcid->subsystem_device == ATTO_ESAS_R60F || a->pcid->subsystem_device == ATTO_ESAS_R608 || a->pcid->subsystem_device == ATTO_ESAS_R644 || a->pcid->subsystem_device == ATTO_TSSC_3808E) gai->adap_flags |= ATTO_GAI_AF_VIRT_SES; gai->num_ports = ESAS2R_NUM_PHYS; gai->num_phys = ESAS2R_NUM_PHYS; strcpy(gai->firmware_rev, a->fw_rev); strcpy(gai->flash_rev, a->flash_rev); strcpy(gai->model_name_short, esas2r_get_model_name_short(a)); strcpy(gai->model_name, esas2r_get_model_name(a)); gai->num_targets = ESAS2R_MAX_TARGETS; gai->num_busses = 1; gai->num_targsper_bus = gai->num_targets; gai->num_lunsper_targ = 256; if (a->pcid->subsystem_device == ATTO_ESAS_R6F0 || a->pcid->subsystem_device == ATTO_ESAS_R60F) gai->num_connectors = 4; else gai->num_connectors = 2; gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP; gai->num_targets_backend = a->num_targets_backend; gai->tunnel_flags = a->ioctl_tunnel & (ATTO_GAI_TF_MEM_RW | ATTO_GAI_TF_TRACE | ATTO_GAI_TF_SCSI_PASS_THRU | ATTO_GAI_TF_GET_DEV_ADDR | ATTO_GAI_TF_PHY_CTRL | ATTO_GAI_TF_CONN_CTRL | ATTO_GAI_TF_GET_DEV_INFO); break; } case ATTO_FUNC_GET_ADAP_ADDR: { struct atto_hba_get_adapter_address *gaa = &hi->data.get_adap_addr; if (hi->flags & HBAF_TUNNEL) { hi->status = ATTO_STS_UNSUPPORTED; break; } if (hi->version > ATTO_VER_GET_ADAP_ADDR0) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_GET_ADAP_ADDR0; } else if (gaa->addr_type == ATTO_GAA_AT_PORT || gaa->addr_type == ATTO_GAA_AT_NODE) { if (gaa->addr_type == ATTO_GAA_AT_PORT && gaa->port_id >= ESAS2R_NUM_PHYS) { hi->status = ATTO_STS_NOT_APPL; } else { memcpy((u64 *)gaa->address, &a->nvram->sas_addr[0], sizeof(u64)); gaa->addr_len = sizeof(u64); } } else { hi->status = ATTO_STS_INV_PARAM; } break; } case ATTO_FUNC_MEM_RW: { if (hi->flags & HBAF_TUNNEL) { if (hba_ioctl_tunnel(a, hi, rq, sgc)) return true; break; } hi->status = ATTO_STS_UNSUPPORTED; break; } case ATTO_FUNC_TRACE: { struct atto_hba_trace *trc = &hi->data.trace; if (hi->flags & HBAF_TUNNEL) { if (hba_ioctl_tunnel(a, hi, rq, sgc)) return true; break; } if (hi->version > ATTO_VER_TRACE1) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_TRACE1; break; } if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP && hi->version >= ATTO_VER_TRACE1) { if (trc->trace_func == ATTO_TRC_TF_UPLOAD) { u32 len = hi->data_length; u32 offset = trc->current_offset; u32 total_len = ESAS2R_FWCOREDUMP_SZ; /* Size is zero if a core dump isn't present */ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) total_len = 0; if (len > total_len) len = total_len; if (offset >= total_len || offset + len > total_len || len == 0) { hi->status = ATTO_STS_INV_PARAM; break; } memcpy(trc->contents, a->fw_coredump_buff + offset, len); hi->data_length = len; } else if (trc->trace_func == ATTO_TRC_TF_RESET) { memset(a->fw_coredump_buff, 0, ESAS2R_FWCOREDUMP_SZ); clear_bit(AF2_COREDUMP_SAVED, &a->flags2); } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { hi->status = ATTO_STS_UNSUPPORTED; break; } /* Always return all the info we can. 
*/ trc->trace_mask = 0; trc->current_offset = 0; trc->total_length = ESAS2R_FWCOREDUMP_SZ; /* Return zero length buffer if core dump not present */ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) trc->total_length = 0; } else { hi->status = ATTO_STS_UNSUPPORTED; } break; } case ATTO_FUNC_SCSI_PASS_THRU: { struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; struct scsi_lun lun; memcpy(&lun, spt->lun, sizeof(struct scsi_lun)); if (hi->flags & HBAF_TUNNEL) { if (hba_ioctl_tunnel(a, hi, rq, sgc)) return true; break; } if (hi->version > ATTO_VER_SCSI_PASS_THRU0) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_SCSI_PASS_THRU0; break; } if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) { hi->status = ATTO_STS_INV_PARAM; break; } esas2r_sgc_init(sgc, a, rq, NULL); sgc->length = hi->data_length; sgc->cur_offset += offsetof(struct atto_ioctl, data.byte) + sizeof(struct atto_hba_scsi_pass_thru); /* Finish request initialization */ rq->target_id = (u16)spt->target_id; rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]); memcpy(rq->vrq->scsi.cdb, spt->cdb, 16); rq->vrq->scsi.length = cpu_to_le32(hi->data_length); rq->sense_len = spt->sense_length; rq->sense_buf = (u8 *)spt->sense_data; /* NOTE: we ignore spt->timeout */ /* * always usurp the completion callback since the interrupt * callback mechanism may be used. */ rq->aux_req_cx = hi; rq->aux_req_cb = rq->comp_cb; rq->comp_cb = scsi_passthru_comp_cb; if (spt->flags & ATTO_SPTF_DATA_IN) { rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD); } else if (spt->flags & ATTO_SPTF_DATA_OUT) { rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); } else { if (sgc->length) { hi->status = ATTO_STS_INV_PARAM; break; } } if (spt->flags & ATTO_SPTF_ORDERED_Q) rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_ORDRD_Q); else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); if (!esas2r_build_sg_list(a, rq, sgc)) { hi->status = ATTO_STS_OUT_OF_RSRC; break; } esas2r_start_request(a, rq); return true; } case ATTO_FUNC_GET_DEV_ADDR: { struct atto_hba_get_device_address *gda = &hi->data.get_dev_addr; struct esas2r_target *t; if (hi->flags & HBAF_TUNNEL) { if (hba_ioctl_tunnel(a, hi, rq, sgc)) return true; break; } if (hi->version > ATTO_VER_GET_DEV_ADDR0) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_GET_DEV_ADDR0; break; } if (gda->target_id >= ESAS2R_MAX_TARGETS) { hi->status = ATTO_STS_INV_PARAM; break; } t = a->targetdb + (u16)gda->target_id; if (t->target_state != TS_PRESENT) { hi->status = ATTO_STS_FAILED; } else if (gda->addr_type == ATTO_GDA_AT_PORT) { if (t->sas_addr == 0) { hi->status = ATTO_STS_UNSUPPORTED; } else { *(u64 *)gda->address = t->sas_addr; gda->addr_len = sizeof(u64); } } else if (gda->addr_type == ATTO_GDA_AT_NODE) { hi->status = ATTO_STS_NOT_APPL; } else { hi->status = ATTO_STS_INV_PARAM; } /* update the target ID to the next one present. 
*/ gda->target_id = esas2r_targ_db_find_next_present(a, (u16)gda->target_id); break; } case ATTO_FUNC_PHY_CTRL: case ATTO_FUNC_CONN_CTRL: { if (hba_ioctl_tunnel(a, hi, rq, sgc)) return true; break; } case ATTO_FUNC_ADAP_CTRL: { struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl; if (hi->flags & HBAF_TUNNEL) { hi->status = ATTO_STS_UNSUPPORTED; break; } if (hi->version > ATTO_VER_ADAP_CTRL0) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_ADAP_CTRL0; break; } if (ac->adap_func == ATTO_AC_AF_HARD_RST) { esas2r_reset_adapter(a); } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) { hi->status = ATTO_STS_UNSUPPORTED; break; } if (test_bit(AF_CHPRST_NEEDED, &a->flags)) ac->adap_state = ATTO_AC_AS_RST_SCHED; else if (test_bit(AF_CHPRST_PENDING, &a->flags)) ac->adap_state = ATTO_AC_AS_RST_IN_PROG; else if (test_bit(AF_DISC_PENDING, &a->flags)) ac->adap_state = ATTO_AC_AS_RST_DISC; else if (test_bit(AF_DISABLED, &a->flags)) ac->adap_state = ATTO_AC_AS_DISABLED; else if (test_bit(AF_DEGRADED_MODE, &a->flags)) ac->adap_state = ATTO_AC_AS_DEGRADED; else ac->adap_state = ATTO_AC_AS_OK; break; } case ATTO_FUNC_GET_DEV_INFO: { struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info; struct esas2r_target *t; if (hi->flags & HBAF_TUNNEL) { if (hba_ioctl_tunnel(a, hi, rq, sgc)) return true; break; } if (hi->version > ATTO_VER_GET_DEV_INFO0) { hi->status = ATTO_STS_INV_VERSION; hi->version = ATTO_VER_GET_DEV_INFO0; break; } if (gdi->target_id >= ESAS2R_MAX_TARGETS) { hi->status = ATTO_STS_INV_PARAM; break; } t = a->targetdb + (u16)gdi->target_id; /* update the target ID to the next one present. */ gdi->target_id = esas2r_targ_db_find_next_present(a, (u16)gdi->target_id); if (t->target_state != TS_PRESENT) { hi->status = ATTO_STS_FAILED; break; } hi->status = ATTO_STS_UNSUPPORTED; break; } default: hi->status = ATTO_STS_INV_FUNC; break; } return false; } static void hba_ioctl_done_callback(struct esas2r_adapter *a, struct esas2r_request *rq, void *context) { struct atto_ioctl *ioctl_hba = (struct atto_ioctl *)esas2r_buffered_ioctl; esas2r_debug("hba_ioctl_done_callback %d", a->index); if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) { struct atto_hba_get_adapter_info *gai = &ioctl_hba->data.get_adap_info; esas2r_debug("ATTO_FUNC_GET_ADAP_INFO"); gai->drvr_rev_major = ESAS2R_MAJOR_REV; gai->drvr_rev_minor = ESAS2R_MINOR_REV; strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR); strcpy(gai->drvr_name, ESAS2R_DRVR_NAME); gai->num_busses = 1; gai->num_targsper_bus = ESAS2R_MAX_ID + 1; gai->num_lunsper_targ = 1; } } u8 handle_hba_ioctl(struct esas2r_adapter *a, struct atto_ioctl *ioctl_hba) { struct esas2r_buffered_ioctl bi; memset(&bi, 0, sizeof(bi)); bi.a = a; bi.ioctl = ioctl_hba; bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length; bi.callback = hba_ioctl_callback; bi.context = NULL; bi.done_callback = hba_ioctl_done_callback; bi.done_context = NULL; bi.offset = 0; return handle_buffered_ioctl(&bi); } int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *data) { int result = 0; a->nvram_command_done = 0; rq->comp_cb = complete_nvr_req; if (esas2r_nvram_write(a, rq, data)) { /* now wait around for it to complete. */ while (!a->nvram_command_done) wait_event_interruptible(a->nvram_waiter, a->nvram_command_done); ; /* done, check the status. 
*/ if (rq->req_stat == RS_SUCCESS) result = 1; } return result; } /* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */ int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg) { struct atto_express_ioctl *ioctl = NULL; struct esas2r_adapter *a; struct esas2r_request *rq; u16 code; int err; esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg); if ((arg == NULL) || (cmd < EXPRESS_IOCTL_MIN) || (cmd > EXPRESS_IOCTL_MAX)) return -ENOTSUPP; ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl)); if (IS_ERR(ioctl)) { esas2r_log(ESAS2R_LOG_WARN, "ioctl_handler access_ok failed for cmd %u, address %p", cmd, arg); return PTR_ERR(ioctl); } /* verify the signature */ if (memcmp(ioctl->header.signature, EXPRESS_IOCTL_SIGNATURE, EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) { esas2r_log(ESAS2R_LOG_WARN, "invalid signature"); kfree(ioctl); return -ENOTSUPP; } /* assume success */ ioctl->header.return_code = IOCTL_SUCCESS; err = 0; /* * handle EXPRESS_IOCTL_GET_CHANNELS * without paying attention to channel */ if (cmd == EXPRESS_IOCTL_GET_CHANNELS) { int i = 0, k = 0; ioctl->data.chanlist.num_channels = 0; while (i < MAX_ADAPTERS) { if (esas2r_adapters[i]) { ioctl->data.chanlist.num_channels++; ioctl->data.chanlist.channel[k] = i; k++; } i++; } goto ioctl_done; } /* get the channel */ if (ioctl->header.channel == 0xFF) { a = (struct esas2r_adapter *)hostdata; } else { if (ioctl->header.channel >= MAX_ADAPTERS || esas2r_adapters[ioctl->header.channel] == NULL) { ioctl->header.return_code = IOCTL_BAD_CHANNEL; esas2r_log(ESAS2R_LOG_WARN, "bad channel value"); kfree(ioctl); return -ENOTSUPP; } a = esas2r_adapters[ioctl->header.channel]; } switch (cmd) { case EXPRESS_IOCTL_RW_FIRMWARE: if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) { err = esas2r_write_fw(a, (char *)ioctl->data.fwrw.image, 0, sizeof(struct atto_express_ioctl)); if (err >= 0) { err = esas2r_read_fw(a, (char *)ioctl->data.fwrw. image, 0, sizeof(struct atto_express_ioctl)); } } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) { err = esas2r_write_fs(a, (char *)ioctl->data.fwrw.image, 0, sizeof(struct atto_express_ioctl)); if (err >= 0) { err = esas2r_read_fs(a, (char *)ioctl->data.fwrw. 
image, 0, sizeof(struct atto_express_ioctl)); } } else { ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE; } break; case EXPRESS_IOCTL_READ_PARAMS: memcpy(ioctl->data.prw.data_buffer, a->nvram, sizeof(struct esas2r_sas_nvram)); ioctl->data.prw.code = 1; break; case EXPRESS_IOCTL_WRITE_PARAMS: rq = esas2r_alloc_request(a); if (rq == NULL) { kfree(ioctl); esas2r_log(ESAS2R_LOG_WARN, "could not allocate an internal request"); return -ENOMEM; } code = esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer); ioctl->data.prw.code = code; esas2r_free_request(a, rq); break; case EXPRESS_IOCTL_DEFAULT_PARAMS: esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer); ioctl->data.prw.code = 1; break; case EXPRESS_IOCTL_CHAN_INFO: ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV; ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV; ioctl->data.chaninfo.IRQ = a->pcid->irq; ioctl->data.chaninfo.device_id = a->pcid->device; ioctl->data.chaninfo.vendor_id = a->pcid->vendor; ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device; ioctl->data.chaninfo.revision_id = a->pcid->revision; ioctl->data.chaninfo.pci_bus = a->pcid->bus->number; ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn; ioctl->data.chaninfo.core_rev = 0; ioctl->data.chaninfo.host_no = a->host->host_no; ioctl->data.chaninfo.hbaapi_rev = 0; break; case EXPRESS_IOCTL_SMP: ioctl->header.return_code = handle_smp_ioctl(a, &ioctl->data. ioctl_smp); break; case EXPRESS_CSMI: ioctl->header.return_code = handle_csmi_ioctl(a, &ioctl->data.csmi); break; case EXPRESS_IOCTL_HBA: ioctl->header.return_code = handle_hba_ioctl(a, &ioctl->data. ioctl_hba); break; case EXPRESS_IOCTL_VDA: err = esas2r_write_vda(a, (char *)&ioctl->data.ioctl_vda, 0, sizeof(struct atto_ioctl_vda) + ioctl->data.ioctl_vda.data_length); if (err >= 0) { err = esas2r_read_vda(a, (char *)&ioctl->data.ioctl_vda, 0, sizeof(struct atto_ioctl_vda) + ioctl->data.ioctl_vda.data_length); } break; case EXPRESS_IOCTL_GET_MOD_INFO: ioctl->data.modinfo.adapter = a; ioctl->data.modinfo.pci_dev = a->pcid; ioctl->data.modinfo.scsi_host = a->host; ioctl->data.modinfo.host_no = a->host->host_no; break; default: esas2r_debug("esas2r_ioctl invalid cmd %p!", cmd); ioctl->header.return_code = IOCTL_ERR_INVCMD; } ioctl_done: if (err < 0) { esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err, cmd); switch (err) { case -ENOMEM: case -EBUSY: ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES; break; case -ENOSYS: case -EINVAL: ioctl->header.return_code = IOCTL_INVALID_PARAM; break; default: ioctl->header.return_code = IOCTL_GENERAL_ERROR; break; } } /* Always copy the buffer back, if only to pick up the status */ err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl)); if (err != 0) { esas2r_log(ESAS2R_LOG_WARN, "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)", err, cmd); kfree(ioctl); return -EFAULT; } kfree(ioctl); return 0; } int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg) { return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg); } static void free_fw_buffers(struct esas2r_adapter *a) { if (a->firmware.data) { dma_free_coherent(&a->pcid->dev, (size_t)a->firmware.orig_len, a->firmware.data, (dma_addr_t)a->firmware.phys); a->firmware.data = NULL; } } static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length) { free_fw_buffers(a); a->firmware.orig_len = length; a->firmware.data = dma_alloc_coherent(&a->pcid->dev, (size_t)length, (dma_addr_t *)&a->firmware.phys, 
GFP_KERNEL); if (!a->firmware.data) { esas2r_debug("buffer alloc failed!"); return 0; } return 1; } /* Handle a call to read firmware. */ int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count) { esas2r_trace_enter(); /* if the cached header is a status, simply copy it over and return. */ if (a->firmware.state == FW_STATUS_ST) { int size = min_t(int, count, sizeof(a->firmware.header)); esas2r_trace_exit(); memcpy(buf, &a->firmware.header, size); esas2r_debug("esas2r_read_fw: STATUS size %d", size); return size; } /* * if the cached header is a command, do it if at * offset 0, otherwise copy the pieces. */ if (a->firmware.state == FW_COMMAND_ST) { u32 length = a->firmware.header.length; esas2r_trace_exit(); esas2r_debug("esas2r_read_fw: COMMAND length %d off %d", length, off); if (off == 0) { if (a->firmware.header.action == FI_ACT_UP) { if (!allocate_fw_buffers(a, length)) return -ENOMEM; /* copy header over */ memcpy(a->firmware.data, &a->firmware.header, sizeof(a->firmware.header)); do_fm_api(a, (struct esas2r_flash_img *)a->firmware.data); } else if (a->firmware.header.action == FI_ACT_UPSZ) { int size = min((int)count, (int)sizeof(a->firmware.header)); do_fm_api(a, &a->firmware.header); memcpy(buf, &a->firmware.header, size); esas2r_debug("FI_ACT_UPSZ size %d", size); return size; } else { esas2r_debug("invalid action %d", a->firmware.header.action); return -ENOSYS; } } if (count + off > length) count = length - off; if (count < 0) return 0; if (!a->firmware.data) { esas2r_debug( "read: nonzero offset but no buffer available!"); return -ENOMEM; } esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off, count, length); memcpy(buf, &a->firmware.data[off], count); /* when done, release the buffer */ if (length <= off + count) { esas2r_debug("esas2r_read_fw: freeing buffer!"); free_fw_buffers(a); } return count; } esas2r_trace_exit(); esas2r_debug("esas2r_read_fw: invalid firmware state %d", a->firmware.state); return -EINVAL; } /* Handle a call to write firmware. */ int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, int count) { u32 length; if (off == 0) { struct esas2r_flash_img *header = (struct esas2r_flash_img *)buf; /* assume version 0 flash image */ int min_size = sizeof(struct esas2r_flash_img_v0); a->firmware.state = FW_INVALID_ST; /* validate the version field first */ if (count < 4 || header->fi_version > FI_VERSION_1) { esas2r_debug( "esas2r_write_fw: short header or invalid version"); return -EINVAL; } /* See if its a version 1 flash image */ if (header->fi_version == FI_VERSION_1) min_size = sizeof(struct esas2r_flash_img); /* If this is the start, the header must be full and valid. */ if (count < min_size) { esas2r_debug("esas2r_write_fw: short header, aborting"); return -EINVAL; } /* Make sure the size is reasonable. */ length = header->length; if (length > 1024 * 1024) { esas2r_debug( "esas2r_write_fw: hosed, length %d fi_version %d", length, header->fi_version); return -EINVAL; } /* * If this is a write command, allocate memory because * we have to cache everything. otherwise, just cache * the header, because the read op will do the command. */ if (header->action == FI_ACT_DOWN) { if (!allocate_fw_buffers(a, length)) return -ENOMEM; /* * Store the command, so there is context on subsequent * calls. 
*/ memcpy(&a->firmware.header, buf, sizeof(*header)); } else if (header->action == FI_ACT_UP || header->action == FI_ACT_UPSZ) { /* Save the command, result will be picked up on read */ memcpy(&a->firmware.header, buf, sizeof(*header)); a->firmware.state = FW_COMMAND_ST; esas2r_debug( "esas2r_write_fw: COMMAND, count %d, action %d ", count, header->action); /* * Pretend we took the whole buffer, * so we don't get bothered again. */ return count; } else { esas2r_debug("esas2r_write_fw: invalid action %d ", a->firmware.header.action); return -ENOSYS; } } else { length = a->firmware.header.length; } /* * We only get here on a download command, regardless of offset. * the chunks written by the system need to be cached, and when * the final one arrives, issue the fmapi command. */ if (off + count > length) count = length - off; if (count > 0) { esas2r_debug("esas2r_write_fw: off %d count %d length %d", off, count, length); /* * On a full upload, the system tries sending the whole buffer. * there's nothing to do with it, so just drop it here, before * trying to copy over into unallocated memory! */ if (a->firmware.header.action == FI_ACT_UP) return count; if (!a->firmware.data) { esas2r_debug( "write: nonzero offset but no buffer available!"); return -ENOMEM; } memcpy(&a->firmware.data[off], buf, count); if (length == off + count) { do_fm_api(a, (struct esas2r_flash_img *)a->firmware.data); /* * Now copy the header result to be picked up by the * next read */ memcpy(&a->firmware.header, a->firmware.data, sizeof(a->firmware.header)); a->firmware.state = FW_STATUS_ST; esas2r_debug("write completed"); /* * Since the system has the data buffered, the only way * this can leak is if a root user writes a program * that writes a shorter buffer than it claims, and the * copyin fails. */ free_fw_buffers(a); } } return count; } /* Callback for the completion of a VDA request. */ static void vda_complete_req(struct esas2r_adapter *a, struct esas2r_request *rq) { a->vda_command_done = 1; wake_up_interruptible(&a->vda_waiter); } /* Scatter/gather callback for VDA requests */ static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr) { struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer; (*addr) = a->ppvda_buffer + offset; return VDA_MAX_BUFFER_SIZE - offset; } /* Handle a call to read a VDA command. */ int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count) { if (!a->vda_buffer) return -ENOMEM; if (off == 0) { struct esas2r_request *rq; struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)a->vda_buffer; struct esas2r_sg_context sgc; bool wait_for_completion; /* * Presumeably, someone has already written to the vda_buffer, * and now they are reading the node the response, so now we * will actually issue the request to the chip and reply. */ /* allocate a request */ rq = esas2r_alloc_request(a); if (rq == NULL) { esas2r_debug("esas2r_read_vda: out of requests"); return -EBUSY; } rq->comp_cb = vda_complete_req; sgc.first_req = rq; sgc.adapter = a; sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ; sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda; a->vda_command_done = 0; wait_for_completion = esas2r_process_vda_ioctl(a, vi, rq, &sgc); if (wait_for_completion) { /* now wait around for it to complete. 
*/ while (!a->vda_command_done) wait_event_interruptible(a->vda_waiter, a->vda_command_done); } esas2r_free_request(a, (struct esas2r_request *)rq); } if (off > VDA_MAX_BUFFER_SIZE) return 0; if (count + off > VDA_MAX_BUFFER_SIZE) count = VDA_MAX_BUFFER_SIZE - off; if (count < 0) return 0; memcpy(buf, a->vda_buffer + off, count); return count; } /* Handle a call to write a VDA command. */ int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, int count) { /* * allocate memory for it, if not already done. once allocated, * we will keep it around until the driver is unloaded. */ if (!a->vda_buffer) { dma_addr_t dma_addr; a->vda_buffer = dma_alloc_coherent(&a->pcid->dev, (size_t) VDA_MAX_BUFFER_SIZE, &dma_addr, GFP_KERNEL); a->ppvda_buffer = dma_addr; } if (!a->vda_buffer) return -ENOMEM; if (off > VDA_MAX_BUFFER_SIZE) return 0; if (count + off > VDA_MAX_BUFFER_SIZE) count = VDA_MAX_BUFFER_SIZE - off; if (count < 1) return 0; memcpy(a->vda_buffer + off, buf, count); return count; } /* Callback for the completion of an FS_API request.*/ static void fs_api_complete_req(struct esas2r_adapter *a, struct esas2r_request *rq) { a->fs_api_command_done = 1; wake_up_interruptible(&a->fs_api_waiter); } /* Scatter/gather callback for VDA requests */ static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr) { struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)a->fs_api_buffer; u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs; (*addr) = a->ppfs_api_buffer + offset; return a->fs_api_buffer_size - offset; } /* Handle a call to read firmware via FS_API. */ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count) { if (!a->fs_api_buffer) return -ENOMEM; if (off == 0) { struct esas2r_request *rq; struct esas2r_sg_context sgc; struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)a->fs_api_buffer; /* If another flash request is already in progress, return. */ if (mutex_lock_interruptible(&a->fs_api_mutex)) { busy: fs->status = ATTO_STS_OUT_OF_RSRC; return -EBUSY; } /* * Presumeably, someone has already written to the * fs_api_buffer, and now they are reading the node the * response, so now we will actually issue the request to the * chip and reply. Allocate a request */ rq = esas2r_alloc_request(a); if (rq == NULL) { esas2r_debug("esas2r_read_fs: out of requests"); mutex_unlock(&a->fs_api_mutex); goto busy; } rq->comp_cb = fs_api_complete_req; /* Set up the SGCONTEXT for to build the s/g table */ sgc.cur_offset = fs->data; sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api; a->fs_api_command_done = 0; if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) { if (fs->status == ATTO_STS_OUT_OF_RSRC) count = -EBUSY; goto dont_wait; } /* Now wait around for it to complete. */ while (!a->fs_api_command_done) wait_event_interruptible(a->fs_api_waiter, a->fs_api_command_done); ; dont_wait: /* Free the request and keep going */ mutex_unlock(&a->fs_api_mutex); esas2r_free_request(a, (struct esas2r_request *)rq); /* Pick up possible error code from above */ if (count < 0) return count; } if (off > a->fs_api_buffer_size) return 0; if (count + off > a->fs_api_buffer_size) count = a->fs_api_buffer_size - off; if (count < 0) return 0; memcpy(buf, a->fs_api_buffer + off, count); return count; } /* Handle a call to write firmware via FS_API. 
*/ int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, int count) { if (off == 0) { struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf; u32 length = fs->command.length + offsetof( struct esas2r_ioctl_fs, data); /* * Special case, for BEGIN commands, the length field * is lying to us, so just get enough for the header. */ if (fs->command.command == ESAS2R_FS_CMD_BEGINW) length = offsetof(struct esas2r_ioctl_fs, data); /* * Beginning a command. We assume we'll get at least * enough in the first write so we can look at the * header and see how much we need to alloc. */ if (count < offsetof(struct esas2r_ioctl_fs, data)) return -EINVAL; /* Allocate a buffer or use the existing buffer. */ if (a->fs_api_buffer) { if (a->fs_api_buffer_size < length) { /* Free too-small buffer and get a new one */ dma_free_coherent(&a->pcid->dev, (size_t)a->fs_api_buffer_size, a->fs_api_buffer, (dma_addr_t)a->ppfs_api_buffer); goto re_allocate_buffer; } } else { re_allocate_buffer: a->fs_api_buffer_size = length; a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev, (size_t)a->fs_api_buffer_size, (dma_addr_t *)&a->ppfs_api_buffer, GFP_KERNEL); } } if (!a->fs_api_buffer) return -ENOMEM; if (off > a->fs_api_buffer_size) return 0; if (count + off > a->fs_api_buffer_size) count = a->fs_api_buffer_size - off; if (count < 1) return 0; memcpy(a->fs_api_buffer + off, buf, count); return count; }
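/*
 * Editor's sketch (not part of esas2r_ioctl.c): the "allocate once, grow
 * on demand" coherent-buffer pattern that handle_buffered_ioctl() and
 * esas2r_write_fs() above both implement.  The demo_buf_* names are
 * hypothetical, @dev is assumed to be the owning PCI function's struct
 * device, and the serialization the driver adds around the buffered-ioctl
 * path (a semaphore) is omitted here.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/gfp.h>

static void *demo_buf;
static dma_addr_t demo_buf_phys;
static size_t demo_buf_size;

/*
 * Return a DMA-coherent buffer of at least @len bytes: reuse the current
 * buffer when it is large enough, otherwise free it and allocate a bigger
 * one.  Returns NULL on allocation failure.
 */
static void *demo_get_coherent_buffer(struct device *dev, size_t len)
{
	if (demo_buf && demo_buf_size >= len)
		return demo_buf;

	if (demo_buf)
		dma_free_coherent(dev, demo_buf_size, demo_buf,
				  demo_buf_phys);

	demo_buf_size = len;
	demo_buf = dma_alloc_coherent(dev, demo_buf_size, &demo_buf_phys,
				      GFP_KERNEL);
	if (!demo_buf)
		demo_buf_size = 0;

	return demo_buf;
}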
linux-master
drivers/scsi/esas2r/esas2r_ioctl.c
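/*
 * Editor's sketch (not from the driver sources): the completion-flag
 * handshake that the ioctl paths above repeat for the FM API, NVRAM,
 * buffered-ioctl, VDA and FS_API requests -- clear a "done" flag, start
 * the request, then sleep on a waitqueue until the completion callback
 * sets the flag and wakes the sleeper.  demo_* names are hypothetical.
 * In new code the same handshake is usually expressed more directly with
 * struct completion via complete() and wait_for_completion_interruptible().
 */
#include <linux/wait.h>
#include <linux/sched.h>

struct demo_ctx {
	wait_queue_head_t waiter;
	int command_done;
};

static void demo_ctx_init(struct demo_ctx *ctx)
{
	init_waitqueue_head(&ctx->waiter);
	ctx->command_done = 0;
}

/* Called from the request's completion callback. */
static void demo_complete(struct demo_ctx *ctx)
{
	ctx->command_done = 1;
	wake_up_interruptible(&ctx->waiter);
}

/*
 * Called by the issuing path after the request has been started; re-waits
 * if interrupted by a signal, just as the driver's while loops do.
 */
static void demo_wait_done(struct demo_ctx *ctx)
{
	while (!ctx->command_done)
		wait_event_interruptible(ctx->waiter, ctx->command_done);
}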
/* * linux/drivers/scsi/esas2r/esas2r_vda.c * esas2r driver VDA firmware interface functions * * Copyright (c) 2001-2013 ATTO Technology, Inc. * (mailto:[email protected]) */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include "esas2r.h" static u8 esas2r_vdaioctl_versions[] = { ATTO_VDA_VER_UNSUPPORTED, ATTO_VDA_FLASH_VER, ATTO_VDA_VER_UNSUPPORTED, ATTO_VDA_VER_UNSUPPORTED, ATTO_VDA_CLI_VER, ATTO_VDA_VER_UNSUPPORTED, ATTO_VDA_CFG_VER, ATTO_VDA_MGT_VER, ATTO_VDA_GSV_VER }; static void clear_vda_request(struct esas2r_request *rq); static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq); /* Prepare a VDA IOCTL request to be sent to the firmware. 
*/ bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, struct atto_ioctl_vda *vi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) { u32 datalen = 0; struct atto_vda_sge *firstsg = NULL; u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions); vi->status = ATTO_STS_SUCCESS; vi->vda_status = RS_PENDING; if (vi->function >= vercnt) { vi->status = ATTO_STS_INV_FUNC; return false; } if (vi->version > esas2r_vdaioctl_versions[vi->function]) { vi->status = ATTO_STS_INV_VERSION; return false; } if (test_bit(AF_DEGRADED_MODE, &a->flags)) { vi->status = ATTO_STS_DEGRADED; return false; } if (vi->function != VDA_FUNC_SCSI) clear_vda_request(rq); rq->vrq->scsi.function = vi->function; rq->interrupt_cb = esas2r_complete_vda_ioctl; rq->interrupt_cx = vi; switch (vi->function) { case VDA_FUNC_FLASH: if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) { vi->status = ATTO_STS_INV_FUNC; return false; } if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO) datalen = vi->data_length; rq->vrq->flash.length = cpu_to_le32(datalen); rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; memcpy(rq->vrq->flash.data.file.file_name, vi->cmd.flash.data.file.file_name, sizeof(vi->cmd.flash.data.file.file_name)); firstsg = rq->vrq->flash.data.file.sge; break; case VDA_FUNC_CLI: datalen = vi->data_length; rq->vrq->cli.cmd_rsp_len = cpu_to_le32(vi->cmd.cli.cmd_rsp_len); rq->vrq->cli.length = cpu_to_le32(datalen); firstsg = rq->vrq->cli.sge; break; case VDA_FUNC_MGT: { u8 *cmdcurr_offset = sgc->cur_offset - offsetof(struct atto_ioctl_vda, data) + offsetof(struct atto_ioctl_vda, cmd) + offsetof(struct atto_ioctl_vda_mgt_cmd, data); /* * build the data payload SGL here first since * esas2r_sgc_init() will modify the S/G list offset for the * management SGL (which is built below where the data SGL is * usually built). */ if (vi->data_length) { u32 payldlen = 0; if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) { rq->vrq->mgt.payld_sglst_offset = (u8)offsetof(struct atto_vda_mgmt_req, payld_sge); payldlen = vi->data_length; datalen = vi->cmd.mgt.data_length; } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2 || vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2_BYADDR) { datalen = vi->data_length; cmdcurr_offset = sgc->cur_offset; } else { vi->status = ATTO_STS_INV_PARAM; return false; } /* Setup the length so building the payload SGL works */ rq->vrq->mgt.length = cpu_to_le32(datalen); if (payldlen) { rq->vrq->mgt.payld_length = cpu_to_le32(payldlen); esas2r_sgc_init(sgc, a, rq, rq->vrq->mgt.payld_sge); sgc->length = payldlen; if (!esas2r_build_sg_list(a, rq, sgc)) { vi->status = ATTO_STS_OUT_OF_RSRC; return false; } } } else { datalen = vi->cmd.mgt.data_length; rq->vrq->mgt.length = cpu_to_le32(datalen); } /* * Now that the payload SGL is built, if any, setup to build * the management SGL. */ firstsg = rq->vrq->mgt.sge; sgc->cur_offset = cmdcurr_offset; /* Finish initializing the management request. 
*/ rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func; rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation; rq->vrq->mgt.dev_index = cpu_to_le32(vi->cmd.mgt.dev_index); esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); break; } case VDA_FUNC_CFG: if (vi->data_length || vi->cmd.cfg.data_length == 0) { vi->status = ATTO_STS_INV_PARAM; return false; } if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) { vi->status = ATTO_STS_INV_FUNC; return false; } rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func; rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length); if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { memcpy(&rq->vrq->cfg.data, &vi->cmd.cfg.data, vi->cmd.cfg.data_length); esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, &rq->vrq->cfg.data); } else { vi->status = ATTO_STS_INV_FUNC; return false; } break; case VDA_FUNC_GSV: vi->cmd.gsv.rsp_len = vercnt; memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions, vercnt); vi->vda_status = RS_SUCCESS; break; default: vi->status = ATTO_STS_INV_FUNC; return false; } if (datalen) { esas2r_sgc_init(sgc, a, rq, firstsg); sgc->length = datalen; if (!esas2r_build_sg_list(a, rq, sgc)) { vi->status = ATTO_STS_OUT_OF_RSRC; return false; } } esas2r_start_request(a, rq); return true; } static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq) { struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx; vi->vda_status = rq->req_stat; switch (vi->function) { case VDA_FUNC_FLASH: if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO || vi->cmd.flash.sub_func == VDA_FLASH_FREAD) vi->cmd.flash.data.file.file_size = le32_to_cpu(rq->func_rsp.flash_rsp.file_size); break; case VDA_FUNC_MGT: vi->cmd.mgt.scan_generation = rq->func_rsp.mgt_rsp.scan_generation; vi->cmd.mgt.dev_index = le16_to_cpu( rq->func_rsp.mgt_rsp.dev_index); if (vi->data_length == 0) vi->cmd.mgt.data_length = le32_to_cpu(rq->func_rsp.mgt_rsp.length); esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); break; case VDA_FUNC_CFG: if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; char buf[sizeof(cfg->data.init.fw_release) + 1]; cfg->data_length = cpu_to_le32(sizeof(struct atto_vda_cfg_init)); cfg->data.init.vda_version = le32_to_cpu(rsp->vda_version); cfg->data.init.fw_build = rsp->fw_build; snprintf(buf, sizeof(buf), "%1.1u.%2.2u", (int)LOBYTE(le16_to_cpu(rsp->fw_release)), (int)HIBYTE(le16_to_cpu(rsp->fw_release))); memcpy(&cfg->data.init.fw_release, buf, sizeof(cfg->data.init.fw_release)); if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') cfg->data.init.fw_version = cfg->data.init.fw_build; else cfg->data.init.fw_version = cfg->data.init.fw_release; } else { esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, &vi->cmd.cfg.data); } break; case VDA_FUNC_CLI: vi->cmd.cli.cmd_rsp_len = le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len); break; default: break; } } /* Build a flash VDA request. */ void esas2r_build_flash_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u8 cksum, u32 addr, u32 length) { struct atto_vda_flash_req *vrq = &rq->vrq->flash; clear_vda_request(rq); rq->vrq->scsi.function = VDA_FUNC_FLASH; if (sub_func == VDA_FLASH_BEGINW || sub_func == VDA_FLASH_WRITE || sub_func == VDA_FLASH_READ) vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req, data.sge); vrq->length = cpu_to_le32(length); vrq->flash_addr = cpu_to_le32(addr); vrq->checksum = cksum; vrq->sub_func = sub_func; } /* Build a VDA management request. 
*/ void esas2r_build_mgt_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u8 scan_gen, u16 dev_index, u32 length, void *data) { struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt; clear_vda_request(rq); rq->vrq->scsi.function = VDA_FUNC_MGT; vrq->mgt_func = sub_func; vrq->scan_generation = scan_gen; vrq->dev_index = cpu_to_le16(dev_index); vrq->length = cpu_to_le32(length); if (vrq->length) { if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { vrq->sg_list_offset = (u8)offsetof( struct atto_vda_mgmt_req, sge); vrq->sge[0].length = cpu_to_le32(SGE_LAST | length); vrq->sge[0].address = cpu_to_le64( rq->vrq_md->phys_addr + sizeof(union atto_vda_req)); } else { vrq->sg_list_offset = (u8)offsetof( struct atto_vda_mgmt_req, prde); vrq->prde[0].ctl_len = cpu_to_le32(length); vrq->prde[0].address = cpu_to_le64( rq->vrq_md->phys_addr + sizeof(union atto_vda_req)); } } if (data) { esas2r_nuxi_mgt_data(sub_func, data); memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data, length); } } /* Build a VDA asyncronous event (AE) request. */ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) { struct atto_vda_ae_req *vrq = &rq->vrq->ae; clear_vda_request(rq); rq->vrq->scsi.function = VDA_FUNC_AE; vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req, sge); vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length); vrq->sge[0].address = cpu_to_le64( rq->vrq_md->phys_addr + sizeof(union atto_vda_req)); } else { vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req, prde); vrq->prde[0].ctl_len = cpu_to_le32(vrq->length); vrq->prde[0].address = cpu_to_le64( rq->vrq_md->phys_addr + sizeof(union atto_vda_req)); } } /* Build a VDA CLI request. */ void esas2r_build_cli_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, u32 cmd_rsp_len) { struct atto_vda_cli_req *vrq = &rq->vrq->cli; clear_vda_request(rq); rq->vrq->scsi.function = VDA_FUNC_CLI; vrq->length = cpu_to_le32(length); vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len); vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge); } /* Build a VDA IOCTL request. */ void esas2r_build_ioctl_req(struct esas2r_adapter *a, struct esas2r_request *rq, u32 length, u8 sub_func) { struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl; clear_vda_request(rq); rq->vrq->scsi.function = VDA_FUNC_IOCTL; vrq->length = cpu_to_le32(length); vrq->sub_func = sub_func; vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge); } /* Build a VDA configuration request. */ void esas2r_build_cfg_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 sub_func, u32 length, void *data) { struct atto_vda_cfg_req *vrq = &rq->vrq->cfg; clear_vda_request(rq); rq->vrq->scsi.function = VDA_FUNC_CFG; vrq->sub_func = sub_func; vrq->length = cpu_to_le32(length); if (data) { esas2r_nuxi_cfg_data(sub_func, data); memcpy(&vrq->data, data, length); } } static void clear_vda_request(struct esas2r_request *rq) { u32 handle = rq->vrq->scsi.handle; memset(rq->vrq, 0, sizeof(*rq->vrq)); rq->vrq->scsi.handle = handle; rq->req_stat = RS_PENDING; /* since the data buffer is separate clear that too */ memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN); /* * Setup next and prev pointer in case the request is not going through * esas2r_start_request(). */ INIT_LIST_HEAD(&rq->req_list); }
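/*
 * Editor's sketch (not part of the esas2r driver): clear_vda_request() above
 * wipes a request for reuse while preserving the firmware handle that was
 * assigned when the request was set up.  The standalone snippet below shows
 * the same save/memset/restore idiom on a hypothetical structure
 * (struct demo_req); names and layout are invented for illustration only.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct demo_req {
	uint32_t handle;	/* identity that must survive reuse */
	uint32_t status;
	uint8_t  payload[64];
};

/* Reset every field of @req except the handle it was created with. */
static void demo_req_clear(struct demo_req *req)
{
	uint32_t handle = req->handle;	/* 1: save the persistent field  */

	memset(req, 0, sizeof(*req));	/* 2: wipe the whole structure   */
	req->handle = handle;		/* 3: restore the saved identity */
}

int main(void)
{
	struct demo_req r = { .handle = 0x1234, .status = 7 };

	demo_req_clear(&r);
	assert(r.handle == 0x1234 && r.status == 0);	/* handle survives */
	return 0;
}

/*
 * Design note: when the fields to preserve sit at the front of the structure,
 * the kernel's memset_startat() helper (used elsewhere in this dump, e.g. in
 * lpfc's __lpfc_sli_release_iocbq_s4()) zeroes only from a named member
 * onward and avoids the explicit save/restore.
 */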
linux-master
drivers/scsi/esas2r/esas2r_vda.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/lockdep.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include <linux/crash_dump.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> #endif #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_crtn.h" #include "lpfc_logmsg.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" #include "lpfc_vport.h" #include "lpfc_version.h" /* There are only four IOCB completion types. */ typedef enum _lpfc_iocb_type { LPFC_UNKNOWN_IOCB, LPFC_UNSOL_IOCB, LPFC_SOL_IOCB, LPFC_ABORT_IOCB } lpfc_iocb_type; /* Provide function prototypes local to this module. 
*/ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, uint8_t *, uint32_t *); static struct lpfc_iocbq * lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, struct lpfc_iocbq *rspiocbq); static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf); static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe); static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe); static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, struct lpfc_sglq *sglq); union lpfc_wqe128 lpfc_iread_cmd_template; union lpfc_wqe128 lpfc_iwrite_cmd_template; union lpfc_wqe128 lpfc_icmnd_cmd_template; /* Setup WQE templates for IOs */ void lpfc_wqe_cmd_template(void) { union lpfc_wqe128 *wqe; /* IREAD template */ wqe = &lpfc_iread_cmd_template; memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Word 0, 1, 2 - BDE is variable */ /* Word 3 - cmd_buff_len, payload_offset_len is zero */ /* Word 4 - total_xfer_len is variable */ /* Word 5 - is zero */ /* Word 6 - ctxt_tag, xri_tag is variable */ /* Word 7 */ bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE); bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK); bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3); bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI); /* Word 8 - abort_tag is variable */ /* Word 9 - reqtag is variable */ /* Word 10 - dbde, wqes is variable */ bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); /* Word 11 - pbde is variable */ bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN); bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); /* Word 12 - is zero */ /* Word 13, 14, 15 - PBDE is variable */ /* IWRITE template */ wqe = &lpfc_iwrite_cmd_template; memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Word 0, 1, 2 - BDE is variable */ /* Word 3 - cmd_buff_len, payload_offset_len is zero */ /* Word 4 - total_xfer_len is variable */ /* Word 5 - initial_xfer_len is variable */ /* Word 6 - ctxt_tag, xri_tag is variable */ /* Word 7 */ bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE); bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK); bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3); bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI); /* Word 8 - abort_tag is variable */ /* Word 9 - reqtag is variable */ /* Word 10 - dbde, wqes is variable */ bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_wqes, 
&wqe->fcp_iwrite.wqe_com, 1); /* Word 11 - pbde is variable */ bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT); bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); /* Word 12 - is zero */ /* Word 13, 14, 15 - PBDE is variable */ /* ICMND template */ wqe = &lpfc_icmnd_cmd_template; memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Word 0, 1, 2 - BDE is variable */ /* Word 3 - payload_offset_len is variable */ /* Word 4, 5 - is zero */ /* Word 6 - ctxt_tag, xri_tag is variable */ /* Word 7 */ bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE); bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3); bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI); /* Word 8 - abort_tag is variable */ /* Word 9 - reqtag is variable */ /* Word 10 - dbde, wqes is variable */ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE); bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); /* Word 11 */ bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN); bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0); /* Word 12, 13, 14, 15 - is zero */ } #if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN) /** * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function * @srcp: Source memory pointer. * @destp: Destination memory pointer. * @cnt: Number of words required to be copied. * Must be a multiple of sizeof(uint64_t) * * This function is used for copying data between driver memory * and the SLI WQ. This function also changes the endianness * of each word if native endianness is different from SLI * endianness. This function can be called with or without * lock. **/ static void lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) { uint64_t *src = srcp; uint64_t *dest = destp; int i; for (i = 0; i < (int)cnt; i += sizeof(uint64_t)) *dest++ = *src++; } #else #define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c) #endif /** * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue * @q: The Work Queue to operate on. * @wqe: The work Queue Entry to put on the Work queue. * * This routine will copy the contents of @wqe to the next available entry on * the @q. This function will then ring the Work Queue Doorbell to signal the * HBA to start processing the Work Queue Entry. This function returns 0 if * successful. If no entries are available on @q then this function will return * -ENOMEM. * The caller is expected to hold the hbalock when calling this routine. 
**/ static int lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) { union lpfc_wqe *temp_wqe; struct lpfc_register doorbell; uint32_t host_index; uint32_t idx; uint32_t i = 0; uint8_t *tmp; u32 if_type; /* sanity check on queue memory */ if (unlikely(!q)) return -ENOMEM; temp_wqe = lpfc_sli4_qe(q, q->host_index); /* If the host has not yet processed the next entry then we are done */ idx = ((q->host_index + 1) % q->entry_count); if (idx == q->hba_index) { q->WQ_overflow++; return -EBUSY; } q->WQ_posted++; /* set consumption flag every once in a while */ if (!((q->host_index + 1) % q->notify_interval)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); else bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); if (q->dpp_enable && q->phba->cfg_enable_dpp) { /* write to DPP aperture taking advatage of Combined Writes */ tmp = (uint8_t *)temp_wqe; #ifdef __raw_writeq for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) __raw_writeq(*((uint64_t *)(tmp + i)), q->dpp_regaddr + i); #else for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) __raw_writel(*((uint32_t *)(tmp + i)), q->dpp_regaddr + i); #endif } /* ensure WQE bcopy and DPP flushed before doorbell write */ wmb(); /* Update the host index before invoking device */ host_index = q->host_index; q->host_index = idx; /* Ring Doorbell */ doorbell.word0 = 0; if (q->db_format == LPFC_DB_LIST_FORMAT) { if (q->dpp_enable && q->phba->cfg_enable_dpp) { bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, q->dpp_id); bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, q->queue_id); } else { bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); /* Leave bits <23:16> clear for if_type 6 dpp */ if_type = bf_get(lpfc_sli_intf_if_type, &q->phba->sli4_hba.sli_intf); if (if_type != LPFC_SLI_INTF_IF_TYPE_6) bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); } } else if (q->db_format == LPFC_DB_RING_FORMAT) { bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); } else { return -EINVAL; } writel(doorbell.word0, q->db_regaddr); return 0; } /** * lpfc_sli4_wq_release - Updates internal hba index for WQ * @q: The Work Queue to operate on. * @index: The index to advance the hba index to. * * This routine will update the HBA index of a queue to reflect consumption of * Work Queue Entries by the HBA. When the HBA indicates that it has consumed * an entry the host calls this function to update the queue's internal * pointers. **/ static void lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) { /* sanity check on queue memory */ if (unlikely(!q)) return; q->hba_index = index; } /** * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue * @q: The Mailbox Queue to operate on. * @mqe: The Mailbox Queue Entry to put on the Work queue. * * This routine will copy the contents of @mqe to the next available entry on * the @q. This function will then ring the Work Queue Doorbell to signal the * HBA to start processing the Work Queue Entry. This function returns 0 if * successful. If no entries are available on @q then this function will return * -ENOMEM. * The caller is expected to hold the hbalock when calling this routine. 
**/ static uint32_t lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) { struct lpfc_mqe *temp_mqe; struct lpfc_register doorbell; /* sanity check on queue memory */ if (unlikely(!q)) return -ENOMEM; temp_mqe = lpfc_sli4_qe(q, q->host_index); /* If the host has not yet processed the next entry then we are done */ if (((q->host_index + 1) % q->entry_count) == q->hba_index) return -ENOMEM; lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size); /* Save off the mailbox pointer for completion */ q->phba->mbox = (MAILBOX_t *)temp_mqe; /* Update the host index before invoking device */ q->host_index = ((q->host_index + 1) % q->entry_count); /* Ring Doorbell */ doorbell.word0 = 0; bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); return 0; } /** * lpfc_sli4_mq_release - Updates internal hba index for MQ * @q: The Mailbox Queue to operate on. * * This routine will update the HBA index of a queue to reflect consumption of * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed * an entry the host calls this function to update the queue's internal * pointers. This routine returns the number of entries that were consumed by * the HBA. **/ static uint32_t lpfc_sli4_mq_release(struct lpfc_queue *q) { /* sanity check on queue memory */ if (unlikely(!q)) return 0; /* Clear the mailbox pointer for completion */ q->phba->mbox = NULL; q->hba_index = ((q->hba_index + 1) % q->entry_count); return 1; } /** * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ * @q: The Event Queue to get the first valid EQE from * * This routine will get the first valid Event Queue Entry from @q, update * the queue's internal hba index, and return the EQE. If no valid EQEs are in * the Queue (no more work to do), or the Queue is full of EQEs that have been * processed, but not popped back to the HBA then this routine will return NULL. **/ static struct lpfc_eqe * lpfc_sli4_eq_get(struct lpfc_queue *q) { struct lpfc_eqe *eqe; /* sanity check on queue memory */ if (unlikely(!q)) return NULL; eqe = lpfc_sli4_qe(q, q->host_index); /* If the next EQE is not valid then we are done */ if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) return NULL; /* * insert barrier for instruction interlock : data from the hardware * must have the valid bit checked before it can be copied and acted * upon. Speculative instructions were allowing a bcopy at the start * of lpfc_sli4_fp_handle_wcqe(), which is called immediately * after our return, to copy data before the valid bit check above * was done. As such, some of the copied data was stale. The barrier * ensures the check is before any data is copied. 
*/ mb(); return eqe; } /** * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ * @q: The Event Queue to disable interrupts * **/ void lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) { struct lpfc_register doorbell; doorbell.word0 = 0; bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); } /** * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ * @q: The Event Queue to disable interrupts * **/ void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) { struct lpfc_register doorbell; doorbell.word0 = 0; bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); } /** * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state * @phba: adapter with EQ * @q: The Event Queue that the host has completed processing for. * @count: Number of elements that have been consumed * @arm: Indicates whether the host wants to arms this CQ. * * This routine will notify the HBA, by ringing the doorbell, that count * number of EQEs have been processed. The @arm parameter indicates whether * the queue should be rearmed when ringing the doorbell. **/ void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, uint32_t count, bool arm) { struct lpfc_register doorbell; /* sanity check on queue memory */ if (unlikely(!q || (count == 0 && !arm))) return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) { bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); } bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); /* PCI read to flush PCI pipeline on re-arming for INTx mode */ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) readl(q->phba->sli4_hba.EQDBregaddr); } /** * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state * @phba: adapter with EQ * @q: The Event Queue that the host has completed processing for. * @count: Number of elements that have been consumed * @arm: Indicates whether the host wants to arms this CQ. * * This routine will notify the HBA, by ringing the doorbell, that count * number of EQEs have been processed. The @arm parameter indicates whether * the queue should be rearmed when ringing the doorbell. 
**/ void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, uint32_t count, bool arm) { struct lpfc_register doorbell; /* sanity check on queue memory */ if (unlikely(!q || (count == 0 && !arm))) return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); /* PCI read to flush PCI pipeline on re-arming for INTx mode */ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) readl(q->phba->sli4_hba.EQDBregaddr); } static void __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, struct lpfc_eqe *eqe) { if (!phba->sli4_hba.pc_sli4_params.eqav) bf_set_le32(lpfc_eqe_valid, eqe, 0); eq->host_index = ((eq->host_index + 1) % eq->entry_count); /* if the index wrapped around, toggle the valid bit */ if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) eq->qe_valid = (eq->qe_valid) ? 0 : 1; } static void lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) { struct lpfc_eqe *eqe = NULL; u32 eq_count = 0, cq_count = 0; struct lpfc_cqe *cqe = NULL; struct lpfc_queue *cq = NULL, *childq = NULL; int cqid = 0; /* walk all the EQ entries and drop on the floor */ eqe = lpfc_sli4_eq_get(eq); while (eqe) { /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); cq = NULL; list_for_each_entry(childq, &eq->child_list, list) { if (childq->queue_id == cqid) { cq = childq; break; } } /* If CQ is valid, iterate through it and drop all the CQEs */ if (cq) { cqe = lpfc_sli4_cq_get(cq); while (cqe) { __lpfc_sli4_consume_cqe(phba, cq, cqe); cq_count++; cqe = lpfc_sli4_cq_get(cq); } /* Clear and re-arm the CQ */ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count, LPFC_QUEUE_REARM); cq_count = 0; } __lpfc_sli4_consume_eqe(phba, eq, eqe); eq_count++; eqe = lpfc_sli4_eq_get(eq); } /* Clear and re-arm the EQ */ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM); } static int lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, u8 rearm, enum lpfc_poll_mode poll_mode) { struct lpfc_eqe *eqe; int count = 0, consumed = 0; if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) goto rearm_and_exit; eqe = lpfc_sli4_eq_get(eq); while (eqe) { lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode); __lpfc_sli4_consume_eqe(phba, eq, eqe); consumed++; if (!(++count % eq->max_proc_limit)) break; if (!(count % eq->notify_interval)) { phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM); consumed = 0; } eqe = lpfc_sli4_eq_get(eq); } eq->EQ_processed += count; /* Track the max number of EQEs processed in 1 intr */ if (count > eq->EQ_max_eqe) eq->EQ_max_eqe = count; xchg(&eq->queue_claimed, 0); rearm_and_exit: /* Always clear the EQ. */ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm); return count; } /** * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ * @q: The Completion Queue to get the first valid CQE from * * This routine will get the first valid Completion Queue Entry from @q, update * the queue's internal hba index, and return the CQE. If no valid CQEs are in * the Queue (no more work to do), or the Queue is full of CQEs that have been * processed, but not popped back to the HBA then this routine will return NULL. 
**/ static struct lpfc_cqe * lpfc_sli4_cq_get(struct lpfc_queue *q) { struct lpfc_cqe *cqe; /* sanity check on queue memory */ if (unlikely(!q)) return NULL; cqe = lpfc_sli4_qe(q, q->host_index); /* If the next CQE is not valid then we are done */ if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) return NULL; /* * insert barrier for instruction interlock : data from the hardware * must have the valid bit checked before it can be copied and acted * upon. Given what was seen in lpfc_sli4_cq_get() of speculative * instructions allowing action on content before valid bit checked, * add barrier here as well. May not be needed as "content" is a * single 32-bit entity here (vs multi word structure for cq's). */ mb(); return cqe; } static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { if (!phba->sli4_hba.pc_sli4_params.cqav) bf_set_le32(lpfc_cqe_valid, cqe, 0); cq->host_index = ((cq->host_index + 1) % cq->entry_count); /* if the index wrapped around, toggle the valid bit */ if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) cq->qe_valid = (cq->qe_valid) ? 0 : 1; } /** * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. * @phba: the adapter with the CQ * @q: The Completion Queue that the host has completed processing for. * @count: the number of elements that were consumed * @arm: Indicates whether the host wants to arms this CQ. * * This routine will notify the HBA, by ringing the doorbell, that the * CQEs have been processed. The @arm parameter specifies whether the * queue should be rearmed when ringing the doorbell. **/ void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, uint32_t count, bool arm) { struct lpfc_register doorbell; /* sanity check on queue memory */ if (unlikely(!q || (count == 0 && !arm))) return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); } /** * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state. * @phba: the adapter with the CQ * @q: The Completion Queue that the host has completed processing for. * @count: the number of elements that were consumed * @arm: Indicates whether the host wants to arms this CQ. * * This routine will notify the HBA, by ringing the doorbell, that the * CQEs have been processed. The @arm parameter specifies whether the * queue should be rearmed when ringing the doorbell. **/ void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, uint32_t count, bool arm) { struct lpfc_register doorbell; /* sanity check on queue memory */ if (unlikely(!q || (count == 0 && !arm))) return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count); bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); } /* * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue * * This routine will copy the contents of @wqe to the next available entry on * the @q. 
This function will then ring the Receive Queue Doorbell to signal the * HBA to start processing the Receive Queue Entry. This function returns the * index that the rqe was copied to if successful. If no entries are available * on @q then this function will return -ENOMEM. * The caller is expected to hold the hbalock when calling this routine. **/ int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) { struct lpfc_rqe *temp_hrqe; struct lpfc_rqe *temp_drqe; struct lpfc_register doorbell; int hq_put_index; int dq_put_index; /* sanity check on queue memory */ if (unlikely(!hq) || unlikely(!dq)) return -ENOMEM; hq_put_index = hq->host_index; dq_put_index = dq->host_index; temp_hrqe = lpfc_sli4_qe(hq, hq_put_index); temp_drqe = lpfc_sli4_qe(dq, dq_put_index); if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) return -EINVAL; if (hq_put_index != dq_put_index) return -EINVAL; /* If the host has not yet processed the next entry then we are done */ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) return -EBUSY; lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); /* Update the host index to point to the next slot */ hq->host_index = ((hq_put_index + 1) % hq->entry_count); dq->host_index = ((dq_put_index + 1) % dq->entry_count); hq->RQ_buf_posted++; /* Ring The Header Receive Queue Doorbell */ if (!(hq->host_index % hq->notify_interval)) { doorbell.word0 = 0; if (hq->db_format == LPFC_DB_RING_FORMAT) { bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, hq->notify_interval); bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, hq->notify_interval); bf_set(lpfc_rq_db_list_fm_index, &doorbell, hq->host_index); bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); } else { return -EINVAL; } writel(doorbell.word0, hq->db_regaddr); } return hq_put_index; } /* * lpfc_sli4_rq_release - Updates internal hba index for RQ * * This routine will update the HBA index of a queue to reflect consumption of * one Receive Queue Entry by the HBA. When the HBA indicates that it has * consumed an entry the host calls this function to update the queue's * internal pointers. This routine returns the number of entries that were * consumed by the HBA. **/ static uint32_t lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) { /* sanity check on queue memory */ if (unlikely(!hq) || unlikely(!dq)) return 0; if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) return 0; hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); return 1; } /** * lpfc_cmd_iocb - Get next command iocb entry in the ring * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function returns pointer to next command iocb entry * in the command ring. The caller must hold hbalock to prevent * other threads consume the next command iocb. * SLI-2/SLI-3 provide different sized iocbs. **/ static inline IOCB_t * lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + pring->sli.sli3.cmdidx * phba->iocb_cmd_size); } /** * lpfc_resp_iocb - Get next response iocb entry in the ring * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function returns pointer to next response iocb entry * in the response ring. 
The caller must hold hbalock to make sure * that no other thread consume the next response iocb. * SLI-2/SLI-3 provide different sized iocbs. **/ static inline IOCB_t * lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + pring->sli.sli3.rspidx * phba->iocb_rsp_size); } /** * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool * @phba: Pointer to HBA context object. * * This function is called with hbalock held. This function * allocates a new driver iocb object from the iocb pool. If the * allocation is successful, it returns pointer to the newly * allocated iocb object else it returns NULL. **/ struct lpfc_iocbq * __lpfc_sli_get_iocbq(struct lpfc_hba *phba) { struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; struct lpfc_iocbq * iocbq = NULL; lockdep_assert_held(&phba->hbalock); list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); if (iocbq) phba->iocb_cnt++; if (phba->iocb_cnt > phba->iocb_max) phba->iocb_max = phba->iocb_cnt; return iocbq; } /** * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. * @phba: Pointer to HBA context object. * @xritag: XRI value. * * This function clears the sglq pointer from the array of active * sglq's. The xritag that is passed in is used to index into the * array. Before the xritag can be used it needs to be adjusted * by subtracting the xribase. * * Returns sglq ponter = success, NULL = Failure. **/ struct lpfc_sglq * __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { struct lpfc_sglq *sglq; sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; return sglq; } /** * __lpfc_get_active_sglq - Get the active sglq for this XRI. * @phba: Pointer to HBA context object. * @xritag: XRI value. * * This function returns the sglq pointer from the array of active * sglq's. The xritag that is passed in is used to index into the * array. Before the xritag can be used it needs to be adjusted * by subtracting the xribase. * * Returns sglq ponter = success, NULL = Failure. **/ struct lpfc_sglq * __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { struct lpfc_sglq *sglq; sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; return sglq; } /** * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. * @phba: Pointer to HBA context object. * @xritag: xri used in this exchange. * @rrq: The RRQ to be cleared. * **/ void lpfc_clr_rrq_active(struct lpfc_hba *phba, uint16_t xritag, struct lpfc_node_rrq *rrq) { struct lpfc_nodelist *ndlp = NULL; /* Lookup did to verify if did is still active on this vport */ if (rrq->vport) ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); if (!ndlp) goto out; if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { rrq->send_rrq = 0; rrq->xritag = 0; rrq->rrq_stop_time = 0; } out: mempool_free(rrq, phba->rrq_pool); } /** * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. * @phba: Pointer to HBA context object. * * This function is called with hbalock held. This function * Checks if stop_time (ratov from setting rrq active) has * been reached, if it has and the send_rrq flag is set then * it will call lpfc_send_rrq. If the send_rrq flag is not set * then it will just call the routine to clear the rrq and * free the rrq resource. * The timer is set to the next rrq that is going to expire before * leaving the routine. 
* **/ void lpfc_handle_rrq_active(struct lpfc_hba *phba) { struct lpfc_node_rrq *rrq; struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; LIST_HEAD(send_rrq); spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) list_move(&rrq->list, &send_rrq); else if (time_before(rrq->rrq_stop_time, next_time)) next_time = rrq->rrq_stop_time; } spin_unlock_irqrestore(&phba->hbalock, iflags); if ((!list_empty(&phba->active_rrq_list)) && (!(phba->pport->load_flag & FC_UNLOADING))) mod_timer(&phba->rrq_tmr, next_time); list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { list_del(&rrq->list); if (!rrq->send_rrq) { /* this call will free the rrq */ lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } else if (lpfc_send_rrq(phba, rrq)) { /* if we send the rrq then the completion handler * will clear the bit in the xribitmap. */ lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } } } /** * lpfc_get_active_rrq - Get the active RRQ for this exchange. * @vport: Pointer to vport context object. * @xri: The xri used in the exchange. * @did: The targets DID for this exchange. * * returns NULL = rrq not found in the phba->active_rrq_list. * rrq = rrq for this xri and target. **/ struct lpfc_node_rrq * lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) { struct lpfc_hba *phba = vport->phba; struct lpfc_node_rrq *rrq; struct lpfc_node_rrq *nextrrq; unsigned long iflags; if (phba->sli_rev != LPFC_SLI_REV4) return NULL; spin_lock_irqsave(&phba->hbalock, iflags); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (rrq->vport == vport && rrq->xritag == xri && rrq->nlp_DID == did){ list_del(&rrq->list); spin_unlock_irqrestore(&phba->hbalock, iflags); return rrq; } } spin_unlock_irqrestore(&phba->hbalock, iflags); return NULL; } /** * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. * @vport: Pointer to vport context object. * @ndlp: Pointer to the lpfc_node_list structure. * If ndlp is NULL Remove all active RRQs for this vport from the * phba->active_rrq_list and clear the rrq. * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. **/ void lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_node_rrq *rrq; struct lpfc_node_rrq *nextrrq; unsigned long iflags; LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; if (!ndlp) { lpfc_sli4_vport_delete_els_xri_aborted(vport); lpfc_sli4_vport_delete_fcp_xri_aborted(vport); } spin_lock_irqsave(&phba->hbalock, iflags); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (rrq->vport != vport) continue; if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID)) list_move(&rrq->list, &rrq_list); } spin_unlock_irqrestore(&phba->hbalock, iflags); list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { list_del(&rrq->list); lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } } /** * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. * @phba: Pointer to HBA context object. * @ndlp: Targets nodelist pointer for this exchange. * @xritag: the xri in the bitmap to test. * * This function returns: * 0 = rrq not active for this xri * 1 = rrq is valid for this xri. 
**/ int lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t xritag) { if (!ndlp) return 0; if (!ndlp->active_rrqs_xri_bitmap) return 0; if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) return 1; else return 0; } /** * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. * @phba: Pointer to HBA context object. * @ndlp: nodelist pointer for this target. * @xritag: xri used in this exchange. * @rxid: Remote Exchange ID. * @send_rrq: Flag used to determine if we should send rrq els cmd. * * This function takes the hbalock. * The active bit is always set in the active rrq xri_bitmap even * if there is no slot avaiable for the other rrq information. * * returns 0 rrq actived for this xri * < 0 No memory or invalid ndlp. **/ int lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t xritag, uint16_t rxid, uint16_t send_rrq) { unsigned long iflags; struct lpfc_node_rrq *rrq; int empty; if (!ndlp) return -EINVAL; if (!phba->cfg_enable_rrq) return -EINVAL; spin_lock_irqsave(&phba->hbalock, iflags); if (phba->pport->load_flag & FC_UNLOADING) { phba->hba_flag &= ~HBA_RRQ_ACTIVE; goto out; } if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) goto out; if (!ndlp->active_rrqs_xri_bitmap) goto out; if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) goto out; spin_unlock_irqrestore(&phba->hbalock, iflags); rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC); if (!rrq) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" " DID:0x%x Send:%d\n", xritag, rxid, ndlp->nlp_DID, send_rrq); return -EINVAL; } if (phba->cfg_enable_rrq == 1) rrq->send_rrq = send_rrq; else rrq->send_rrq = 0; rrq->xritag = xritag; rrq->rrq_stop_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; rrq->rxid = rxid; spin_lock_irqsave(&phba->hbalock, iflags); empty = list_empty(&phba->active_rrq_list); list_add_tail(&rrq->list, &phba->active_rrq_list); phba->hba_flag |= HBA_RRQ_ACTIVE; if (empty) lpfc_worker_wake_up(phba); spin_unlock_irqrestore(&phba->hbalock, iflags); return 0; out: spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2921 Can't set rrq active xri:0x%x rxid:0x%x" " DID:0x%x Send:%d\n", xritag, rxid, ndlp->nlp_DID, send_rrq); return -EINVAL; } /** * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. * @piocbq: Pointer to the iocbq. * * The driver calls this function with either the nvme ls ring lock * or the fc els ring lock held depending on the iocb usage. This function * gets a new driver sglq object from the sglq list. If the list is not empty * then it is successful, it returns pointer to the newly allocated sglq * object else it returns NULL. 
**/ static struct lpfc_sglq * __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) { struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; struct lpfc_sglq *sglq = NULL; struct lpfc_sglq *start_sglq = NULL; struct lpfc_io_buf *lpfc_cmd; struct lpfc_nodelist *ndlp; int found = 0; u8 cmnd; cmnd = get_job_cmnd(phba, piocbq); if (piocbq->cmd_flag & LPFC_IO_FCP) { lpfc_cmd = piocbq->io_buf; ndlp = lpfc_cmd->rdata->pnode; } else if ((cmnd == CMD_GEN_REQUEST64_CR) && !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) { ndlp = piocbq->ndlp; } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) { if (piocbq->cmd_flag & LPFC_IO_LOOPBACK) ndlp = NULL; else ndlp = piocbq->ndlp; } else { ndlp = piocbq->ndlp; } spin_lock(&phba->sli4_hba.sgl_list_lock); list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); start_sglq = sglq; while (!found) { if (!sglq) break; if (ndlp && ndlp->active_rrqs_xri_bitmap && test_bit(sglq->sli4_lxritag, ndlp->active_rrqs_xri_bitmap)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. */ list_add_tail(&sglq->list, lpfc_els_sgl_list); sglq = NULL; list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); if (sglq == start_sglq) { list_add_tail(&sglq->list, lpfc_els_sgl_list); sglq = NULL; break; } else continue; } sglq->ndlp = ndlp; found = 1; phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; sglq->state = SGL_ALLOCATED; } spin_unlock(&phba->sli4_hba.sgl_list_lock); return sglq; } /** * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. * @piocbq: Pointer to the iocbq. * * This function is called with the sgl_list lock held. This function * gets a new driver sglq object from the sglq list. If the * list is not empty then it is successful, it returns pointer to the newly * allocated sglq object else it returns NULL. **/ struct lpfc_sglq * __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) { struct list_head *lpfc_nvmet_sgl_list; struct lpfc_sglq *sglq = NULL; lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); if (!sglq) return NULL; phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; sglq->state = SGL_ALLOCATED; return sglq; } /** * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool * @phba: Pointer to HBA context object. * * This function is called with no lock held. This function * allocates a new driver iocb object from the iocb pool. If the * allocation is successful, it returns pointer to the newly * allocated iocb object else it returns NULL. **/ struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *phba) { struct lpfc_iocbq * iocbq = NULL; unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); iocbq = __lpfc_sli_get_iocbq(phba); spin_unlock_irqrestore(&phba->hbalock, iflags); return iocbq; } /** * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool * @phba: Pointer to HBA context object. * @iocbq: Pointer to driver iocb object. * * This function is called to release the driver iocb object * to the iocb pool. The iotag in the iocb object * does not change for each use of the iocb object. This function * clears all other fields of the iocb object when it is freed. * The sqlq structure that holds the xritag and phys and virtual * mappings for the scatter gather list is retrieved from the * active array of sglq. 
The get of the sglq pointer also clears * the entry in the array. If the status of the IO indiactes that * this IO was aborted then the sglq entry it put on the * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the * IO has good status or fails for any other reason then the sglq * entry is added to the free list (lpfc_els_sgl_list). The hbalock is * asserted held in the code path calling this routine. **/ static void __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { struct lpfc_sglq *sglq; unsigned long iflag = 0; struct lpfc_sli_ring *pring; if (iocbq->sli4_xritag == NO_XRI) sglq = NULL; else sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); if (sglq) { if (iocbq->cmd_flag & LPFC_IO_NVMET) { spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); sglq->state = SGL_FREED; sglq->ndlp = NULL; list_add_tail(&sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); spin_unlock_irqrestore( &phba->sli4_hba.sgl_list_lock, iflag); goto out; } if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) && (!(unlikely(pci_channel_offline(phba->pcidev)))) && sglq->state != SGL_XRI_ABORTED) { spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); /* Check if we can get a reference on ndlp */ if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp)) sglq->ndlp = NULL; list_add(&sglq->list, &phba->sli4_hba.lpfc_abts_els_sgl_list); spin_unlock_irqrestore( &phba->sli4_hba.sgl_list_lock, iflag); } else { spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); sglq->state = SGL_FREED; sglq->ndlp = NULL; list_add_tail(&sglq->list, &phba->sli4_hba.lpfc_els_sgl_list); spin_unlock_irqrestore( &phba->sli4_hba.sgl_list_lock, iflag); pring = lpfc_phba_elsring(phba); /* Check if TXQ queue needs to be serviced */ if (pring && (!list_empty(&pring->txq))) lpfc_worker_wake_up(phba); } } out: /* * Clean all volatile data fields, preserve iotag and node struct. */ memset_startat(iocbq, 0, wqe); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | LPFC_IO_NVME_LS); list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } /** * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool * @phba: Pointer to HBA context object. * @iocbq: Pointer to driver iocb object. * * This function is called to release the driver iocb object to the * iocb pool. The iotag in the iocb object does not change for each * use of the iocb object. This function clears all other fields of * the iocb object when it is freed. The hbalock is asserted held in * the code path calling this routine. **/ static void __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { /* * Clean all volatile data fields, preserve iotag and node struct. */ memset_startat(iocbq, 0, iocb); iocbq->sli4_xritag = NO_XRI; list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } /** * __lpfc_sli_release_iocbq - Release iocb to the iocb pool * @phba: Pointer to HBA context object. * @iocbq: Pointer to driver iocb object. * * This function is called with hbalock held to release driver * iocb object to the iocb pool. The iotag in the iocb object * does not change for each use of the iocb object. This function * clears all other fields of the iocb object when it is freed. **/ static void __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { lockdep_assert_held(&phba->hbalock); phba->__lpfc_sli_release_iocbq(phba, iocbq); phba->iocb_cnt--; } /** * lpfc_sli_release_iocbq - Release iocb to the iocb pool * @phba: Pointer to HBA context object. 
* @iocbq: Pointer to driver iocb object. * * This function is called with no lock held to release the iocb to * iocb pool. **/ void lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { unsigned long iflags; /* * Clean all volatile data fields, preserve iotag and node struct. */ spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_sli_release_iocbq(phba, iocbq); spin_unlock_irqrestore(&phba->hbalock, iflags); } /** * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. * @phba: Pointer to HBA context object. * @iocblist: List of IOCBs. * @ulpstatus: ULP status in IOCB command field. * @ulpWord4: ULP word-4 in IOCB command field. * * This function is called with a list of IOCBs to cancel. It cancels the IOCB * on the list by invoking the complete callback function associated with the * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond * fields. **/ void lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, uint32_t ulpstatus, uint32_t ulpWord4) { struct lpfc_iocbq *piocb; while (!list_empty(iocblist)) { list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); if (piocb->cmd_cmpl) { if (piocb->cmd_flag & LPFC_IO_NVME) { lpfc_nvme_cancel_iocb(phba, piocb, ulpstatus, ulpWord4); } else { if (phba->sli_rev == LPFC_SLI_REV4) { bf_set(lpfc_wcqe_c_status, &piocb->wcqe_cmpl, ulpstatus); piocb->wcqe_cmpl.parameter = ulpWord4; } else { piocb->iocb.ulpStatus = ulpstatus; piocb->iocb.un.ulpWord[4] = ulpWord4; } (piocb->cmd_cmpl) (phba, piocb, piocb); } } else { lpfc_sli_release_iocbq(phba, piocb); } } return; } /** * lpfc_sli_iocb_cmd_type - Get the iocb type * @iocb_cmnd: iocb command code. * * This function is called by ring event handler function to get the iocb type. * This function translates the iocb command to an iocb command type used to * decide the final disposition of each completed IOCB. * The function returns * LPFC_UNKNOWN_IOCB if it is an unsupported iocb * LPFC_SOL_IOCB if it is a solicited iocb completion * LPFC_ABORT_IOCB if it is an abort iocb * LPFC_UNSOL_IOCB if it is an unsolicited iocb * * The caller is not required to hold any lock. 
**/ static lpfc_iocb_type lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) { lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; if (iocb_cmnd > CMD_MAX_IOCB_CMD) return 0; switch (iocb_cmnd) { case CMD_XMIT_SEQUENCE_CR: case CMD_XMIT_SEQUENCE_CX: case CMD_XMIT_BCAST_CN: case CMD_XMIT_BCAST_CX: case CMD_ELS_REQUEST_CR: case CMD_ELS_REQUEST_CX: case CMD_CREATE_XRI_CR: case CMD_CREATE_XRI_CX: case CMD_GET_RPI_CN: case CMD_XMIT_ELS_RSP_CX: case CMD_GET_RPI_CR: case CMD_FCP_IWRITE_CR: case CMD_FCP_IWRITE_CX: case CMD_FCP_IREAD_CR: case CMD_FCP_IREAD_CX: case CMD_FCP_ICMND_CR: case CMD_FCP_ICMND_CX: case CMD_FCP_TSEND_CX: case CMD_FCP_TRSP_CX: case CMD_FCP_TRECEIVE_CX: case CMD_FCP_AUTO_TRSP_CX: case CMD_ADAPTER_MSG: case CMD_ADAPTER_DUMP: case CMD_XMIT_SEQUENCE64_CR: case CMD_XMIT_SEQUENCE64_CX: case CMD_XMIT_BCAST64_CN: case CMD_XMIT_BCAST64_CX: case CMD_ELS_REQUEST64_CR: case CMD_ELS_REQUEST64_CX: case CMD_FCP_IWRITE64_CR: case CMD_FCP_IWRITE64_CX: case CMD_FCP_IREAD64_CR: case CMD_FCP_IREAD64_CX: case CMD_FCP_ICMND64_CR: case CMD_FCP_ICMND64_CX: case CMD_FCP_TSEND64_CX: case CMD_FCP_TRSP64_CX: case CMD_FCP_TRECEIVE64_CX: case CMD_GEN_REQUEST64_CR: case CMD_GEN_REQUEST64_CX: case CMD_XMIT_ELS_RSP64_CX: case DSSCMD_IWRITE64_CR: case DSSCMD_IWRITE64_CX: case DSSCMD_IREAD64_CR: case DSSCMD_IREAD64_CX: case CMD_SEND_FRAME: type = LPFC_SOL_IOCB; break; case CMD_ABORT_XRI_CN: case CMD_ABORT_XRI_CX: case CMD_CLOSE_XRI_CN: case CMD_CLOSE_XRI_CX: case CMD_XRI_ABORTED_CX: case CMD_ABORT_MXRI64_CN: case CMD_XMIT_BLS_RSP64_CX: type = LPFC_ABORT_IOCB; break; case CMD_RCV_SEQUENCE_CX: case CMD_RCV_ELS_REQ_CX: case CMD_RCV_SEQUENCE64_CX: case CMD_RCV_ELS_REQ64_CX: case CMD_ASYNC_STATUS: case CMD_IOCB_RCV_SEQ64_CX: case CMD_IOCB_RCV_ELS64_CX: case CMD_IOCB_RCV_CONT64_CX: case CMD_IOCB_RET_XRI64_CX: type = LPFC_UNSOL_IOCB; break; case CMD_IOCB_XMIT_MSEQ64_CR: case CMD_IOCB_XMIT_MSEQ64_CX: case CMD_IOCB_RCV_SEQ_LIST64_CX: case CMD_IOCB_RCV_ELS_LIST64_CX: case CMD_IOCB_CLOSE_EXTENDED_CN: case CMD_IOCB_ABORT_EXTENDED_CN: case CMD_IOCB_RET_HBQE64_CN: case CMD_IOCB_FCP_IBIDIR64_CR: case CMD_IOCB_FCP_IBIDIR64_CX: case CMD_IOCB_FCP_ITASKMGT64_CX: case CMD_IOCB_LOGENTRY_CN: case CMD_IOCB_LOGENTRY_ASYNC_CN: printk("%s - Unhandled SLI-3 Command x%x\n", __func__, iocb_cmnd); type = LPFC_UNKNOWN_IOCB; break; default: type = LPFC_UNKNOWN_IOCB; break; } return type; } /** * lpfc_sli_ring_map - Issue config_ring mbox for all rings * @phba: Pointer to HBA context object. * * This function is called from SLI initialization code * to configure every ring of the HBA's SLI interface. The * caller is not required to hold any lock. This function issues * a config_ring mailbox command for each ring. * This function returns zero if successful else returns a negative * error code. 
**/ static int lpfc_sli_ring_map(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *pmb; MAILBOX_t *pmbox; int i, rc, ret = 0; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) return -ENOMEM; pmbox = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; for (i = 0; i < psli->num_rings; i++) { lpfc_config_ring(phba, i, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0446 Adapter failed to init (%d), " "mbxCmd x%x CFG_RING, mbxStatus x%x, " "ring %d\n", rc, pmbox->mbxCommand, pmbox->mbxStatus, i); phba->link_state = LPFC_HBA_ERROR; ret = -ENXIO; break; } } mempool_free(pmb, phba->mbox_mem_pool); return ret; } /** * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to the driver iocb object. * * The driver calls this function with the hbalock held for SLI3 ports or * the ring lock held for SLI4 ports. The function adds the * new iocb to txcmplq of the given ring. This function always returns * 0. If this function is called for ELS ring, this function checks if * there is a vport associated with the ELS command. This function also * starts els_tmofunc timer if this is an ELS command. **/ static int lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { u32 ulp_command = 0; BUG_ON(!piocb); ulp_command = get_job_cmnd(phba, piocb); list_add_tail(&piocb->list, &pring->txcmplq); piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt++; if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (ulp_command != CMD_ABORT_XRI_WQE) && (ulp_command != CMD_ABORT_XRI_CN) && (ulp_command != CMD_CLOSE_XRI_CN)) { BUG_ON(!piocb->vport); if (!(piocb->vport->load_flag & FC_UNLOADING)) mod_timer(&piocb->vport->els_tmofunc, jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } return 0; } /** * lpfc_sli_ringtx_get - Get first element of the txq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function is called with hbalock held to get next * iocb in txq of the given ring. If there is any iocb in * the txq, the function returns first iocb in the list after * removing the iocb from the list, else it returns NULL. **/ struct lpfc_iocbq * lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_iocbq *cmd_iocb; lockdep_assert_held(&phba->hbalock); list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); return cmd_iocb; } /** * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl * @phba: Pointer to HBA context object. * @cmdiocb: Pointer to driver command iocb object. * @rspiocb: Pointer to driver response iocb object. * * This routine will inform the driver of any BW adjustments we need * to make. These changes will be picked up during the next CMF * timer interrupt. In addition, any BW changes will be logged * with LOG_CGN_MGMT. 
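 *
 * This routine is not called directly; lpfc_issue_cmf_sync_wqe() below
 * installs it as the WQE completion callback before issuing the WQE:
 *
 *   sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;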
**/ static void lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { union lpfc_wqe128 *wqe; uint32_t status, info; struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl; uint64_t bw, bwdif, slop; uint64_t pcent, bwpcent; int asig, afpin, sigcnt, fpincnt; int wsigmax, wfpinmax, cg, tdp; char *s; /* First check for error */ status = bf_get(lpfc_wcqe_c_status, wcqe); if (status) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6211 CMF_SYNC_WQE Error " "req_tag x%x status x%x hwstatus x%x " "tdatap x%x parm x%x\n", bf_get(lpfc_wcqe_c_request_tag, wcqe), bf_get(lpfc_wcqe_c_status, wcqe), bf_get(lpfc_wcqe_c_hw_status, wcqe), wcqe->total_data_placed, wcqe->parameter); goto out; } /* Gather congestion information on a successful cmpl */ info = wcqe->parameter; phba->cmf_active_info = info; /* See if firmware info count is valid or has changed */ if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info) info = 0; else phba->cmf_info_per_interval = info; tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe); cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe); /* Get BW requirement from firmware */ bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; if (!bw) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6212 CMF_SYNC_WQE x%x: NULL bw\n", bf_get(lpfc_wcqe_c_request_tag, wcqe)); goto out; } /* Gather information needed for logging if a BW change is required */ wqe = &cmdiocb->wqe; asig = bf_get(cmf_sync_asig, &wqe->cmf_sync); afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync); fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync); sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync); if (phba->cmf_max_bytes_per_interval != bw || (asig || afpin || sigcnt || fpincnt)) { /* Are we increasing or decreasing BW */ if (phba->cmf_max_bytes_per_interval < bw) { bwdif = bw - phba->cmf_max_bytes_per_interval; s = "Increase"; } else { bwdif = phba->cmf_max_bytes_per_interval - bw; s = "Decrease"; } /* What is the change percentage */ slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/ pcent = div64_u64(bwdif * 100 + slop, phba->cmf_link_byte_count); bwpcent = div64_u64(bw * 100 + slop, phba->cmf_link_byte_count); /* Because of bytes adjustment due to shorter timer in * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and * may seem like BW is above 100%. 
*/ if (bwpcent > 100) bwpcent = 100; if (phba->cmf_max_bytes_per_interval < bw && bwpcent > 95) lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6208 Congestion bandwidth " "limits removed\n"); else if ((phba->cmf_max_bytes_per_interval > bw) && ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6209 Congestion bandwidth " "limits in effect\n"); if (asig) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6237 BW Threshold %lld%% (%lld): " "%lld%% %s: Signal Alarm: cg:%d " "Info:%u\n", bwpcent, bw, pcent, s, cg, phba->cmf_active_info); } else if (afpin) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6238 BW Threshold %lld%% (%lld): " "%lld%% %s: FPIN Alarm: cg:%d " "Info:%u\n", bwpcent, bw, pcent, s, cg, phba->cmf_active_info); } else if (sigcnt) { wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6239 BW Threshold %lld%% (%lld): " "%lld%% %s: Signal Warning: " "Cnt %d Max %d: cg:%d Info:%u\n", bwpcent, bw, pcent, s, sigcnt, wsigmax, cg, phba->cmf_active_info); } else if (fpincnt) { wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6240 BW Threshold %lld%% (%lld): " "%lld%% %s: FPIN Warning: " "Cnt %d Max %d: cg:%d Info:%u\n", bwpcent, bw, pcent, s, fpincnt, wfpinmax, cg, phba->cmf_active_info); } else { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6241 BW Threshold %lld%% (%lld): " "CMF %lld%% %s: cg:%d Info:%u\n", bwpcent, bw, pcent, s, cg, phba->cmf_active_info); } } else if (info) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6246 Info Threshold %u\n", info); } /* Save BW change to be picked up during next timer interrupt */ phba->cmf_last_sync_bw = bw; out: lpfc_sli_release_iocbq(phba, cmdiocb); } /** * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE * @phba: Pointer to HBA context object. * @ms: ms to set in WQE interval, 0 means use init op * @total: Total rcv bytes for this interval * * This routine is called every CMF timer interrupt. Its purpose is * to issue a CMF_SYNC_WQE to the firmware to inform it of any events * that may indicate we have congestion (FPINs or Signals). Upon * completion, the firmware will indicate any BW restrictions the * driver may need to take. **/ int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) { union lpfc_wqe128 *wqe; struct lpfc_iocbq *sync_buf; unsigned long iflags; u32 ret_val; u32 atot, wtot, max; u8 warn_sync_period = 0; /* First address any alarm / warning activity */ atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ if (phba->cmf_active_mode != LPFC_CFG_MANAGED || phba->link_state == LPFC_LINK_DOWN) return 0; spin_lock_irqsave(&phba->hbalock, iflags); sync_buf = __lpfc_sli_get_iocbq(phba); if (!sync_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, "6244 No available WQEs for CMF_SYNC_WQE\n"); ret_val = ENOMEM; goto out_unlock; } wqe = &sync_buf->wqe; /* WQEs are reused. 
Clear stale data and set key fields to zero */ memset(wqe, 0, sizeof(*wqe)); /* If this is the very first CMF_SYNC_WQE, issue an init operation */ if (!ms) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6441 CMF Init %d - CMF_SYNC_WQE\n", phba->fc_eventTag); bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */ bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL); goto initpath; } bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */ bf_set(cmf_sync_interval, &wqe->cmf_sync, ms); /* Check for alarms / warnings */ if (atot) { if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { /* We hit an Signal alarm condition */ bf_set(cmf_sync_asig, &wqe->cmf_sync, 1); } else { /* We hit a FPIN alarm condition */ bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1); } } else if (wtot) { if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { /* We hit an Signal warning condition */ max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * lpfc_acqe_cgn_frequency; bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); warn_sync_period = lpfc_acqe_cgn_frequency; } else { /* We hit a FPIN warning condition */ bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1); bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1); if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) warn_sync_period = LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency); } } /* Update total read blocks during previous timer interval */ wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE); initpath: bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER); wqe->cmf_sync.event_tag = phba->fc_eventTag; bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE); /* Setup reqtag to match the wqe completion. */ bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag); bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1); bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period); bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND); bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1); bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); sync_buf->vport = phba->pport; sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl; sync_buf->cmd_dmabuf = NULL; sync_buf->rsp_dmabuf = NULL; sync_buf->bpl_dmabuf = NULL; sync_buf->sli4_xritag = NO_XRI; sync_buf->cmd_flag |= LPFC_IO_CMF; ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf); if (ret_val) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6214 Cannot issue CMF_SYNC_WQE: x%x\n", ret_val); __lpfc_sli_release_iocbq(phba, sync_buf); } out_unlock: spin_unlock_irqrestore(&phba->hbalock, iflags); return ret_val; } /** * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function is called with hbalock held and the caller must post the * iocb without releasing the lock. If the caller releases the lock, * iocb slot returned by the function is not guaranteed to be available. * The function returns pointer to the next available iocb slot if there * is available slot in the ring, else it returns NULL. * If the get index of the ring is ahead of the put index, the function * will post an error attention event to the worker thread to take the * HBA to offline state. 
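 *
 * Typical use, as in lpfc_sli_resume_iocb() in this file (hbalock held):
 *
 *   while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *          (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
 *           lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);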
**/ static IOCB_t * lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; lockdep_assert_held(&phba->hbalock); if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) pring->sli.sli3.next_cmdidx = 0; if (unlikely(pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)) { pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0315 Ring %d issue: portCmdGet %d " "is bigger than cmd ring %d\n", pring->ringno, pring->sli.sli3.local_getidx, max_cmd_idx); phba->link_state = LPFC_HBA_ERROR; /* * All error attention handlers are posted to * worker thread */ phba->work_ha |= HA_ERATT; phba->work_hs = HS_FFER3; lpfc_worker_wake_up(phba); return NULL; } if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) return NULL; } return lpfc_cmd_iocb(phba, pring); } /** * lpfc_sli_next_iotag - Get an iotag for the iocb * @phba: Pointer to HBA context object. * @iocbq: Pointer to driver iocb object. * * This function gets an iotag for the iocb. If there is no unused iotag and * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup * array and assigns a new iotag. * The function returns the allocated iotag if successful, else returns zero. * Zero is not a valid iotag. * The caller is not required to hold any lock. **/ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { struct lpfc_iocbq **new_arr; struct lpfc_iocbq **old_arr; size_t new_len; struct lpfc_sli *psli = &phba->sli; uint16_t iotag; spin_lock_irq(&phba->hbalock); iotag = psli->last_iotag; if(++iotag < psli->iocbq_lookup_len) { psli->last_iotag = iotag; psli->iocbq_lookup[iotag] = iocbq; spin_unlock_irq(&phba->hbalock); iocbq->iotag = iotag; return iotag; } else if (psli->iocbq_lookup_len < (0xffff - LPFC_IOCBQ_LOOKUP_INCREMENT)) { new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; spin_unlock_irq(&phba->hbalock); new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), GFP_KERNEL); if (new_arr) { spin_lock_irq(&phba->hbalock); old_arr = psli->iocbq_lookup; if (new_len <= psli->iocbq_lookup_len) { /* highly unprobable case */ kfree(new_arr); iotag = psli->last_iotag; if(++iotag < psli->iocbq_lookup_len) { psli->last_iotag = iotag; psli->iocbq_lookup[iotag] = iocbq; spin_unlock_irq(&phba->hbalock); iocbq->iotag = iotag; return iotag; } spin_unlock_irq(&phba->hbalock); return 0; } if (psli->iocbq_lookup) memcpy(new_arr, old_arr, ((psli->last_iotag + 1) * sizeof (struct lpfc_iocbq *))); psli->iocbq_lookup = new_arr; psli->iocbq_lookup_len = new_len; psli->last_iotag = iotag; psli->iocbq_lookup[iotag] = iocbq; spin_unlock_irq(&phba->hbalock); iocbq->iotag = iotag; kfree(old_arr); return iotag; } } else spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0318 Failed to allocate IOTAG.last IOTAG is %d\n", psli->last_iotag); return 0; } /** * lpfc_sli_submit_iocb - Submit an iocb to the firmware * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @iocb: Pointer to iocb slot in the ring. * @nextiocb: Pointer to driver iocb object which need to be * posted to firmware. * * This function is called to post a new iocb to the firmware. This * function copies the new iocb to ring iocb slot and updates the * ring pointers. 
It adds the new iocb to txcmplq if there is * a completion call back for this iocb else the function will free the * iocb object. The hbalock is asserted held in the code path calling * this routine. **/ static void lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t *iocb, struct lpfc_iocbq *nextiocb) { /* * Set up an iotag */ nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0; if (pring->ringno == LPFC_ELS_RING) { lpfc_debugfs_slow_ring_trc(phba, "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", *(((uint32_t *) &nextiocb->iocb) + 4), *(((uint32_t *) &nextiocb->iocb) + 6), *(((uint32_t *) &nextiocb->iocb) + 7)); } /* * Issue iocb command to adapter */ lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); wmb(); pring->stats.iocb_cmd++; /* * If there is no completion routine to call, we can release the * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, * that have no rsp ring completion, cmd_cmpl MUST be NULL. */ if (nextiocb->cmd_cmpl) lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); else __lpfc_sli_release_iocbq(phba, nextiocb); /* * Let the HBA know what IOCB slot will be the next one the * driver will put a command into. */ pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); } /** * lpfc_sli_update_full_ring - Update the chip attention register * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * The caller is not required to hold any lock for calling this function. * This function updates the chip attention bits for the ring to inform firmware * that there are pending work to be done for this ring and requests an * interrupt when there is space available in the ring. This function is * called when the driver is unable to post more iocbs to the ring due * to unavailability of space in the ring. **/ static void lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { int ringno = pring->ringno; pring->flag |= LPFC_CALL_RING_AVAILABLE; wmb(); /* * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. * The HBA will tell us when an IOCB entry is available. */ writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); readl(phba->CAregaddr); /* flush */ pring->stats.iocb_cmd_full++; } /** * lpfc_sli_update_ring - Update chip attention register * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function updates the chip attention register bit for the * given ring to inform HBA that there is more work to be done * in this ring. The caller is not required to hold any lock. **/ static void lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { int ringno = pring->ringno; /* * Tell the HBA that there is work to do in this ring. */ if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { wmb(); writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); readl(phba->CAregaddr); /* flush */ } } /** * lpfc_sli_resume_iocb - Process iocbs in the txq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function is called with hbalock held to post pending iocbs * in the txq to the firmware. This function is called when driver * detects space available in the ring. 
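 *
 * Hypothetical caller sketch illustrating only the locking requirement
 * stated above (the surrounding context and variable names are assumed):
 *
 *   spin_lock_irqsave(&phba->hbalock, iflags);
 *   lpfc_sli_resume_iocb(phba, pring);
 *   spin_unlock_irqrestore(&phba->hbalock, iflags);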
**/ static void lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { IOCB_t *iocb; struct lpfc_iocbq *nextiocb; lockdep_assert_held(&phba->hbalock); /* * Check to see if: * (a) there is anything on the txq to send * (b) link is up * (c) link attention events can be processed (fcp ring only) * (d) IOCB processing is not blocked by the outstanding mbox command. */ if (lpfc_is_link_up(phba) && (!list_empty(&pring->txq)) && (pring->ringno != LPFC_FCP_RING || phba->sli.sli_flag & LPFC_PROCESS_LA)) { while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && (nextiocb = lpfc_sli_ringtx_get(phba, pring))) lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); if (iocb) lpfc_sli_update_ring(phba, pring); else lpfc_sli_update_full_ring(phba, pring); } return; } /** * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ * @phba: Pointer to HBA context object. * @hbqno: HBQ number. * * This function is called with hbalock held to get the next * available slot for the given HBQ. If there is free slot * available for the HBQ it will return pointer to the next available * HBQ entry else it will return NULL. **/ static struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) { struct hbq_s *hbqp = &phba->hbqs[hbqno]; lockdep_assert_held(&phba->hbalock); if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && ++hbqp->next_hbqPutIdx >= hbqp->entry_count) hbqp->next_hbqPutIdx = 0; if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { uint32_t raw_index = phba->hbq_get[hbqno]; uint32_t getidx = le32_to_cpu(raw_index); hbqp->local_hbqGetIdx = getidx; if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1802 HBQ %d: local_hbqGetIdx " "%u is > than hbqp->entry_count %u\n", hbqno, hbqp->local_hbqGetIdx, hbqp->entry_count); phba->link_state = LPFC_HBA_ERROR; return NULL; } if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) return NULL; } return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + hbqp->hbqPutIdx; } /** * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers * @phba: Pointer to HBA context object. * * This function is called with no lock held to free all the * hbq buffers while uninitializing the SLI interface. It also * frees the HBQ buffers returned by the firmware but not yet * processed by the upper layers. **/ void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) { struct lpfc_dmabuf *dmabuf, *next_dmabuf; struct hbq_dmabuf *hbq_buf; unsigned long flags; int i, hbq_count; hbq_count = lpfc_sli_hbq_count(); /* Return all memory used by all HBQs */ spin_lock_irqsave(&phba->hbalock, flags); for (i = 0; i < hbq_count; ++i) { list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->hbqs[i].hbq_buffer_list, list) { hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); list_del(&hbq_buf->dbuf.list); (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); } phba->hbqs[i].buffer_count = 0; } /* Mark the HBQs not in use */ phba->hbq_in_use = 0; spin_unlock_irqrestore(&phba->hbalock, flags); } /** * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware * @phba: Pointer to HBA context object. * @hbqno: HBQ number. * @hbq_buf: Pointer to HBQ buffer. * * This function is called with the hbalock held to post a * hbq buffer to the firmware. If the function finds an empty * slot in the HBQ, it will post the buffer. The function will return * pointer to the hbq entry if it successfully post the buffer * else it will return NULL. 
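 *
 * Example use, as in lpfc_sli_free_hbq() in this file: a non-zero return
 * means the post failed and the buffer is handed back to its free routine.
 *
 *   if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
 *           (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);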
**/ static int lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) { lockdep_assert_held(&phba->hbalock); return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); } /** * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware * @phba: Pointer to HBA context object. * @hbqno: HBQ number. * @hbq_buf: Pointer to HBQ buffer. * * This function is called with the hbalock held to post a hbq buffer to the * firmware. If the function finds an empty slot in the HBQ, it will post the * buffer and place it on the hbq_buffer_list. The function will return zero if * it successfully post the buffer else it will return an error. **/ static int lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) { struct lpfc_hbq_entry *hbqe; dma_addr_t physaddr = hbq_buf->dbuf.phys; lockdep_assert_held(&phba->hbalock); /* Get next HBQ entry slot to use */ hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); if (hbqe) { struct hbq_s *hbqp = &phba->hbqs[hbqno]; hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; hbqe->bde.tus.f.bdeFlags = 0; hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); /* Sync SLIM */ hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); /* flush */ readl(phba->hbq_put + hbqno); list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); return 0; } else return -ENOMEM; } /** * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware * @phba: Pointer to HBA context object. * @hbqno: HBQ number. * @hbq_buf: Pointer to HBQ buffer. * * This function is called with the hbalock held to post an RQE to the SLI4 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to * the hbq_buffer_list and return zero, otherwise it will return an error. **/ static int lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) { int rc; struct lpfc_rqe hrqe; struct lpfc_rqe drqe; struct lpfc_queue *hrq; struct lpfc_queue *drq; if (hbqno != LPFC_ELS_HBQ) return 1; hrq = phba->sli4_hba.hdr_rq; drq = phba->sli4_hba.dat_rq; lockdep_assert_held(&phba->hbalock); hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); if (rc < 0) return rc; hbq_buf->tag = (rc | (hbqno << 16)); list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); return 0; } /* HBQ for ELS and CT traffic. */ static struct lpfc_hbq_init lpfc_els_hbq = { .rn = 1, .entry_count = 256, .mask_count = 0, .profile = 0, .ring_mask = (1 << LPFC_ELS_RING), .buffer_count = 0, .init_count = 40, .add_count = 40, }; /* Array of HBQs */ struct lpfc_hbq_init *lpfc_hbq_defs[] = { &lpfc_els_hbq, }; /** * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ * @phba: Pointer to HBA context object. * @hbqno: HBQ number. * @count: Number of HBQ buffers to be posted. * * This function is called with no lock held to post more hbq buffers to the * given HBQ. The function returns the number of HBQ buffers successfully * posted. 
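 *
 * Example use, as in lpfc_sli_hbqbuf_add_hbqs() in this file:
 *
 *   posted = lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
 *                                      lpfc_hbq_defs[qno]->add_count);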
**/ static int lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) { uint32_t i, posted = 0; unsigned long flags; struct hbq_dmabuf *hbq_buffer; LIST_HEAD(hbq_buf_list); if (!phba->hbqs[hbqno].hbq_alloc_buffer) return 0; if ((phba->hbqs[hbqno].buffer_count + count) > lpfc_hbq_defs[hbqno]->entry_count) count = lpfc_hbq_defs[hbqno]->entry_count - phba->hbqs[hbqno].buffer_count; if (!count) return 0; /* Allocate HBQ entries */ for (i = 0; i < count; i++) { hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); if (!hbq_buffer) break; list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); } /* Check whether HBQ is still in use */ spin_lock_irqsave(&phba->hbalock, flags); if (!phba->hbq_in_use) goto err; while (!list_empty(&hbq_buf_list)) { list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, dbuf.list); hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | (hbqno << 16)); if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { phba->hbqs[hbqno].buffer_count++; posted++; } else (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); } spin_unlock_irqrestore(&phba->hbalock, flags); return posted; err: spin_unlock_irqrestore(&phba->hbalock, flags); while (!list_empty(&hbq_buf_list)) { list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, dbuf.list); (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); } return 0; } /** * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware * @phba: Pointer to HBA context object. * @qno: HBQ number. * * This function posts more buffers to the HBQ. This function * is called with no lock held. The function returns the number of HBQ entries * successfully allocated. **/ int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) { if (phba->sli_rev == LPFC_SLI_REV4) return 0; else return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->add_count); } /** * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ * @phba: Pointer to HBA context object. * @qno: HBQ queue number. * * This function is called from SLI initialization code path with * no lock held to post initial HBQ buffers to firmware. The * function returns the number of HBQ entries successfully allocated. **/ static int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) { if (phba->sli_rev == LPFC_SLI_REV4) return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->entry_count); else return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->init_count); } /* * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list * * This function removes the first hbq buffer on an hbq list and returns a * pointer to that buffer. If it finds no buffers on the list it returns NULL. **/ static struct hbq_dmabuf * lpfc_sli_hbqbuf_get(struct list_head *rb_list) { struct lpfc_dmabuf *d_buf; list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); if (!d_buf) return NULL; return container_of(d_buf, struct hbq_dmabuf, dbuf); } /** * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list * @phba: Pointer to HBA context object. * @hrq: HBQ number. * * This function removes the first RQ buffer on an RQ buffer list and returns a * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
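 *
 * Hypothetical caller sketch (illustration only; 'rqb_entry' is an
 * assumed local variable of type struct rqb_dmabuf *):
 *
 *   rqb_entry = lpfc_sli_rqbuf_get(phba, hrq);
 *   if (!rqb_entry)
 *           return NULL;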
**/ static struct rqb_dmabuf * lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) { struct lpfc_dmabuf *h_buf; struct lpfc_rqb *rqbp; rqbp = hrq->rqbp; list_remove_head(&rqbp->rqb_buffer_list, h_buf, struct lpfc_dmabuf, list); if (!h_buf) return NULL; rqbp->buffer_count--; return container_of(h_buf, struct rqb_dmabuf, hbuf); } /** * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag * @phba: Pointer to HBA context object. * @tag: Tag of the hbq buffer. * * This function searches for the hbq buffer associated with the given tag in * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer * otherwise it returns NULL. **/ static struct hbq_dmabuf * lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) { struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *hbq_buf; uint32_t hbqno; hbqno = tag >> 16; if (hbqno >= LPFC_MAX_HBQS) return NULL; spin_lock_irq(&phba->hbalock); list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); if (hbq_buf->tag == tag) { spin_unlock_irq(&phba->hbalock); return hbq_buf; } } spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1803 Bad hbq tag. Data: x%x x%x\n", tag, phba->hbqs[tag >> 16].buffer_count); return NULL; } /** * lpfc_sli_free_hbq - Give back the hbq buffer to firmware * @phba: Pointer to HBA context object. * @hbq_buffer: Pointer to HBQ buffer. * * This function is called with hbalock. This function gives back * the hbq buffer to firmware. If the HBQ does not have space to * post the buffer, it will free the buffer. **/ void lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) { uint32_t hbqno; if (hbq_buffer) { hbqno = hbq_buffer->tag >> 16; if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); } } /** * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox * @mbxCommand: mailbox command code. * * This function is called by the mailbox event handler function to verify * that the completed mailbox command is a legitimate mailbox command. If the * completed mailbox is not known to the function, it will return MBX_SHUTDOWN * and the mailbox event handler will take the HBA offline. 
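 *
 * Example use, abridged from lpfc_sli_handle_mb_event() in this file:
 *
 *   if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN)
 *           phba->link_state = LPFC_HBA_ERROR;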
**/ static int lpfc_sli_chk_mbx_command(uint8_t mbxCommand) { uint8_t ret; switch (mbxCommand) { case MBX_LOAD_SM: case MBX_READ_NV: case MBX_WRITE_NV: case MBX_WRITE_VPARMS: case MBX_RUN_BIU_DIAG: case MBX_INIT_LINK: case MBX_DOWN_LINK: case MBX_CONFIG_LINK: case MBX_CONFIG_RING: case MBX_RESET_RING: case MBX_READ_CONFIG: case MBX_READ_RCONFIG: case MBX_READ_SPARM: case MBX_READ_STATUS: case MBX_READ_RPI: case MBX_READ_XRI: case MBX_READ_REV: case MBX_READ_LNK_STAT: case MBX_REG_LOGIN: case MBX_UNREG_LOGIN: case MBX_CLEAR_LA: case MBX_DUMP_MEMORY: case MBX_DUMP_CONTEXT: case MBX_RUN_DIAGS: case MBX_RESTART: case MBX_UPDATE_CFG: case MBX_DOWN_LOAD: case MBX_DEL_LD_ENTRY: case MBX_RUN_PROGRAM: case MBX_SET_MASK: case MBX_SET_VARIABLE: case MBX_UNREG_D_ID: case MBX_KILL_BOARD: case MBX_CONFIG_FARP: case MBX_BEACON: case MBX_LOAD_AREA: case MBX_RUN_BIU_DIAG64: case MBX_CONFIG_PORT: case MBX_READ_SPARM64: case MBX_READ_RPI64: case MBX_REG_LOGIN64: case MBX_READ_TOPOLOGY: case MBX_WRITE_WWN: case MBX_SET_DEBUG: case MBX_LOAD_EXP_ROM: case MBX_ASYNCEVT_ENABLE: case MBX_REG_VPI: case MBX_UNREG_VPI: case MBX_HEARTBEAT: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: case MBX_SLI4_CONFIG: case MBX_SLI4_REQ_FTRS: case MBX_REG_FCFI: case MBX_UNREG_FCFI: case MBX_REG_VFI: case MBX_UNREG_VFI: case MBX_INIT_VPI: case MBX_INIT_VFI: case MBX_RESUME_RPI: case MBX_READ_EVENT_LOG_STATUS: case MBX_READ_EVENT_LOG: case MBX_SECURITY_MGMT: case MBX_AUTH_PORT: case MBX_ACCESS_VDATA: ret = mbxCommand; break; default: ret = MBX_SHUTDOWN; break; } return ret; } /** * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. * * This is completion handler function for mailbox commands issued from * lpfc_sli_issue_mbox_wait function. This function is called by the * mailbox event handler function with no lock held. This function * will wake up thread waiting on the wait queue pointed by context1 * of the mailbox. **/ void lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { unsigned long drvr_flag; struct completion *pmbox_done; /* * If pmbox_done is empty, the driver thread gave up waiting and * continued running. */ pmboxq->mbox_flag |= LPFC_MBX_WAKE; spin_lock_irqsave(&phba->hbalock, drvr_flag); pmbox_done = (struct completion *)pmboxq->context3; if (pmbox_done) complete(pmbox_done); spin_unlock_irqrestore(&phba->hbalock, drvr_flag); return; } static void __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { unsigned long iflags; if (ndlp->nlp_flag & NLP_RELEASE_RPI) { lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag &= ~NLP_RELEASE_RPI; ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; spin_unlock_irqrestore(&ndlp->lock, iflags); } ndlp->nlp_flag &= ~NLP_UNREG_INP; } void lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { __lpfc_sli_rpi_release(vport, ndlp); } /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. * @pmb: Pointer to mailbox object. * * This function is the default mailbox completion handler. It * frees the memory resources associated with the completed mailbox * command. If the completed command is a REG_LOGIN mailbox command, * this function will issue a UREG_LOGIN to re-claim the RPI. 
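 *
 * Callers install this routine as the mailbox completion handler before
 * issuing a command, as this function itself does when re-issuing the
 * UNREG_LOGIN:
 *
 *   pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *   rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);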
**/ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_dmabuf *mp; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; uint16_t rpi, vpi; int rc; /* * If a REG_LOGIN succeeded after node is destroyed or node * is in re-discovery driver need to cleanup the RPI. */ if (!(phba->pport->load_flag & FC_UNLOADING) && pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && !pmb->u.mb.mbxStatus) { mp = (struct lpfc_dmabuf *)pmb->ctx_buf; if (mp) { pmb->ctx_buf = NULL; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } rpi = pmb->u.mb.un.varWords[0]; vpi = pmb->u.mb.un.varRegLogin.vpi; if (phba->sli_rev == LPFC_SLI_REV4) vpi -= phba->sli4_hba.max_cfg_param.vpi_base; lpfc_unreg_login(phba, vpi, rpi, pmb); pmb->vport = vport; pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) return; } if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && !(phba->pport->load_flag & FC_UNLOADING) && !pmb->u.mb.mbxStatus) { shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); vport->vpi_state |= LPFC_VPI_REGISTERED; vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); } if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; lpfc_nlp_put(ndlp); } if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; /* Check to see if there are any deferred events to process */ if (ndlp) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " "on NPort x%x Data: x%x x%x x%px x%x x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp, vport->load_flag, kref_read(&ndlp->kref)); if ((ndlp->nlp_flag & NLP_UNREG_INP) && (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { __lpfc_sli_rpi_release(vport, ndlp); } /* The unreg_login mailbox is complete and had a * reference that has to be released. The PLOGI * got its own ref. */ lpfc_nlp_put(ndlp); pmb->ctx_ndlp = NULL; } } /* This nlp_put pairs with lpfc_sli4_resume_rpi */ if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) { ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; lpfc_nlp_put(ndlp); } /* Check security permission status on INIT_LINK mailbox command */ if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2860 SLI authentication is required " "for INIT_LINK but has not done yet\n"); if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, pmb); else lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); } /** * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler * @phba: Pointer to HBA context object. * @pmb: Pointer to mailbox object. * * This function is the unreg rpi mailbox completion handler. It * frees the memory resources associated with the completed mailbox * command. An additional reference is put on the ndlp to prevent * lpfc_nlp_release from freeing the rpi bit in the bitmask before * the unreg mailbox command completes, this routine puts the * reference back. 
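 *
 * Hypothetical installation sketch (illustration only; the actual
 * assignment is presumed to live in the unreg-rpi setup path, which is
 * not shown in this section):
 *
 *   mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;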
* **/ void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; ndlp = pmb->ctx_ndlp; if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { if (phba->sli_rev == LPFC_SLI_REV4 && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2)) { if (ndlp) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_SLI, "0010 UNREG_LOGIN vpi:%x " "rpi:%x DID:%x defer x%x flg x%x " "x%px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); ndlp->nlp_flag &= ~NLP_LOGO_ACC; /* Check to see if there are any deferred * events to process */ if ((ndlp->nlp_flag & NLP_UNREG_INP) && (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { lpfc_printf_vlog( vport, KERN_INFO, LOG_DISCOVERY, "4111 UNREG cmpl deferred " "clr x%x on " "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); } else { __lpfc_sli_rpi_release(vport, ndlp); } lpfc_nlp_put(ndlp); } } } mempool_free(pmb, phba->mbox_mem_pool); } /** * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware * @phba: Pointer to HBA context object. * * This function is called with no lock held. This function processes all * the completed mailbox commands and gives it to upper layers. The interrupt * service routine processes mailbox completion interrupt and adds completed * mailbox commands to the mboxq_cmpl queue and signals the worker thread. * Worker thread call lpfc_sli_handle_mb_event, which will return the * completed mailbox commands in mboxq_cmpl queue to the upper layers. This * function returns the mailbox commands to the upper layer by calling the * completion handler function of each mailbox. **/ int lpfc_sli_handle_mb_event(struct lpfc_hba *phba) { MAILBOX_t *pmbox; LPFC_MBOXQ_t *pmb; int rc; LIST_HEAD(cmplq); phba->sli.slistat.mbox_event++; /* Get all completed mailboxe buffers into the cmplq */ spin_lock_irq(&phba->hbalock); list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); spin_unlock_irq(&phba->hbalock); /* Get a Mailbox buffer to setup mailbox commands for callback */ do { list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); if (pmb == NULL) break; pmbox = &pmb->u.mb; if (pmbox->mbxCommand != MBX_HEARTBEAT) { if (pmb->vport) { lpfc_debugfs_disc_trc(pmb->vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX cmpl vport: cmd:x%x mb:x%x x%x", (uint32_t)pmbox->mbxCommand, pmbox->un.varWords[0], pmbox->un.varWords[1]); } else { lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_MBOX, "MBOX cmpl: cmd:x%x mb:x%x x%x", (uint32_t)pmbox->mbxCommand, pmbox->un.varWords[0], pmbox->un.varWords[1]); } } /* * It is a fatal error if unknown mbox command completion. */ if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) { /* Unknown mailbox command compl */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):0323 Unknown Mailbox command " "x%x (x%x/x%x) Cmpl\n", pmb->vport ? 
pmb->vport->vpi : LPFC_VPORT_UNKNOWN, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), lpfc_sli_config_mbox_opcode_get(phba, pmb)); phba->link_state = LPFC_HBA_ERROR; phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); continue; } if (pmbox->mbxStatus) { phba->sli.slistat.mbox_stat_err++; if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { /* Mbox cmd cmpl error - RETRYing */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0305 Mbox cmd cmpl " "error - RETRYing Data: x%x " "(x%x/x%x) x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : LPFC_VPORT_UNKNOWN, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), lpfc_sli_config_mbox_opcode_get(phba, pmb), pmbox->mbxStatus, pmbox->un.varWords[0], pmb->vport ? pmb->vport->port_state : LPFC_VPORT_UNKNOWN); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) continue; } } /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " "x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), lpfc_sli_config_mbox_opcode_get(phba, pmb), pmb->mbox_cmpl, *((uint32_t *) pmbox), pmbox->un.varWords[0], pmbox->un.varWords[1], pmbox->un.varWords[2], pmbox->un.varWords[3], pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], pmbox->un.varWords[7], pmbox->un.varWords[8], pmbox->un.varWords[9], pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); } while (1); return 0; } /** * lpfc_sli_get_buff - Get the buffer associated with the buffer tag * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @tag: buffer tag. * * This function is called with no lock held. When QUE_BUFTAG_BIT bit * is set in the tag the buffer is posted for a particular exchange, * the function will return the buffer without replacing the buffer. * If the buffer is for unsolicited ELS or CT traffic, this function * returns the buffer and also posts another buffer to the firmware. **/ static struct lpfc_dmabuf * lpfc_sli_get_buff(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t tag) { struct hbq_dmabuf *hbq_entry; if (tag & QUE_BUFTAG_BIT) return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); if (!hbq_entry) return NULL; return &hbq_entry->dbuf; } /** * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer * containing a NVME LS request. * @phba: pointer to lpfc hba data structure. * @piocb: pointer to the iocbq struct representing the sequence starting * frame. * * This routine initially validates the NVME LS, validates there is a login * with the port that sent the LS, and then calls the appropriate nvme host * or target LS request handler. 
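 *
 * This handler is reached from lpfc_complete_unsol_iocb() later in this
 * file when the first frame of the sequence carries FC_TYPE_NVME:
 *
 *   case FC_TYPE_NVME:
 *           lpfc_nvme_unsol_ls_handler(phba, saveq);
 *           return 1;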
**/ static void lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *nvmebuf; struct fc_frame_header *fc_hdr; struct lpfc_async_xchg_ctx *axchg = NULL; char *failwhy = NULL; uint32_t oxid, sid, did, fctl, size; int ret = 1; d_buf = piocb->cmd_dmabuf; nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); fc_hdr = nvmebuf->hbuf.virt; oxid = be16_to_cpu(fc_hdr->fh_ox_id); sid = sli4_sid_from_fc_hdr(fc_hdr); did = sli4_did_from_fc_hdr(fc_hdr); fctl = (fc_hdr->fh_f_ctl[0] << 16 | fc_hdr->fh_f_ctl[1] << 8 | fc_hdr->fh_f_ctl[2]); size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n", oxid, size, sid); if (phba->pport->load_flag & FC_UNLOADING) { failwhy = "Driver Unloading"; } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { failwhy = "NVME FC4 Disabled"; } else if (!phba->nvmet_support && !phba->pport->localport) { failwhy = "No Localport"; } else if (phba->nvmet_support && !phba->targetport) { failwhy = "No Targetport"; } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) { failwhy = "Bad NVME LS R_CTL"; } else if (unlikely((fctl & 0x00FF0000) != (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) { failwhy = "Bad NVME LS F_CTL"; } else { axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC); if (!axchg) failwhy = "No CTX memory"; } if (unlikely(failwhy)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6154 Drop NVME LS: SID %06X OXID x%X: %s\n", sid, oxid, failwhy); goto out_fail; } /* validate the source of the LS is logged in */ ndlp = lpfc_findnode_did(phba->pport, sid); if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, "6216 NVME Unsol rcv: No ndlp: " "NPort_ID x%x oxid x%x\n", sid, oxid); goto out_fail; } axchg->phba = phba; axchg->ndlp = ndlp; axchg->size = size; axchg->oxid = oxid; axchg->sid = sid; axchg->wqeq = NULL; axchg->state = LPFC_NVME_STE_LS_RCV; axchg->entry_cnt = 1; axchg->rqb_buffer = (void *)nvmebuf; axchg->hdwq = &phba->sli4_hba.hdwq[0]; axchg->payload = nvmebuf->dbuf.virt; INIT_LIST_HEAD(&axchg->list); if (phba->nvmet_support) { ret = lpfc_nvmet_handle_lsreq(phba, axchg); spin_lock_irq(&ndlp->lock); if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) { ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH; spin_unlock_irq(&ndlp->lock); /* This reference is a single occurrence to hold the * node valid until the nvmet transport calls * host_release. */ if (!lpfc_nlp_get(ndlp)) goto out_fail; lpfc_printf_log(phba, KERN_ERR, LOG_NODE, "6206 NVMET unsol ls_req ndlp x%px " "DID x%x xflags x%x refcnt %d\n", ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); } else { spin_unlock_irq(&ndlp->lock); } } else { ret = lpfc_nvme_handle_lsreq(phba, axchg); } /* if zero, LS was successfully handled. If non-zero, LS not handled */ if (!ret) return; out_fail: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X " "NVMe%s handler failed %d\n", did, sid, oxid, (phba->nvmet_support) ? 
"T" : "I", ret); /* recycle receive buffer */ lpfc_in_buf_free(phba, &nvmebuf->dbuf); /* If start of new exchange, abort it */ if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX))) ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid); if (ret) kfree(axchg); } /** * lpfc_complete_unsol_iocb - Complete an unsolicited sequence * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @saveq: Pointer to the iocbq struct representing the sequence starting frame. * @fch_r_ctl: the r_ctl for the first frame of the sequence. * @fch_type: the type for the first frame of the sequence. * * This function is called with no lock held. This function uses the r_ctl and * type of the received sequence to find the correct callback function to call * to process the sequence. **/ static int lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, uint32_t fch_type) { int i; switch (fch_type) { case FC_TYPE_NVME: lpfc_nvme_unsol_ls_handler(phba, saveq); return 1; default: break; } /* unSolicited Responses */ if (pring->prt[0].profile) { if (pring->prt[0].lpfc_sli_rcv_unsol_event) (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq); return 1; } /* We must search, based on rctl / type for the right routine */ for (i = 0; i < pring->num_mask; i++) { if ((pring->prt[i].rctl == fch_r_ctl) && (pring->prt[i].type == fch_type)) { if (pring->prt[i].lpfc_sli_rcv_unsol_event) (pring->prt[i].lpfc_sli_rcv_unsol_event) (phba, pring, saveq); return 1; } } return 0; } static void lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *saveq) { IOCB_t *irsp; union lpfc_wqe128 *wqe; u16 i = 0; irsp = &saveq->iocb; wqe = &saveq->wqe; /* Fill wcqe with the IOCB status fields */ bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus); saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount; saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4]; saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len; /* Source ID */ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo); /* rx-id of the response frame */ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext); /* ox-id of the frame */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, irsp->unsli3.rcvsli3.ox_id); /* DID */ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, irsp->un.rcvels.remoteID); /* unsol data len */ for (i = 0; i < irsp->ulpBdeCount; i++) { struct lpfc_hbq_entry *hbqe = NULL; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (i == 0) { hbqe = (struct lpfc_hbq_entry *) &irsp->un.ulpWord[0]; saveq->wqe.gen_req.bde.tus.f.bdeSize = hbqe->bde.tus.f.bdeSize; } else if (i == 1) { hbqe = (struct lpfc_hbq_entry *) &irsp->unsli3.sli3Words[4]; saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize; } } } } /** * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @saveq: Pointer to the unsolicited iocb. * * This function is called with no lock held by the ring event handler * when there is an unsolicited iocb posted to the response ring by the * firmware. This function gets the buffer associated with the iocbs * and calls the event handler for the ring. This function handles both * qring buffers and hbq buffers. * When the function returns 1 the caller can free the iocb object otherwise * upper layer functions will free the iocb objects. 
**/ static int lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *saveq) { IOCB_t * irsp; WORD5 * w5p; dma_addr_t paddr; uint32_t Rctl, Type; struct lpfc_iocbq *iocbq; struct lpfc_dmabuf *dmzbuf; irsp = &saveq->iocb; saveq->vport = phba->pport; if (irsp->ulpCommand == CMD_ASYNC_STATUS) { if (pring->lpfc_sli_rcv_async_status) pring->lpfc_sli_rcv_async_status(phba, pring, saveq); else lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0316 Ring %d handler: unexpected " "ASYNC_STATUS iocb received evt_code " "0x%x\n", pring->ringno, irsp->un.asyncstat.evt_code); return 1; } if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { if (irsp->ulpBdeCount > 0) { dmzbuf = lpfc_sli_get_buff(phba, pring, irsp->un.ulpWord[3]); lpfc_in_buf_free(phba, dmzbuf); } if (irsp->ulpBdeCount > 1) { dmzbuf = lpfc_sli_get_buff(phba, pring, irsp->unsli3.sli3Words[3]); lpfc_in_buf_free(phba, dmzbuf); } if (irsp->ulpBdeCount > 2) { dmzbuf = lpfc_sli_get_buff(phba, pring, irsp->unsli3.sli3Words[7]); lpfc_in_buf_free(phba, dmzbuf); } return 1; } if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (irsp->ulpBdeCount != 0) { saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring, irsp->un.ulpWord[3]); if (!saveq->cmd_dmabuf) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0341 Ring %d Cannot find buffer for " "an unsolicited iocb. tag 0x%x\n", pring->ringno, irsp->un.ulpWord[3]); } if (irsp->ulpBdeCount == 2) { saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring, irsp->unsli3.sli3Words[7]); if (!saveq->bpl_dmabuf) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0342 Ring %d Cannot find buffer for an" " unsolicited iocb. tag 0x%x\n", pring->ringno, irsp->unsli3.sli3Words[7]); } list_for_each_entry(iocbq, &saveq->list, list) { irsp = &iocbq->iocb; if (irsp->ulpBdeCount != 0) { iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring, irsp->un.ulpWord[3]); if (!iocbq->cmd_dmabuf) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0343 Ring %d Cannot find " "buffer for an unsolicited iocb" ". tag 0x%x\n", pring->ringno, irsp->un.ulpWord[3]); } if (irsp->ulpBdeCount == 2) { iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring, irsp->unsli3.sli3Words[7]); if (!iocbq->bpl_dmabuf) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0344 Ring %d Cannot find " "buffer for an unsolicited " "iocb. 
tag 0x%x\n", pring->ringno, irsp->unsli3.sli3Words[7]); } } } else { paddr = getPaddr(irsp->un.cont64[0].addrHigh, irsp->un.cont64[0].addrLow); saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, paddr); if (irsp->ulpBdeCount == 2) { paddr = getPaddr(irsp->un.cont64[1].addrHigh, irsp->un.cont64[1].addrLow); saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, paddr); } } if (irsp->ulpBdeCount != 0 && (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { int found = 0; /* search continue save q for same XRI */ list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { if (iocbq->iocb.unsli3.rcvsli3.ox_id == saveq->iocb.unsli3.rcvsli3.ox_id) { list_add_tail(&saveq->list, &iocbq->list); found = 1; break; } } if (!found) list_add_tail(&saveq->clist, &pring->iocb_continue_saveq); if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { list_del_init(&iocbq->clist); saveq = iocbq; irsp = &saveq->iocb; } else { return 0; } } if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { Rctl = FC_RCTL_ELS_REQ; Type = FC_TYPE_ELS; } else { w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); Rctl = w5p->hcsw.Rctl; Type = w5p->hcsw.Type; /* Firmware Workaround */ if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { Rctl = FC_RCTL_ELS_REQ; Type = FC_TYPE_ELS; w5p->hcsw.Rctl = Rctl; w5p->hcsw.Type = Type; } } if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX || irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { if (irsp->unsli3.rcvsli3.vpi == 0xffff) saveq->vport = phba->pport; else saveq->vport = lpfc_find_vport_by_vpid(phba, irsp->unsli3.rcvsli3.vpi); } /* Prepare WQE with Unsol frame */ lpfc_sli_prep_unsol_wqe(phba, saveq); if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0313 Ring %d handler: unexpected Rctl x%x " "Type x%x received\n", pring->ringno, Rctl, Type); return 1; } /** * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @prspiocb: Pointer to response iocb object. * * This function looks up the iocb_lookup table to get the command iocb * corresponding to the given response iocb using the iotag of the * response iocb. The driver calls this function with the hbalock held * for SLI3 ports or the ring lock held for SLI4 ports. * This function returns the command iocb object if it finds the command * iocb else returns NULL. **/ static struct lpfc_iocbq * lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *prspiocb) { struct lpfc_iocbq *cmd_iocb = NULL; u16 iotag; if (phba->sli_rev == LPFC_SLI_REV4) iotag = get_wqe_reqtag(prspiocb); else iotag = prspiocb->iocb.ulpIoTag; if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { /* remove from txcmpl queue list */ list_del_init(&cmd_iocb->list); cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt--; return cmd_iocb; } } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0317 iotag x%x is out of " "range: max iotag x%x\n", iotag, phba->sli.last_iotag); return NULL; } /** * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag * @phba: Pointer to HBA context object. 
* @pring: Pointer to driver SLI ring object. * @iotag: IOCB tag. * * This function looks up the iocb_lookup table to get the command iocb * corresponding to the given iotag. The driver calls this function with * the ring lock held because this function is an SLI4 port only helper. * This function returns the command iocb object if it finds the command * iocb else returns NULL. **/ static struct lpfc_iocbq * lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint16_t iotag) { struct lpfc_iocbq *cmd_iocb = NULL; if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { /* remove from txcmpl queue list */ list_del_init(&cmd_iocb->list); cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt--; return cmd_iocb; } } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0372 iotag x%x lookup error: max iotag (x%x) " "cmd_flag x%x\n", iotag, phba->sli.last_iotag, cmd_iocb ? cmd_iocb->cmd_flag : 0xffff); return NULL; } /** * lpfc_sli_process_sol_iocb - process solicited iocb completion * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @saveq: Pointer to the response iocb to be processed. * * This function is called by the ring event handler for non-fcp * rings when there is a new response iocb in the response ring. * The caller is not required to hold any locks. This function * gets the command iocb associated with the response iocb and * calls the completion handler for the command iocb. If there * is no completion handler, the function will free the resources * associated with command iocb. If the response iocb is for * an already aborted command iocb, the status of the completion * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. * This function always returns 1. **/ static int lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *saveq) { struct lpfc_iocbq *cmdiocbp; unsigned long iflag; u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; if (phba->sli_rev == LPFC_SLI_REV4) spin_lock_irqsave(&pring->ring_lock, iflag); else spin_lock_irqsave(&phba->hbalock, iflag); cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock_irqrestore(&pring->ring_lock, iflag); else spin_unlock_irqrestore(&phba->hbalock, iflag); ulp_command = get_job_cmnd(phba, saveq); ulp_status = get_job_ulpstatus(phba, saveq); ulp_word4 = get_job_word4(phba, saveq); ulp_context = get_job_ulpcontext(phba, saveq); if (phba->sli_rev == LPFC_SLI_REV4) iotag = get_wqe_reqtag(saveq); else iotag = saveq->iocb.ulpIoTag; if (cmdiocbp) { ulp_command = get_job_cmnd(phba, cmdiocbp); if (cmdiocbp->cmd_cmpl) { /* * If an ELS command failed send an event to mgmt * application. */ if (ulp_status && (pring->ringno == LPFC_ELS_RING) && (ulp_command == CMD_ELS_REQUEST64_CR)) lpfc_send_els_failure_event(phba, cmdiocbp, saveq); /* * Post all ELS completions to the worker thread. * All other are passed to the completion callback. */ if (pring->ringno == LPFC_ELS_RING) { if ((phba->sli_rev < LPFC_SLI_REV4) && (cmdiocbp->cmd_flag & LPFC_DRIVER_ABORTED)) { spin_lock_irqsave(&phba->hbalock, iflag); cmdiocbp->cmd_flag &= ~LPFC_DRIVER_ABORTED; spin_unlock_irqrestore(&phba->hbalock, iflag); saveq->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; saveq->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; /* Firmware could still be in progress * of DMAing payload, so don't free data * buffer till after a hbeat. 
*/ spin_lock_irqsave(&phba->hbalock, iflag); saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; spin_unlock_irqrestore(&phba->hbalock, iflag); } if (phba->sli_rev == LPFC_SLI_REV4) { if (saveq->cmd_flag & LPFC_EXCHANGE_BUSY) { /* Set cmdiocb flag for the * exchange busy so sgl (xri) * will not be released until * the abort xri is received * from hba. */ spin_lock_irqsave( &phba->hbalock, iflag); cmdiocbp->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore( &phba->hbalock, iflag); } if (cmdiocbp->cmd_flag & LPFC_DRIVER_ABORTED) { /* * Clear LPFC_DRIVER_ABORTED * bit in case it was driver * initiated abort. */ spin_lock_irqsave( &phba->hbalock, iflag); cmdiocbp->cmd_flag &= ~LPFC_DRIVER_ABORTED; spin_unlock_irqrestore( &phba->hbalock, iflag); set_job_ulpstatus(cmdiocbp, IOSTAT_LOCAL_REJECT); set_job_ulpword4(cmdiocbp, IOERR_ABORT_REQUESTED); /* * For SLI4, irspiocb contains * NO_XRI in sli_xritag, it * shall not affect releasing * sgl (xri) process. */ set_job_ulpstatus(saveq, IOSTAT_LOCAL_REJECT); set_job_ulpword4(saveq, IOERR_SLI_ABORTED); spin_lock_irqsave( &phba->hbalock, iflag); saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; spin_unlock_irqrestore( &phba->hbalock, iflag); } } } cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); } else lpfc_sli_release_iocbq(phba, cmdiocbp); } else { /* * Unknown initiating command based on the response iotag. * This could be the case on the ELS ring because of * lpfc_els_abort(). */ if (pring->ringno != LPFC_ELS_RING) { /* * Ring <ringno> handler: unexpected completion IoTag * <IoTag> */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0322 Ring %d handler: " "unexpected completion IoTag x%x " "Data: x%x x%x x%x x%x\n", pring->ringno, iotag, ulp_status, ulp_word4, ulp_command, ulp_context); } } return 1; } /** * lpfc_sli_rsp_pointers_error - Response ring pointer error handler * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function is called from the iocb ring event handlers when * put pointer is ahead of the get pointer for a ring. This function signal * an error attention condition to the worker thread and the worker * thread will transition the HBA to offline state. **/ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; /* * Ring <ringno> handler: portRspPut <portRspPut> is bigger than * rsp ring <portRspMax> */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0312 Ring %d handler: portRspPut %d " "is bigger than rsp ring %d\n", pring->ringno, le32_to_cpu(pgp->rspPutInx), pring->sli.sli3.numRiocb); phba->link_state = LPFC_HBA_ERROR; /* * All error attention handlers are posted to * worker thread */ phba->work_ha |= HA_ERATT; phba->work_hs = HS_FFER3; lpfc_worker_wake_up(phba); return; } /** * lpfc_poll_eratt - Error attention polling timer timeout handler * @t: Context to fetch pointer to address of HBA context object from. * * This function is invoked by the Error Attention polling timer when the * timer times out. It will check the SLI Error Attention register for * possible attention events. If so, it will post an Error Attention event * and wake up worker thread to process it. Otherwise, it will set up the * Error Attention polling timer for the next poll. 
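 *
 * Reading aid only (this mirrors the code below, it is not a new
 * interface): the interrupts-per-second statistic kept here is effectively
 *
 *   delta   = sli_intr - sli_prev_intr;     (with wrap-around handled)
 *   sli_ips = delta / eratt_poll_interval;  (computed with do_div())
 *
 * where eratt_poll_interval is the polling period in seconds.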
**/ void lpfc_poll_eratt(struct timer_list *t) { struct lpfc_hba *phba; uint32_t eratt = 0; uint64_t sli_intr, cnt; phba = from_timer(phba, t, eratt_poll); if (!(phba->hba_flag & HBA_SETUP)) return; /* Here we will also keep track of interrupts per sec of the hba */ sli_intr = phba->sli.slistat.sli_intr; if (phba->sli.slistat.sli_prev_intr > sli_intr) cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + sli_intr); else cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); /* 64-bit integer division not supported on 32-bit x86 - use do_div */ do_div(cnt, phba->eratt_poll_interval); phba->sli.slistat.sli_ips = cnt; phba->sli.slistat.sli_prev_intr = sli_intr; /* Check chip HA register for error event */ eratt = lpfc_sli_check_eratt(phba); if (eratt) /* Tell the worker thread there is work to do */ lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); return; } /** * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * * This function is called from the interrupt context when there is a ring * event for the fcp ring. The caller does not hold any lock. * The function processes each response iocb in the response ring until it * finds an iocb with LE bit set and chains all the iocbs up to the iocb with * LE bit set. The function will call the completion handler of the command iocb * if the response iocb indicates a completion for a command iocb or it is * an abort completion. The function will call lpfc_sli_process_unsol_iocb * function if this is an unsolicited iocb. * This routine presumes LPFC_FCP_RING handling and doesn't bother * to check it explicitly. */ int lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; IOCB_t *irsp = NULL; IOCB_t *entry = NULL; struct lpfc_iocbq *cmdiocbq = NULL; struct lpfc_iocbq rspiocbq; uint32_t status; uint32_t portRspPut, portRspMax; int rc = 1; lpfc_iocb_type type; unsigned long iflag; uint32_t rsp_cmpl = 0; spin_lock_irqsave(&phba->hbalock, iflag); pring->stats.iocb_event++; /* * The next available response entry should never exceed the maximum * entries. If it does, treat it as an adapter hardware error. */ portRspMax = pring->sli.sli3.numRiocb; portRspPut = le32_to_cpu(pgp->rspPutInx); if (unlikely(portRspPut >= portRspMax)) { lpfc_sli_rsp_pointers_error(phba, pring); spin_unlock_irqrestore(&phba->hbalock, iflag); return 1; } if (phba->fcp_ring_in_use) { spin_unlock_irqrestore(&phba->hbalock, iflag); return 1; } else phba->fcp_ring_in_use = 1; rmb(); while (pring->sli.sli3.rspidx != portRspPut) { /* * Fetch an entry off the ring and copy it into a local data * structure. The copy involves a byte-swap since the * network byte order and pci byte orders are different. */ entry = lpfc_resp_iocb(phba, pring); phba->last_completion_time = jiffies; if (++pring->sli.sli3.rspidx >= portRspMax) pring->sli.sli3.rspidx = 0; lpfc_sli_pcimem_bcopy((uint32_t *) entry, (uint32_t *) &rspiocbq.iocb, phba->iocb_rsp_size); INIT_LIST_HEAD(&(rspiocbq.list)); irsp = &rspiocbq.iocb; type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); pring->stats.iocb_rsp++; rsp_cmpl++; if (unlikely(irsp->ulpStatus)) { /* * If resource errors reported from HBA, reduce * queuedepths of the SCSI device. 
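			 * (Concretely, IOSTAT_LOCAL_REJECT with
			 * IOERR_NO_RESOURCES in word4 triggers the
			 * lpfc_rampdown_queue_depth() call just below;
			 * noted here only as a reading aid.)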
*/ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); phba->lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); } /* Rsp ring <ringno> error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0336 Rsp Ring %d error: IOCB Data: " "x%x x%x x%x x%x x%x x%x x%x x%x\n", pring->ringno, irsp->un.ulpWord[0], irsp->un.ulpWord[1], irsp->un.ulpWord[2], irsp->un.ulpWord[3], irsp->un.ulpWord[4], irsp->un.ulpWord[5], *(uint32_t *)&irsp->un1, *((uint32_t *)&irsp->un1 + 1)); } switch (type) { case LPFC_ABORT_IOCB: case LPFC_SOL_IOCB: /* * Idle exchange closed via ABTS from port. No iocb * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0333 IOCB cmd 0x%x" " processed. Skipping" " completion\n", irsp->ulpCommand); break; } cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); if (unlikely(!cmdiocbq)) break; if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; if (cmdiocbq->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); spin_lock_irqsave(&phba->hbalock, iflag); } break; case LPFC_UNSOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); spin_lock_irqsave(&phba->hbalock, iflag); break; default: if (irsp->ulpCommand == CMD_ADAPTER_MSG) { char adaptermsg[LPFC_MAX_ADPTMSG]; memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); memcpy(&adaptermsg[0], (uint8_t *) irsp, MAX_MSG_DATA); dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s\n", phba->brd_no, adaptermsg); } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0334 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", type, irsp->ulpCommand, irsp->ulpStatus, irsp->ulpIoTag, irsp->ulpContext); } break; } /* * The response IOCB has been processed. Update the ring * pointer in SLIM. If the port response put pointer has not * been updated, sync the pgp->rspPutInx and fetch the new port * response put pointer. */ writel(pring->sli.sli3.rspidx, &phba->host_gp[pring->ringno].rspGetInx); if (pring->sli.sli3.rspidx == portRspPut) portRspPut = le32_to_cpu(pgp->rspPutInx); } if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { pring->stats.iocb_rsp_full++; status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); writel(status, phba->CAregaddr); readl(phba->CAregaddr); } if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { pring->flag &= ~LPFC_CALL_RING_AVAILABLE; pring->stats.iocb_cmd_empty++; /* Force update of the local copy of cmdGetInx */ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); lpfc_sli_resume_iocb(phba, pring); if ((pring->lpfc_sli_cmd_available)) (pring->lpfc_sli_cmd_available) (phba, pring); } phba->fcp_ring_in_use = 0; spin_unlock_irqrestore(&phba->hbalock, iflag); return rc; } /** * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @rspiocbp: Pointer to driver response IOCB object. * * This function is called from the worker thread when there is a slow-path * response IOCB to process. This function chains all the response iocbs until * seeing the iocb with the LE bit set. The function will call * lpfc_sli_process_sol_iocb function if the response iocb indicates a * completion of a command iocb. 
The function will call the * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. * The function frees the resources or calls the completion handler if this * iocb is an abort completion. The function returns NULL when the response * iocb has the LE bit set and all the chained iocbs are processed, otherwise * this function shall chain the iocb on to the iocb_continueq and return the * response iocb passed in. **/ static struct lpfc_iocbq * lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *rspiocbp) { struct lpfc_iocbq *saveq; struct lpfc_iocbq *cmdiocb; struct lpfc_iocbq *next_iocb; IOCB_t *irsp; uint32_t free_saveq; u8 cmd_type; lpfc_iocb_type type; unsigned long iflag; u32 ulp_status = get_job_ulpstatus(phba, rspiocbp); u32 ulp_word4 = get_job_word4(phba, rspiocbp); u32 ulp_command = get_job_cmnd(phba, rspiocbp); int rc; spin_lock_irqsave(&phba->hbalock, iflag); /* First add the response iocb to the countinueq list */ list_add_tail(&rspiocbp->list, &pring->iocb_continueq); pring->iocb_continueq_cnt++; /* * By default, the driver expects to free all resources * associated with this iocb completion. */ free_saveq = 1; saveq = list_get_first(&pring->iocb_continueq, struct lpfc_iocbq, list); list_del_init(&pring->iocb_continueq); pring->iocb_continueq_cnt = 0; pring->stats.iocb_rsp++; /* * If resource errors reported from HBA, reduce * queuedepths of the SCSI device. */ if (ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); phba->lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); } if (ulp_status) { /* Rsp ring <ringno> error: IOCB */ if (phba->sli_rev < LPFC_SLI_REV4) { irsp = &rspiocbp->iocb; lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0328 Rsp Ring %d error: ulp_status x%x " "IOCB Data: " "x%08x x%08x x%08x x%08x " "x%08x x%08x x%08x x%08x " "x%08x x%08x x%08x x%08x " "x%08x x%08x x%08x x%08x\n", pring->ringno, ulp_status, get_job_ulpword(rspiocbp, 0), get_job_ulpword(rspiocbp, 1), get_job_ulpword(rspiocbp, 2), get_job_ulpword(rspiocbp, 3), get_job_ulpword(rspiocbp, 4), get_job_ulpword(rspiocbp, 5), *(((uint32_t *)irsp) + 6), *(((uint32_t *)irsp) + 7), *(((uint32_t *)irsp) + 8), *(((uint32_t *)irsp) + 9), *(((uint32_t *)irsp) + 10), *(((uint32_t *)irsp) + 11), *(((uint32_t *)irsp) + 12), *(((uint32_t *)irsp) + 13), *(((uint32_t *)irsp) + 14), *(((uint32_t *)irsp) + 15)); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0321 Rsp Ring %d error: " "IOCB Data: " "x%x x%x x%x x%x\n", pring->ringno, rspiocbp->wcqe_cmpl.word0, rspiocbp->wcqe_cmpl.total_data_placed, rspiocbp->wcqe_cmpl.parameter, rspiocbp->wcqe_cmpl.word3); } } /* * Fetch the iocb command type and call the correct completion * routine. Solicited and Unsolicited IOCBs on the ELS ring * get freed back to the lpfc_iocb_list by the discovery * kernel thread. 
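	 *
	 * Sketch of the dispatch below (reading aid, not new behavior):
	 *   type = lpfc_sli_iocb_cmd_type(ulp_command & CMD_IOCB_MASK);
	 *   LPFC_SOL_IOCB   -> lpfc_sli_process_sol_iocb()
	 *   LPFC_UNSOL_IOCB -> lpfc_sli_process_unsol_iocb()
	 *   LPFC_ABORT_IOCB -> look up the command iocb and complete or
	 *                      release it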
*/ cmd_type = ulp_command & CMD_IOCB_MASK; type = lpfc_sli_iocb_cmd_type(cmd_type); switch (type) { case LPFC_SOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); spin_lock_irqsave(&phba->hbalock, iflag); break; case LPFC_UNSOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); spin_lock_irqsave(&phba->hbalock, iflag); if (!rc) free_saveq = 0; break; case LPFC_ABORT_IOCB: cmdiocb = NULL; if (ulp_command != CMD_XRI_ABORTED_CX) cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, saveq); if (cmdiocb) { /* Call the specified completion routine */ if (cmdiocb->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); cmdiocb->cmd_cmpl(phba, cmdiocb, saveq); spin_lock_irqsave(&phba->hbalock, iflag); } else { __lpfc_sli_release_iocbq(phba, cmdiocb); } } break; case LPFC_UNKNOWN_IOCB: if (ulp_command == CMD_ADAPTER_MSG) { char adaptermsg[LPFC_MAX_ADPTMSG]; memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe, MAX_MSG_DATA); dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s\n", phba->brd_no, adaptermsg); } else { /* Unknown command */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0335 Unknown IOCB " "command Data: x%x " "x%x x%x x%x\n", ulp_command, ulp_status, get_wqe_reqtag(rspiocbp), get_job_ulpcontext(phba, rspiocbp)); } break; } if (free_saveq) { list_for_each_entry_safe(rspiocbp, next_iocb, &saveq->list, list) { list_del_init(&rspiocbp->list); __lpfc_sli_release_iocbq(phba, rspiocbp); } __lpfc_sli_release_iocbq(phba, saveq); } rspiocbp = NULL; spin_unlock_irqrestore(&phba->hbalock, iflag); return rspiocbp; } /** * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * * This routine wraps the actual slow_ring event process routine from the * API jump table function pointer from the lpfc_hba struct. **/ void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); } /** * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * * This function is called from the worker thread when there is a ring event * for non-fcp rings. The caller does not hold any lock. The function will * remove each response iocb in the response ring and calls the handle * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. **/ static void lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { struct lpfc_pgp *pgp; IOCB_t *entry; IOCB_t *irsp = NULL; struct lpfc_iocbq *rspiocbp = NULL; uint32_t portRspPut, portRspMax; unsigned long iflag; uint32_t status; pgp = &phba->port_gp[pring->ringno]; spin_lock_irqsave(&phba->hbalock, iflag); pring->stats.iocb_event++; /* * The next available response entry should never exceed the maximum * entries. If it does, treat it as an adapter hardware error. 
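	 * (That is, portRspPut read from the port must stay below
	 * portRspMax, the number of response entries on this ring;
	 * otherwise the check below logs error 0303 and takes the
	 * HBA offline.)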
*/ portRspMax = pring->sli.sli3.numRiocb; portRspPut = le32_to_cpu(pgp->rspPutInx); if (portRspPut >= portRspMax) { /* * Ring <ringno> handler: portRspPut <portRspPut> is bigger than * rsp ring <portRspMax> */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0303 Ring %d handler: portRspPut %d " "is bigger than rsp ring %d\n", pring->ringno, portRspPut, portRspMax); phba->link_state = LPFC_HBA_ERROR; spin_unlock_irqrestore(&phba->hbalock, iflag); phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); return; } rmb(); while (pring->sli.sli3.rspidx != portRspPut) { /* * Build a completion list and call the appropriate handler. * The process is to get the next available response iocb, get * a free iocb from the list, copy the response data into the * free iocb, insert to the continuation list, and update the * next response index to slim. This process makes response * iocb's in the ring available to DMA as fast as possible but * pays a penalty for a copy operation. Since the iocb is * only 32 bytes, this penalty is considered small relative to * the PCI reads for register values and a slim write. When * the ulpLe field is set, the entire Command has been * received. */ entry = lpfc_resp_iocb(phba, pring); phba->last_completion_time = jiffies; rspiocbp = __lpfc_sli_get_iocbq(phba); if (rspiocbp == NULL) { printk(KERN_ERR "%s: out of buffers! Failing " "completion.\n", __func__); break; } lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, phba->iocb_rsp_size); irsp = &rspiocbp->iocb; if (++pring->sli.sli3.rspidx >= portRspMax) pring->sli.sli3.rspidx = 0; if (pring->ringno == LPFC_ELS_RING) { lpfc_debugfs_slow_ring_trc(phba, "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", *(((uint32_t *) irsp) + 4), *(((uint32_t *) irsp) + 6), *(((uint32_t *) irsp) + 7)); } writel(pring->sli.sli3.rspidx, &phba->host_gp[pring->ringno].rspGetInx); spin_unlock_irqrestore(&phba->hbalock, iflag); /* Handle the response IOCB */ rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); spin_lock_irqsave(&phba->hbalock, iflag); /* * If the port response put pointer has not been updated, sync * the pgp->rspPutInx in the MAILBOX_tand fetch the new port * response put pointer. */ if (pring->sli.sli3.rspidx == portRspPut) { portRspPut = le32_to_cpu(pgp->rspPutInx); } } /* while (pring->sli.sli3.rspidx != portRspPut) */ if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { /* At least one response entry has been freed */ pring->stats.iocb_rsp_full++; /* SET RxRE_RSP in Chip Att register */ status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); writel(status, phba->CAregaddr); readl(phba->CAregaddr); /* flush */ } if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { pring->flag &= ~LPFC_CALL_RING_AVAILABLE; pring->stats.iocb_cmd_empty++; /* Force update of the local copy of cmdGetInx */ pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); lpfc_sli_resume_iocb(phba, pring); if ((pring->lpfc_sli_cmd_available)) (pring->lpfc_sli_cmd_available) (phba, pring); } spin_unlock_irqrestore(&phba->hbalock, iflag); return; } /** * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * * This function is called from the worker thread when there is a pending * ELS response iocb on the driver internal slow-path response iocb worker * queue. The caller does not hold any lock. 
The function will remove each * response iocb from the response worker queue and calls the handle * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. **/ static void lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { struct lpfc_iocbq *irspiocbq; struct hbq_dmabuf *dmabuf; struct lpfc_cq_event *cq_event; unsigned long iflag; int count = 0; spin_lock_irqsave(&phba->hbalock, iflag); phba->hba_flag &= ~HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflag); while (!list_empty(&phba->sli4_hba.sp_queue_event)) { /* Get the response iocb from the head of work queue */ spin_lock_irqsave(&phba->hbalock, iflag); list_remove_head(&phba->sli4_hba.sp_queue_event, cq_event, struct lpfc_cq_event, list); spin_unlock_irqrestore(&phba->hbalock, iflag); switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { case CQE_CODE_COMPL_WQE: irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event); /* Translate ELS WCQE to response IOCBQ */ irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba, irspiocbq); if (irspiocbq) lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); count++; break; case CQE_CODE_RECEIVE: case CQE_CODE_RECEIVE_V1: dmabuf = container_of(cq_event, struct hbq_dmabuf, cq_event); lpfc_sli4_handle_received_buffer(phba, dmabuf); count++; break; default: break; } /* Limit the number of events to 64 to avoid soft lockups */ if (count == 64) break; } } /** * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * * This function aborts all iocbs in the given ring and frees all the iocb * objects in txq. This function issues an abort iocb for all the iocb commands * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before * the return of this function. The caller is not required to hold any locks. **/ void lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { LIST_HEAD(tx_completions); LIST_HEAD(txcmplq_completions); struct lpfc_iocbq *iocb, *next_iocb; int offline; if (pring->ringno == LPFC_ELS_RING) { lpfc_fabric_abort_hba(phba); } offline = pci_channel_offline(phba->pcidev); /* Error everything on txq and txcmplq * First do the txq. 
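	 * Then either issue an ABTS for everything left on the txcmplq,
	 * or, when the PCI channel is offline, simply cancel those iocbs
	 * as well -- see the two branches below. (Reading aid only.)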
*/ if (phba->sli_rev >= LPFC_SLI_REV4) { spin_lock_irq(&pring->ring_lock); list_splice_init(&pring->txq, &tx_completions); pring->txq_cnt = 0; if (offline) { list_splice_init(&pring->txcmplq, &txcmplq_completions); } else { /* Next issue ABTS for everything on the txcmplq */ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); } spin_unlock_irq(&pring->ring_lock); } else { spin_lock_irq(&phba->hbalock); list_splice_init(&pring->txq, &tx_completions); pring->txq_cnt = 0; if (offline) { list_splice_init(&pring->txcmplq, &txcmplq_completions); } else { /* Next issue ABTS for everything on the txcmplq */ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); } spin_unlock_irq(&phba->hbalock); } if (offline) { /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } else { /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings * @phba: Pointer to HBA context object. * * This function aborts all iocbs in FCP rings and frees all the iocb * objects in txq. This function issues an abort iocb for all the iocb commands * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before * the return of this function. The caller is not required to hold any locks. **/ void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; uint32_t i; /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_hdw_queue; i++) { pring = phba->sli4_hba.hdwq[i].io_wq->pring; lpfc_sli_abort_iocb_ring(phba, pring); } } else { pring = &psli->sli3_ring[LPFC_FCP_RING]; lpfc_sli_abort_iocb_ring(phba, pring); } } /** * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring * @phba: Pointer to HBA context object. * * This function flushes all iocbs in the IO ring and frees all the iocb * objects in txq and txcmplq. This function will not issue abort iocbs * for all the iocb commands in txcmplq, they will just be returned with * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI * slot has been permanently disabled. 
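 *
 * Hypothetical caller sketch, for illustration only (real call sites live
 * in the driver's error-handling paths):
 *
 *   if (pci_channel_offline(phba->pcidev))
 *           lpfc_sli_flush_io_rings(phba);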
 **/
void
lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	spin_lock_irq(&phba->hbalock);
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_hdw_queue; i++) {
			pring = phba->sli4_hba.hdwq[i].io_wq->pring;

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			list_for_each_entry_safe(piocb, next_iocb,
						 &pring->txcmplq, list)
				piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmplq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			if (unlikely(pci_channel_offline(phba->pcidev)))
				lpfc_sli4_io_xri_aborted(phba, NULL, 0);
		}
	} else {
		pring = &psli->sli3_ring[LPFC_FCP_RING];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmplq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}

/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise it returns
 * zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	phba->hba_flag |= HBA_NEEDS_CFG_PORT;

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) && !(status & HS_FFERM) &&
	       i++ < 20) {
		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}

/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to be ready,
 * otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	phba->hba_flag &= ~HBA_SETUP;
	return retval;
}

/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}

#define BARRIER_TEST_PATTERN (0xdeadbeef)

/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile struct MAILBOX_word0 mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int i;
	uint8_t hdrtype;

	lockdep_assert_held(&phba->hbalock);

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
*/ resp_buf = phba->MBslimaddr; /* Disable the error attention */ if (lpfc_readl(phba->HCregaddr, &hc_copy)) return; writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); readl(phba->HCregaddr); /* flush */ phba->link_flag |= LS_IGNORE_ERATT; if (lpfc_readl(phba->HAregaddr, &ha_copy)) return; if (ha_copy & HA_ERATT) { /* Clear Chip error bit */ writel(HA_ERATT, phba->HAregaddr); phba->pport->stopped = 1; } mbox.word0 = 0; mbox.mbxCommand = MBX_KILL_BOARD; mbox.mbxOwner = OWN_CHIP; writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); mbox_buf = phba->MBslimaddr; writel(mbox.word0, mbox_buf); for (i = 0; i < 50; i++) { if (lpfc_readl((resp_buf + 1), &resp_data)) return; if (resp_data != ~(BARRIER_TEST_PATTERN)) mdelay(1); else break; } resp_data = 0; if (lpfc_readl((resp_buf + 1), &resp_data)) return; if (resp_data != ~(BARRIER_TEST_PATTERN)) { if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || phba->pport->stopped) goto restore_hc; else goto clear_errat; } mbox.mbxOwner = OWN_HOST; resp_data = 0; for (i = 0; i < 500; i++) { if (lpfc_readl(resp_buf, &resp_data)) return; if (resp_data != mbox.word0) mdelay(1); else break; } clear_errat: while (++i < 500) { if (lpfc_readl(phba->HAregaddr, &ha_copy)) return; if (!(ha_copy & HA_ERATT)) mdelay(1); else break; } if (readl(phba->HAregaddr) & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); phba->pport->stopped = 1; } restore_hc: phba->link_flag &= ~LS_IGNORE_ERATT; writel(hc_copy, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } /** * lpfc_sli_brdkill - Issue a kill_board mailbox command * @phba: Pointer to HBA context object. * * This function issues a kill_board mailbox command and waits for * the error attention interrupt. This function is called for stopping * the firmware processing. The caller is not required to hold any * locks. This function calls lpfc_hba_down_post function to free * any pending commands after the kill. The function will return 1 when it * fails to kill the board else will return 0. **/ int lpfc_sli_brdkill(struct lpfc_hba *phba) { struct lpfc_sli *psli; LPFC_MBOXQ_t *pmb; uint32_t status; uint32_t ha_copy; int retval; int i = 0; psli = &phba->sli; /* Kill HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0329 Kill HBA Data: x%x x%x\n", phba->pport->port_state, psli->sli_flag); pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) return 1; /* Disable the error attention */ spin_lock_irq(&phba->hbalock); if (lpfc_readl(phba->HCregaddr, &status)) { spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); return 1; } status &= ~HC_ERINT_ENA; writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ phba->link_flag |= LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); lpfc_kill_board(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (retval != MBX_SUCCESS) { if (retval != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2752 KILL_BOARD command failed retval %d\n", retval); spin_lock_irq(&phba->hbalock); phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); return 1; } spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); /* There is no completion for a KILL_BOARD mbox cmd. Check for an error * attention every 100ms for 3 seconds. If we don't get ERATT after * 3 seconds we still set HBA_ERROR state because the status of the * board is now undefined. 
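	 * (The loop below implements this as at most 30 iterations of
	 * mdelay(100), i.e. 30 * 100ms = 3 seconds of polling the HA
	 * register.)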
*/ if (lpfc_readl(phba->HAregaddr, &ha_copy)) return 1; while ((i++ < 30) && !(ha_copy & HA_ERATT)) { mdelay(100); if (lpfc_readl(phba->HAregaddr, &ha_copy)) return 1; } del_timer_sync(&psli->mbox_tmo); if (ha_copy & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); phba->pport->stopped = 1; } spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; psli->mbox_active = NULL; phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); lpfc_hba_down_post(phba); phba->link_state = LPFC_HBA_ERROR; return ha_copy & HA_ERATT ? 0 : 1; } /** * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA * @phba: Pointer to HBA context object. * * This function resets the HBA by writing HC_INITFF to the control * register. After the HBA resets, this function resets all the iocb ring * indices. This function disables PCI layer parity checking during * the reset. * This function returns 0 always. * The caller is not required to hold any locks. **/ int lpfc_sli_brdreset(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; uint16_t cfg_value; int i; psli = &phba->sli; /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0325 Reset HBA Data: x%x x%x\n", (phba->pport) ? phba->pport->port_state : 0, psli->sli_flag); /* perform board reset */ phba->fc_eventTag = 0; phba->link_events = 0; phba->hba_flag |= HBA_NEEDS_CFG_PORT; if (phba->pport) { phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; } /* Turn off parity checking and serr during the physical reset */ if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) return -EIO; pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); readl(phba->HCregaddr); /* flush */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ /* Restore PCI cmd register */ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); /* Initialize relevant SLI info */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; pring->flag = 0; pring->sli.sli3.rspidx = 0; pring->sli.sli3.next_cmdidx = 0; pring->sli.sli3.local_getidx = 0; pring->sli.sli3.cmdidx = 0; pring->missbufcnt = 0; } phba->link_state = LPFC_WARM_START; return 0; } /** * lpfc_sli4_brdreset - Reset a sli-4 HBA * @phba: Pointer to HBA context object. * * This function resets a SLI4 HBA. This function disables PCI layer parity * checking during resets the device. The caller is not required to hold * any locks. * * This function returns 0 on success else returns negative error code. 
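 *
 * Reading aid: the reset below follows a save/mask/restore pattern on the
 * PCI command register (sketch only, mirroring the function body; "pdev"
 * stands for phba->pcidev):
 *
 *   pci_read_config_word(pdev, PCI_COMMAND, &cfg);
 *   pci_write_config_word(pdev, PCI_COMMAND,
 *                         cfg & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *   rc = lpfc_pci_function_reset(phba);
 *   pci_write_config_word(pdev, PCI_COMMAND, cfg);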
**/ int lpfc_sli4_brdreset(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; uint16_t cfg_value; int rc = 0; /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0295 Reset HBA Data: x%x x%x x%x\n", phba->pport->port_state, psli->sli_flag, phba->hba_flag); /* perform board reset */ phba->fc_eventTag = 0; phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; phba->hba_flag &= ~HBA_SETUP; spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~(LPFC_PROCESS_LA); phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); /* Turn off parity checking and serr during the physical reset */ if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3205 PCI read Config failed\n"); return -EIO; } pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); /* Perform FCoE PCI function reset before freeing queue memory */ rc = lpfc_pci_function_reset(phba); /* Restore PCI cmd register */ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); return rc; } /** * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba * @phba: Pointer to HBA context object. * * This function is called in the SLI initialization code path to * restart the HBA. The caller is not required to hold any lock. * This function writes MBX_RESTART mailbox command to the SLIM and * resets the HBA. At the end of the function, it calls lpfc_hba_down_post * function to free any pending commands. The function enables * POST only during the first initialization. The function returns zero. * The function does not guarantee completion of MBX_RESTART mailbox * command before the return of this function. **/ static int lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) { volatile struct MAILBOX_word0 mb; struct lpfc_sli *psli; void __iomem *to_slim; spin_lock_irq(&phba->hbalock); psli = &phba->sli; /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0337 Restart HBA Data: x%x x%x\n", (phba->pport) ? phba->pport->port_state : 0, psli->sli_flag); mb.word0 = 0; mb.mbxCommand = MBX_RESTART; mb.mbxHc = 1; lpfc_reset_barrier(phba); to_slim = phba->MBslimaddr; writel(mb.word0, to_slim); readl(to_slim); /* flush */ /* Only skip post after fc_ffinit is completed */ if (phba->pport && phba->pport->port_state) mb.word0 = 1; /* This is really setting up word1 */ else mb.word0 = 0; /* This is really setting up word1 */ to_slim = phba->MBslimaddr + sizeof (uint32_t); writel(mb.word0, to_slim); readl(to_slim); /* flush */ lpfc_sli_brdreset(phba); if (phba->pport) phba->pport->stopped = 0; phba->link_state = LPFC_INIT_START; phba->hba_flag = 0; spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); psli->stats_start = ktime_get_seconds(); /* Give the INITFF and Post time to settle. */ mdelay(100); lpfc_hba_down_post(phba); return 0; } /** * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba * @phba: Pointer to HBA context object. * * This function is called in the SLI initialization code path to restart * a SLI4 HBA. The caller is not required to hold any lock. * At the end of the function, it calls lpfc_hba_down_post function to * free any pending commands. 
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	rc = lpfc_sli4_brdreset(phba);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		goto hba_down_queue;
	}

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	/* Preserve FA-PWWN expectation */
	phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

hba_down_queue:
	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}

/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}

/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA successfully restarted, else it returns a negative error
 * code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 retries. Up to 60 seconds before board
		 * ready is required for the Falcon FIPS zeroization to
		 * complete; any board reset in between restarts the
		 * zeroization and further delays board readiness.
*/ if (i++ >= 200) { /* Adapter failed to init, timeout, status reg <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0436 Adapter failed to init, " "timeout, status reg x%x, " "FW Data: A8 x%x AC x%x\n", status, readl(phba->MBslimaddr + 0xa8), readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; return -ETIMEDOUT; } /* Check to see if any errors occurred during init */ if (status & HS_FFERM) { /* ERROR: During chipset initialization */ /* Adapter failed to init, chipset, status reg <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0437 Adapter failed to init, " "chipset, status reg x%x, " "FW Data: A8 x%x AC x%x\n", status, readl(phba->MBslimaddr + 0xa8), readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; return -EIO; } if (i <= 10) msleep(10); else if (i <= 100) msleep(100); else msleep(1000); if (i == 150) { /* Do post */ phba->pport->port_state = LPFC_VPORT_UNKNOWN; lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ if (lpfc_readl(phba->HSregaddr, &status)) return -EIO; } /* Check to see if any errors occurred during init */ if (status & HS_FFERM) { /* ERROR: During chipset initialization */ /* Adapter failed to init, chipset, status reg <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0438 Adapter failed to init, chipset, " "status reg x%x, " "FW Data: A8 x%x AC x%x\n", status, readl(phba->MBslimaddr + 0xa8), readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; return -EIO; } phba->hba_flag |= HBA_NEEDS_CFG_PORT; /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ /* setup host attn register */ writel(0xffffffff, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ return 0; } /** * lpfc_sli_hbq_count - Get the number of HBQs to be configured * * This function calculates and returns the number of HBQs required to be * configured. **/ int lpfc_sli_hbq_count(void) { return ARRAY_SIZE(lpfc_hbq_defs); } /** * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries * * This function adds the number of hbq entries in every HBQ to get * the total number of hbq entries required for the HBA and returns * the total count. **/ static int lpfc_sli_hbq_entry_count(void) { int hbq_count = lpfc_sli_hbq_count(); int count = 0; int i; for (i = 0; i < hbq_count; ++i) count += lpfc_hbq_defs[i]->entry_count; return count; } /** * lpfc_sli_hbq_size - Calculate memory required for all hbq entries * * This function calculates amount of memory required for all hbq entries * to be configured and returns the total memory required. **/ int lpfc_sli_hbq_size(void) { return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); } /** * lpfc_sli_hbq_setup - configure and initialize HBQs * @phba: Pointer to HBA context object. * * This function is called during the SLI initialization to configure * all the HBQs and post buffers to the HBQ. The caller is not * required to hold any locks. This function will return zero if successful * else it will return negative error code. 
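 *
 * Sizing note derived from the helpers above (not new behavior): the total
 * HBQ memory the driver needs is
 *
 *   lpfc_sli_hbq_size() = lpfc_sli_hbq_entry_count() *
 *                         sizeof(struct lpfc_hbq_entry)
 *
 * where the entry count is the sum of entry_count over lpfc_hbq_defs[].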
**/ static int lpfc_sli_hbq_setup(struct lpfc_hba *phba) { int hbq_count = lpfc_sli_hbq_count(); LPFC_MBOXQ_t *pmb; MAILBOX_t *pmbox; uint32_t hbqno; uint32_t hbq_entry_index; /* Get a Mailbox buffer to setup mailbox * commands for HBA initialization */ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) return -ENOMEM; pmbox = &pmb->u.mb; /* Initialize the struct lpfc_sli_hbq structure for each hbq */ phba->link_state = LPFC_INIT_MBX_CMDS; phba->hbq_in_use = 1; hbq_entry_index = 0; for (hbqno = 0; hbqno < hbq_count; ++hbqno) { phba->hbqs[hbqno].next_hbqPutIdx = 0; phba->hbqs[hbqno].hbqPutIdx = 0; phba->hbqs[hbqno].local_hbqGetIdx = 0; phba->hbqs[hbqno].entry_count = lpfc_hbq_defs[hbqno]->entry_count; lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], hbq_entry_index, pmb); hbq_entry_index += phba->hbqs[hbqno].entry_count; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { /* Adapter failed to init, mbxCmd <cmd> CFG_RING, mbxStatus <status>, ring <num> */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, "1805 Adapter failed to init. " "Data: x%x x%x x%x\n", pmbox->mbxCommand, pmbox->mbxStatus, hbqno); phba->link_state = LPFC_HBA_ERROR; mempool_free(pmb, phba->mbox_mem_pool); return -ENXIO; } } phba->hbq_count = hbq_count; mempool_free(pmb, phba->mbox_mem_pool); /* Initially populate or replenish the HBQs */ for (hbqno = 0; hbqno < hbq_count; ++hbqno) lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); return 0; } /** * lpfc_sli4_rb_setup - Initialize and post RBs to HBA * @phba: Pointer to HBA context object. * * This function is called during the SLI initialization to configure * all the HBQs and post buffers to the HBQ. The caller is not * required to hold any locks. This function will return zero if successful * else it will return negative error code. **/ static int lpfc_sli4_rb_setup(struct lpfc_hba *phba) { phba->hbq_in_use = 1; /** * Specific case when the MDS diagnostics is enabled and supported. * The receive buffer count is truncated to manage the incoming * traffic. **/ if (phba->cfg_enable_mds_diags && phba->mds_diags_support) phba->hbqs[LPFC_ELS_HBQ].entry_count = lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; else phba->hbqs[LPFC_ELS_HBQ].entry_count = lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; phba->hbq_count = 1; lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); /* Initially populate or replenish the HBQs */ return 0; } /** * lpfc_sli_config_port - Issue config port mailbox command * @phba: Pointer to HBA context object. * @sli_mode: sli mode - 2/3 * * This function is called by the sli initialization code path * to issue config_port mailbox command. This function restarts the * HBA firmware and issues a config_port mailbox command to configure * the SLI interface in the sli mode specified by sli_mode * variable. The caller is not required to hold any locks. * The function returns 0 if successful, else returns negative error * code. 
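 *
 * Typical use, mirroring the call made from lpfc_sli_hba_setup() further
 * below (illustration only):
 *
 *   rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
 *   if (rc)
 *           return -EIO;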
**/ int lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) { LPFC_MBOXQ_t *pmb; uint32_t resetcount = 0, rc = 0, done = 0; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } phba->sli_rev = sli_mode; while (resetcount < 2 && !done) { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; spin_unlock_irq(&phba->hbalock); phba->pport->port_state = LPFC_VPORT_UNKNOWN; lpfc_sli_brdrestart(phba); rc = lpfc_sli_chipset_init(phba); if (rc) break; spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irq(&phba->hbalock); resetcount++; /* Call pre CONFIG_PORT mailbox command initialization. A * value of 0 means the call was successful. Any other * nonzero value is a failure, but if ERESTART is returned, * the driver may reset the HBA and try again. */ rc = lpfc_config_port_prep(phba); if (rc == -ERESTART) { phba->link_state = LPFC_LINK_UNKNOWN; continue; } else if (rc) break; phba->link_state = LPFC_INIT_MBX_CMDS; lpfc_config_port(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED | LPFC_SLI3_CRP_ENABLED | LPFC_SLI3_DSS_ENABLED); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0442 Adapter failed to init, mbxCmd x%x " "CONFIG_PORT, mbxStatus x%x Data: x%x\n", pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); rc = -ENXIO; } else { /* Allow asynchronous mailbox command to go through */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); done = 1; if ((pmb->u.mb.un.varCfgPort.casabt == 1) && (pmb->u.mb.un.varCfgPort.gasabt == 0)) lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "3110 Port did not grant ASABT\n"); } } if (!done) { rc = -EINVAL; goto do_prep_failed; } if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { if (!pmb->u.mb.un.varCfgPort.cMA) { rc = -ENXIO; goto do_prep_failed; } if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; phba->max_vports = (phba->max_vpi > phba->max_vports) ? phba->max_vpi : phba->max_vports; } else phba->max_vpi = 0; if (pmb->u.mb.un.varCfgPort.gerbm) phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; if (pmb->u.mb.un.varCfgPort.gcrp) phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; phba->port_gp = phba->mbox->us.s3_pgp.port; if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { if (pmb->u.mb.un.varCfgPort.gbg == 0) { phba->cfg_enable_bg = 0; phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0443 Adapter did not grant " "BlockGuard\n"); } } } else { phba->hbq_get = NULL; phba->port_gp = phba->mbox->us.s2.port; phba->max_vpi = 0; } do_prep_failed: mempool_free(pmb, phba->mbox_mem_pool); return rc; } /** * lpfc_sli_hba_setup - SLI initialization function * @phba: Pointer to HBA context object. * * This function is the main SLI initialization function. This function * is called by the HBA initialization code, HBA reset code and HBA * error attention handler code. Caller is not required to hold any * locks. This function issues config_port mailbox command to configure * the SLI, setup iocb rings and HBQ rings. 
In the end the function * calls the config_port_post function to issue init_link mailbox * command and to start the discovery. The function will return zero * if successful, else it will return negative error code. **/ int lpfc_sli_hba_setup(struct lpfc_hba *phba) { uint32_t rc; int i; int longs; /* Enable ISR already does config_port because of config_msi mbx */ if (phba->hba_flag & HBA_NEEDS_CFG_PORT) { rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3); if (rc) return -EIO; phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; } phba->fcp_embed_io = 0; /* SLI4 FC support only */ if (phba->sli_rev == 3) { phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; } else { phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; phba->sli3_options = 0; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0444 Firmware in SLI %x mode. Max_vpi %d\n", phba->sli_rev, phba->max_vpi); rc = lpfc_sli_ring_map(phba); if (rc) goto lpfc_sli_hba_setup_error; /* Initialize VPIs. */ if (phba->sli_rev == LPFC_SLI_REV3) { /* * The VPI bitmask and physical ID array are allocated * and initialized once only - at driver load. A port * reset doesn't need to reinitialize this memory. */ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (!phba->vpi_bmask) { rc = -ENOMEM; goto lpfc_sli_hba_setup_error; } phba->vpi_ids = kcalloc(phba->max_vpi + 1, sizeof(uint16_t), GFP_KERNEL); if (!phba->vpi_ids) { kfree(phba->vpi_bmask); rc = -ENOMEM; goto lpfc_sli_hba_setup_error; } for (i = 0; i < phba->max_vpi; i++) phba->vpi_ids[i] = i; } } /* Init HBQs */ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { rc = lpfc_sli_hbq_setup(phba); if (rc) goto lpfc_sli_hba_setup_error; } spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_PROCESS_LA; spin_unlock_irq(&phba->hbalock); rc = lpfc_config_port_post(phba); if (rc) goto lpfc_sli_hba_setup_error; return rc; lpfc_sli_hba_setup_error: phba->link_state = LPFC_HBA_ERROR; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0445 Firmware initialization failed\n"); return rc; } /** * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region * @phba: Pointer to HBA context object. * * This function issue a dump mailbox command to read config region * 23 and parse the records in the region and populate driver * data structure. **/ static int lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; struct lpfc_dmabuf *mp; struct lpfc_mqe *mqe; uint32_t data_length; int rc; /* Program the default value of vlan_id and fc_map */ phba->valid_vlan = 0; phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; mqe = &mboxq->u.mqe; if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { rc = -ENOMEM; goto out_free_mboxq; } mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):2571 Mailbox cmd x%x Status x%x " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " "x%x x%x x%x x%x x%x x%x x%x x%x x%x " "CQ: x%x x%x x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, bf_get(lpfc_mqe_command, mqe), bf_get(lpfc_mqe_status, mqe), mqe->un.mb_words[0], mqe->un.mb_words[1], mqe->un.mb_words[2], mqe->un.mb_words[3], mqe->un.mb_words[4], mqe->un.mb_words[5], mqe->un.mb_words[6], mqe->un.mb_words[7], mqe->un.mb_words[8], mqe->un.mb_words[9], mqe->un.mb_words[10], mqe->un.mb_words[11], mqe->un.mb_words[12], mqe->un.mb_words[13], mqe->un.mb_words[14], mqe->un.mb_words[15], mqe->un.mb_words[16], mqe->un.mb_words[50], mboxq->mcqe.word0, mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, mboxq->mcqe.trailer); if (rc) { rc = -EIO; goto out_free_mboxq; } data_length = mqe->un.mb_words[5]; if (data_length > DMP_RGN23_SIZE) { rc = -EIO; goto out_free_mboxq; } lpfc_parse_fcoe_conf(phba, mp->virt, data_length); rc = 0; out_free_mboxq: lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); return rc; } /** * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to the LPFC_MBOXQ_t structure. * @vpd: pointer to the memory to hold resulting port vpd data. * @vpd_size: On input, the number of bytes allocated to @vpd. * On output, the number of data bytes in @vpd. * * This routine executes a READ_REV SLI4 mailbox command. In * addition, this routine gets the port vpd data. * * Return codes * 0 - successful * -ENOMEM - could not allocated memory. **/ static int lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, uint8_t *vpd, uint32_t *vpd_size) { int rc = 0; uint32_t dma_size; struct lpfc_dmabuf *dmabuf; struct lpfc_mqe *mqe; dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) return -ENOMEM; /* * Get a DMA buffer for the vpd data resulting from the READ_REV * mailbox command. */ dma_size = *vpd_size; dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return -ENOMEM; } /* * The SLI4 implementation of READ_REV conflicts at word1, * bits 31:16 and SLI4 adds vpd functionality not present * in SLI3. This code corrects the conflicts. */ lpfc_read_rev(phba, mboxq); mqe = &mboxq->u.mqe; mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); mqe->un.read_rev.word1 &= 0x0000FFFF; bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc) { dma_free_coherent(&phba->pcidev->dev, dma_size, dmabuf->virt, dmabuf->phys); kfree(dmabuf); return -EIO; } /* * The available vpd length cannot be bigger than the * DMA buffer passed to the port. Catch the less than * case and update the caller's size. */ if (mqe->un.read_rev.avail_vpd_len < *vpd_size) *vpd_size = mqe->un.read_rev.avail_vpd_len; memcpy(vpd, dmabuf->virt, *vpd_size); dma_free_coherent(&phba->pcidev->dev, dma_size, dmabuf->virt, dmabuf->phys); kfree(dmabuf); return 0; } /** * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes * @phba: pointer to lpfc hba data structure. * * This routine retrieves SLI4 device physical port name this PCI function * is attached to. 
* * Return codes * 0 - successful * otherwise - failed to retrieve controller attributes **/ static int lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; struct lpfc_controller_attribute *cntl_attr; void *virtaddr = NULL; uint32_t alloclen, reqlen; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; int rc; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3084 Allocated DMA memory size (%d) is " "less than the requested DMA memory size " "(%d)\n", alloclen, reqlen); rc = -ENOMEM; goto out_free_mboxq; } rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); virtaddr = mboxq->sge_array->addr[0]; mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; shdr = &mbx_cntl_attr->cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3085 Mailbox x%x (x%x/x%x) failed, " "rc:x%x, status:x%x, add_status:x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), rc, shdr_status, shdr_add_status); rc = -ENXIO; goto out_free_mboxq; } cntl_attr = &mbx_cntl_attr->cntl_attr; phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; phba->sli4_hba.lnk_info.lnk_tp = bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); phba->sli4_hba.lnk_info.lnk_no = bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " "flash_id: x%02x, asic_rev: x%02x\n", phba->sli4_hba.lnk_info.lnk_tp, phba->sli4_hba.lnk_info.lnk_no, phba->BIOSVersion, phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev); out_free_mboxq: if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, mboxq); else mempool_free(mboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name * @phba: pointer to lpfc hba data structure. * * This routine retrieves SLI4 device physical port name this PCI function * is attached to. 
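 *
 * Illustrative sketch (not compiled here; it only restates the code that
 * follows): the GET_PORT_NAME response carries one single-character name
 * per link number, and on success the driver keeps it as a one-character
 * string:
 *
 *	cport_name = bf_get(lpfc_mbx_get_port_name_name0,
 *			    &get_port_name->u.response);
 *	phba->Port[0] = cport_name;
 *	phba->Port[1] = '\0';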
* * Return codes * 0 - successful * otherwise - failed to retrieve physical port name **/ static int lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; struct lpfc_mbx_get_port_name *get_port_name; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; char cport_name = 0; int rc; /* We assume nothing at this point */ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; /* obtain link type and link number via READ_CONFIG */ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; lpfc_sli4_read_config(phba); if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) goto retrieve_ppname; /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ rc = lpfc_sli4_get_ctl_attr(phba); if (rc) goto out_free_mboxq; retrieve_ppname: lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_GET_PORT_NAME, sizeof(struct lpfc_mbx_get_port_name) - sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); get_port_name = &mboxq->u.mqe.un.get_port_name; shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, phba->sli4_hba.lnk_info.lnk_tp); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3087 Mailbox x%x (x%x/x%x) failed: " "rc:x%x, status:x%x, add_status:x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), rc, shdr_status, shdr_add_status); rc = -ENXIO; goto out_free_mboxq; } switch (phba->sli4_hba.lnk_info.lnk_no) { case LPFC_LINK_NUMBER_0: cport_name = bf_get(lpfc_mbx_get_port_name_name0, &get_port_name->u.response); phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; break; case LPFC_LINK_NUMBER_1: cport_name = bf_get(lpfc_mbx_get_port_name_name1, &get_port_name->u.response); phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; break; case LPFC_LINK_NUMBER_2: cport_name = bf_get(lpfc_mbx_get_port_name_name2, &get_port_name->u.response); phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; break; case LPFC_LINK_NUMBER_3: cport_name = bf_get(lpfc_mbx_get_port_name_name3, &get_port_name->u.response); phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; break; default: break; } if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { phba->Port[0] = cport_name; phba->Port[1] = '\0'; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3091 SLI get port name: %s\n", phba->Port); } out_free_mboxq: if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, mboxq); else mempool_free(mboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;

	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
					   LPFC_QUEUE_REARM);

	if (sli4_hba->hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			qp = &sli4_hba->hdwq[qidx];
			/* ARM the corresponding CQ */
			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
						   LPFC_QUEUE_REARM);
		}

		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			eq = sli4_hba->hba_eq_hdl[qidx].eq;
			/* ARM the corresponding EQ */
			sli4_hba->sli4_write_eq_db(phba, eq, 0,
						   LPFC_QUEUE_REARM);
		}
	}

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_write_cq_db(phba,
				sli4_hba->nvmet_cqset[qidx], 0,
				LPFC_QUEUE_REARM);
		}
	}
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful.  Nonzero otherwise.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	*extnt_count = 0;
	*extnt_size = 0;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
* @phba: Pointer to HBA context object. * @type: The extent type to check. * * This function reads the current available extents from the port and checks * if the extent count or extent size has changed since the last access. * Callers use this routine post port reset to understand if there is a * extent reprovisioning requirement. * * Returns: * -Error: error indicates problem. * 1: Extent count or size has changed. * 0: No changes. **/ static int lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) { uint16_t curr_ext_cnt, rsrc_ext_cnt; uint16_t size_diff, rsrc_ext_size; int rc = 0; struct lpfc_rsrc_blks *rsrc_entry; struct list_head *rsrc_blk_list = NULL; size_diff = 0; curr_ext_cnt = 0; rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, &rsrc_ext_cnt, &rsrc_ext_size); if (unlikely(rc)) return -EIO; switch (type) { case LPFC_RSC_TYPE_FCOE_RPI: rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_VPI: rsrc_blk_list = &phba->lpfc_vpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_XRI: rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; break; case LPFC_RSC_TYPE_FCOE_VFI: rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; break; default: break; } list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { curr_ext_cnt++; if (rsrc_entry->rsrc_size != rsrc_ext_size) size_diff++; } if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) rc = 1; return rc; } /** * lpfc_sli4_cfg_post_extnts - * @phba: Pointer to HBA context object. * @extnt_cnt: number of available extents. * @type: the extent type (rpi, xri, vfi, vpi). * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. * @mbox: pointer to the caller's allocated mailbox structure. * * This function executes the extents allocation request. It also * takes care of the amount of memory needed to allocate or get the * allocated extents. It is the caller's responsibility to evaluate * the response. * * Returns: * -Error: Error value describes the condition found. * 0: if successful **/ static int lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) { int rc = 0; uint32_t req_len; uint32_t emb_len; uint32_t alloc_len, mbox_tmo; /* Calculate the total requested length of the dma memory */ req_len = extnt_cnt * sizeof(uint16_t); /* * Calculate the size of an embedded mailbox. The uint32_t * accounts for extents-specific word. */ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - sizeof(uint32_t); /* * Presume the allocation and response will fit into an embedded * mailbox. If not true, reconfigure to a non-embedded mailbox. */ *emb = LPFC_SLI4_MBX_EMBED; if (req_len > emb_len) { req_len = extnt_cnt * sizeof(uint16_t) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); *emb = LPFC_SLI4_MBX_NEMBED; } alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, req_len, *emb); if (alloc_len < req_len) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2982 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); return -ENOMEM; } rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); if (unlikely(rc)) return -EIO; if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } if (unlikely(rc)) rc = -EIO; return rc; } /** * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 
* @phba: Pointer to HBA context object. * @type: The resource extent type to allocate. * * This function allocates the number of elements for the specified * resource type. **/ static int lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) { bool emb = false; uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; uint16_t rsrc_id, rsrc_start, j, k; uint16_t *ids; int i, rc; unsigned long longs; unsigned long *bmask; struct lpfc_rsrc_blks *rsrc_blks; LPFC_MBOXQ_t *mbox; uint32_t length; struct lpfc_id_range *id_array = NULL; void *virtaddr = NULL; struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; struct list_head *ext_blk_list; rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, &rsrc_cnt, &rsrc_size); if (unlikely(rc)) return -EIO; if ((rsrc_cnt == 0) || (rsrc_size == 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3009 No available Resource Extents " "for resource type 0x%x: Count: 0x%x, " "Size 0x%x\n", type, rsrc_cnt, rsrc_size); return -ENOMEM; } lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, "2903 Post resource extents type-0x%x: " "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); if (unlikely(rc)) { rc = -EIO; goto err_exit; } /* * Figure out where the response is located. Then get local pointers * to the response data. The port does not guarantee to respond to * all extents counts request so update the local variable with the * allocated count from the port. */ if (emb == LPFC_SLI4_MBX_EMBED) { rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; id_array = &rsrc_ext->u.rsp.id[0]; rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); } else { virtaddr = mbox->sge_array->addr[0]; n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); id_array = &n_rsrc->id; } longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; rsrc_id_cnt = rsrc_cnt * rsrc_size; /* * Based on the resource size and count, correct the base and max * resource values. */ length = sizeof(struct lpfc_rsrc_blks); switch (type) { case LPFC_RSC_TYPE_FCOE_RPI: phba->sli4_hba.rpi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_bmask)) { rc = -ENOMEM; goto err_exit; } phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_ids)) { kfree(phba->sli4_hba.rpi_bmask); rc = -ENOMEM; goto err_exit; } /* * The next_rpi was initialized with the maximum available * count but the port may allocate a smaller number. Catch * that case and update the next_rpi. */ phba->sli4_hba.next_rpi = rsrc_id_cnt; /* Initialize local ptrs for common extent processing later. */ bmask = phba->sli4_hba.rpi_bmask; ids = phba->sli4_hba.rpi_ids; ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_VPI: phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->vpi_bmask)) { rc = -ENOMEM; goto err_exit; } phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->vpi_ids)) { kfree(phba->vpi_bmask); rc = -ENOMEM; goto err_exit; } /* Initialize local ptrs for common extent processing later. 
*/ bmask = phba->vpi_bmask; ids = phba->vpi_ids; ext_blk_list = &phba->lpfc_vpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_XRI: phba->sli4_hba.xri_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_bmask)) { rc = -ENOMEM; goto err_exit; } phba->sli4_hba.max_cfg_param.xri_used = 0; phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_ids)) { kfree(phba->sli4_hba.xri_bmask); rc = -ENOMEM; goto err_exit; } /* Initialize local ptrs for common extent processing later. */ bmask = phba->sli4_hba.xri_bmask; ids = phba->sli4_hba.xri_ids; ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; break; case LPFC_RSC_TYPE_FCOE_VFI: phba->sli4_hba.vfi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_bmask)) { rc = -ENOMEM; goto err_exit; } phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_ids)) { kfree(phba->sli4_hba.vfi_bmask); rc = -ENOMEM; goto err_exit; } /* Initialize local ptrs for common extent processing later. */ bmask = phba->sli4_hba.vfi_bmask; ids = phba->sli4_hba.vfi_ids; ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; break; default: /* Unsupported Opcode. Fail call. */ id_array = NULL; bmask = NULL; ids = NULL; ext_blk_list = NULL; goto err_exit; } /* * Complete initializing the extent configuration with the * allocated ids assigned to this function. The bitmask serves * as an index into the array and manages the available ids. The * array just stores the ids communicated to the port via the wqes. */ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { if ((i % 2) == 0) rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, &id_array[k]); else rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, &id_array[k]); rsrc_blks = kzalloc(length, GFP_KERNEL); if (unlikely(!rsrc_blks)) { rc = -ENOMEM; kfree(bmask); kfree(ids); goto err_exit; } rsrc_blks->rsrc_start = rsrc_id; rsrc_blks->rsrc_size = rsrc_size; list_add_tail(&rsrc_blks->list, ext_blk_list); rsrc_start = rsrc_id; if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { phba->sli4_hba.io_xri_start = rsrc_start + lpfc_sli4_get_iocb_cnt(phba); } while (rsrc_id < (rsrc_start + rsrc_size)) { ids[j] = rsrc_id; rsrc_id++; j++; } /* Entire word processed. Get next word.*/ if ((i % 2) == 1) k++; } err_exit: lpfc_sli4_mbox_cmd_free(phba, mbox); return rc; } /** * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. * @phba: Pointer to HBA context object. * @type: the extent's type. * * This function deallocates all extents of a particular resource type. * SLI4 does not allow for deallocating a particular extent range. It * is the caller's responsibility to release all kernel memory resources. **/ static int lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) { int rc; uint32_t length, mbox_tmo = 0; LPFC_MBOXQ_t *mbox; struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; /* * This function sends an embedded mailbox because it only sends the * the resource type. All extents of this type are released by the * port. */ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, length, LPFC_SLI4_MBX_EMBED); /* Send an extents count of 0 - the dealloc doesn't use it. 
*/ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, LPFC_SLI4_MBX_EMBED); if (unlikely(rc)) { rc = -EIO; goto out_free_mbox; } if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } if (unlikely(rc)) { rc = -EIO; goto out_free_mbox; } dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; if (bf_get(lpfc_mbox_hdr_status, &dealloc_rsrc->header.cfg_shdr.response)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2919 Failed to release resource extents " "for type %d - Status 0x%x Add'l Status 0x%x. " "Resource memory not released.\n", type, bf_get(lpfc_mbox_hdr_status, &dealloc_rsrc->header.cfg_shdr.response), bf_get(lpfc_mbox_hdr_add_status, &dealloc_rsrc->header.cfg_shdr.response)); rc = -EIO; goto out_free_mbox; } /* Release kernel memory resources for the specific type. */ switch (type) { case LPFC_RSC_TYPE_FCOE_VPI: kfree(phba->vpi_bmask); kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, &phba->lpfc_vpi_blk_list, list) { list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); kfree(phba->sli4_hba.xri_ids); list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, &phba->sli4_hba.lpfc_xri_blk_list, list) { list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } break; case LPFC_RSC_TYPE_FCOE_VFI: kfree(phba->sli4_hba.vfi_bmask); kfree(phba->sli4_hba.vfi_ids); bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, &phba->sli4_hba.lpfc_vfi_blk_list, list) { list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } break; case LPFC_RSC_TYPE_FCOE_RPI: /* RPI bitmask and physical id array are cleaned up earlier. 
*/ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, &phba->sli4_hba.lpfc_rpi_blk_list, list) { list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } break; default: break; } bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); out_free_mbox: mempool_free(mbox, phba->mbox_mem_pool); return rc; } static void lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, uint32_t feature) { uint32_t len; u32 sig_freq = 0; len = sizeof(struct lpfc_mbx_set_feature) - sizeof(struct lpfc_sli4_cfg_mhdr); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_SET_FEATURES, len, LPFC_SLI4_MBX_EMBED); switch (feature) { case LPFC_SET_UE_RECOVERY: bf_set(lpfc_mbx_set_feature_UER, &mbox->u.mqe.un.set_feature, 1); mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; mbox->u.mqe.un.set_feature.param_len = 8; break; case LPFC_SET_MDS_DIAGS: bf_set(lpfc_mbx_set_feature_mds, &mbox->u.mqe.un.set_feature, 1); bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, &mbox->u.mqe.un.set_feature, 1); mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; mbox->u.mqe.un.set_feature.param_len = 8; break; case LPFC_SET_CGN_SIGNAL: if (phba->cmf_active_mode == LPFC_CFG_OFF) sig_freq = 0; else sig_freq = phba->cgn_sig_freq; if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, &mbox->u.mqe.un.set_feature, sig_freq); bf_set(lpfc_mbx_set_feature_CGN_warn_freq, &mbox->u.mqe.un.set_feature, sig_freq); } if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) bf_set(lpfc_mbx_set_feature_CGN_warn_freq, &mbox->u.mqe.un.set_feature, sig_freq); if (phba->cmf_active_mode == LPFC_CFG_OFF || phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED) sig_freq = 0; else sig_freq = lpfc_acqe_cgn_frequency; bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, &mbox->u.mqe.un.set_feature, sig_freq); mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL; mbox->u.mqe.un.set_feature.param_len = 12; break; case LPFC_SET_DUAL_DUMP: bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); bf_set(lpfc_mbx_set_feature_ddquery, &mbox->u.mqe.un.set_feature, 0); mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; mbox->u.mqe.un.set_feature.param_len = 4; break; case LPFC_SET_ENABLE_MI: mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI; mbox->u.mqe.un.set_feature.param_len = 4; bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature, phba->pport->cfg_lun_queue_depth); bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature, phba->sli4_hba.pc_sli4_params.mi_ver); break; case LPFC_SET_LD_SIGNAL: mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL; mbox->u.mqe.un.set_feature.param_len = 16; bf_set(lpfc_mbx_set_feature_lds_qry, &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP); break; case LPFC_SET_ENABLE_CMF: mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; mbox->u.mqe.un.set_feature.param_len = 4; bf_set(lpfc_mbx_set_feature_cmf, &mbox->u.mqe.un.set_feature, 1); break; } return; } /** * lpfc_ras_stop_fwlog: Disable FW logging by the adapter * @phba: Pointer to HBA context object. * * Disable FW logging into host memory on the adapter. To * be done before reading logs from the host memory. 
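 *
 * Minimal usage sketch (illustrative only; copy_out() is a placeholder,
 * not a driver function): quiesce FW logging, then walk the posted
 * buffers:
 *
 *	lpfc_ras_stop_fwlog(phba);
 *	list_for_each_entry(dmabuf, &phba->ras_fwlog.fwlog_buff_list, list)
 *		copy_out(dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);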
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* Wait 10ms for firmware to stop using DMA buffer */
	usleep_range(10 * 1000, 20 * 1000);
}

/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
 * and for the buffers posted to the adapter to receive FW log updates.
 * The buffer count is calculated from the module parameter
 * ras_fwlog_buffsize; the size of each buffer posted to FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for driver's RAS MBX command to the device.
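 *
 * In outline (a restatement of the handler below, not extra behavior):
 *
 *	if mbxStatus or the cfg_shdr status is non-zero
 *		mark ras_hwsupport false and free the RAS DMA buffers
 *	else
 *		set ras_fwlog->state to ACTIVE
 *
 * and in both cases the mailbox element is returned to the mailbox
 * mempool.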
**/ static void lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; mb = &pmb->u.mb; shdr = (union lpfc_sli4_cfg_shdr *) &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6188 FW LOG mailbox " "completed with status x%x add_status x%x," " mbx status x%x\n", shdr_status, shdr_add_status, mb->mbxStatus); ras_fwlog->ras_hwsupport = false; goto disable_ras; } spin_lock_irq(&phba->hbalock); ras_fwlog->state = ACTIVE; spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); return; disable_ras: /* Free RAS DMA memory */ lpfc_sli4_ras_dma_free(phba); mempool_free(pmb, phba->mbox_mem_pool); } /** * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command * @phba: pointer to lpfc hba data structure. * @fwlog_level: Logging verbosity level. * @fwlog_enable: Enable/Disable logging. * * Initialize memory and post mailbox command to enable FW logging in host * memory. **/ int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level, uint32_t fwlog_enable) { struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; int rc = 0; spin_lock_irq(&phba->hbalock); ras_fwlog->state = INACTIVE; spin_unlock_irq(&phba->hbalock); fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize); fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); /* * If re-enabling FW logging support use earlier allocated * DMA buffers while posting MBX command. 
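 *
 * Worked example of the sizing computed just above (hypothetical values;
 * only the 64K entry size is stated elsewhere in this file): if
 * LPFC_RAS_MIN_BUFF_POST_SIZE were 256K and cfg_ras_fwlog_buffsize were 4,
 * fwlog_buffsize would be 256K * 4 = 1M and fwlog_entry_count would be
 * 1M / 64K = 16 buffers posted to the adapter.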
**/ if (!ras_fwlog->lwpd.virt) { rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "6189 FW Log Memory Allocation Failed"); return rc; } } /* Setup Mailbox command */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6190 RAS MBX Alloc Failed"); rc = -ENOMEM; goto mem_free; } ras_fwlog->fw_loglevel = fwlog_level; len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, len, LPFC_SLI4_MBX_EMBED); mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, fwlog_enable); bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, ras_fwlog->fw_loglevel); bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, ras_fwlog->fw_buffcount); bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); /* Update DMA buffer address */ list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } /* Update LPWD address */ mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); spin_lock_irq(&phba->hbalock); ras_fwlog->state = REG_INPROGRESS; spin_unlock_irq(&phba->hbalock); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6191 FW-Log Mailbox failed. " "status %d mbxStatus : x%x", rc, bf_get(lpfc_mqe_status, &mbox->u.mqe)); mempool_free(mbox, phba->mbox_mem_pool); rc = -EIO; goto mem_free; } else rc = 0; mem_free: if (rc) lpfc_sli4_ras_dma_free(phba); return rc; } /** * lpfc_sli4_ras_setup - Check if RAS supported on the adapter * @phba: Pointer to HBA context object. * * Check if RAS is supported on the adapter and initialize it. **/ void lpfc_sli4_ras_setup(struct lpfc_hba *phba) { /* Check RAS FW Log needs to be enabled or not */ if (lpfc_check_fwlog_support(phba)) return; lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, LPFC_RAS_ENABLE_LOGGING); } /** * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. * @phba: Pointer to HBA context object. * * This function allocates all SLI4 resource identifiers. **/ int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) { int i, rc, error = 0; uint16_t count, base; unsigned long longs; if (!phba->sli4_hba.rpi_hdrs_in_use) phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; if (phba->sli4_hba.extents_in_use) { /* * The port supports resource extents. The XRI, VPI, VFI, RPI * resource extent count must be read and allocated before * provisioning the resource id arrays. */ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == LPFC_IDX_RSRC_RDY) { /* * Extent-based resources are set - the driver could * be in a port reset. Figure out if any corrective * actions need to be taken. 
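 *
 * In outline: each resource type is checked with
 * lpfc_sli4_chk_avail_extnt_rsrc(); if any check fails or reports a
 * changed count or size, every extent pool is released with
 * lpfc_sli4_dealloc_extent() and reallocated below, otherwise the
 * existing provisioning is kept and the function returns 0.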
*/ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI); if (rc != 0) error++; rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI); if (rc != 0) error++; rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI); if (rc != 0) error++; rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI); if (rc != 0) error++; /* * It's possible that the number of resources * provided to this port instance changed between * resets. Detect this condition and reallocate * resources. Otherwise, there is no action. */ if (error) { lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, "2931 Detected extent resource " "change. Reallocating all " "extents.\n"); rc = lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); rc = lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); rc = lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); rc = lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); } else return 0; } rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); if (unlikely(rc)) goto err_exit; rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); if (unlikely(rc)) goto err_exit; rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); if (unlikely(rc)) goto err_exit; rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); if (unlikely(rc)) goto err_exit; bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, LPFC_IDX_RSRC_RDY); return rc; } else { /* * The port does not support resource extents. The XRI, VPI, * VFI, RPI resource ids were determined from READ_CONFIG. * Just allocate the bitmasks and provision the resource id * arrays. If a port reset is active, the resources don't * need any action - just exit. */ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == LPFC_IDX_RSRC_RDY) { lpfc_sli4_dealloc_resource_identifiers(phba); lpfc_sli4_remove_rpis(phba); } /* RPIs. */ count = phba->sli4_hba.max_cfg_param.max_rpi; if (count <= 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3279 Invalid provisioning of " "rpi:%d\n", count); rc = -EINVAL; goto err_exit; } base = phba->sli4_hba.max_cfg_param.rpi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.rpi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_bmask)) { rc = -ENOMEM; goto err_exit; } phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_ids)) { rc = -ENOMEM; goto free_rpi_bmask; } for (i = 0; i < count; i++) phba->sli4_hba.rpi_ids[i] = base + i; /* VPIs. */ count = phba->sli4_hba.max_cfg_param.max_vpi; if (count <= 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3280 Invalid provisioning of " "vpi:%d\n", count); rc = -EINVAL; goto free_rpi_ids; } base = phba->sli4_hba.max_cfg_param.vpi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->vpi_bmask)) { rc = -ENOMEM; goto free_rpi_ids; } phba->vpi_ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->vpi_ids)) { rc = -ENOMEM; goto free_vpi_bmask; } for (i = 0; i < count; i++) phba->vpi_ids[i] = base + i; /* XRIs. 
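 * The bitmask sizing used for each id pool in this path rounds the id
 * count up to whole longs, i.e.
 * longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG.
 * Worked example (hypothetical count): with count = 500 on a 64-bit
 * build, longs = 563 / 64 = 8, i.e. 512 bits, enough for 500 ids.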
*/ count = phba->sli4_hba.max_cfg_param.max_xri; if (count <= 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3281 Invalid provisioning of " "xri:%d\n", count); rc = -EINVAL; goto free_vpi_ids; } base = phba->sli4_hba.max_cfg_param.xri_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.xri_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_bmask)) { rc = -ENOMEM; goto free_vpi_ids; } phba->sli4_hba.max_cfg_param.xri_used = 0; phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_ids)) { rc = -ENOMEM; goto free_xri_bmask; } for (i = 0; i < count; i++) phba->sli4_hba.xri_ids[i] = base + i; /* VFIs. */ count = phba->sli4_hba.max_cfg_param.max_vfi; if (count <= 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3282 Invalid provisioning of " "vfi:%d\n", count); rc = -EINVAL; goto free_xri_ids; } base = phba->sli4_hba.max_cfg_param.vfi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->sli4_hba.vfi_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_bmask)) { rc = -ENOMEM; goto free_xri_ids; } phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_ids)) { rc = -ENOMEM; goto free_vfi_bmask; } for (i = 0; i < count; i++) phba->sli4_hba.vfi_ids[i] = base + i; /* * Mark all resources ready. An HBA reset doesn't need * to reset the initialization. */ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, LPFC_IDX_RSRC_RDY); return 0; } free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } /** * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. * @phba: Pointer to HBA context object. * * This function allocates the number of elements for the specified * resource type. **/ int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) { if (phba->sli4_hba.extents_in_use) { lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); kfree(phba->sli4_hba.xri_ids); kfree(phba->sli4_hba.vfi_bmask); kfree(phba->sli4_hba.vfi_ids); bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); } return 0; } /** * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. * @phba: Pointer to HBA context object. * @type: The resource extent type. * @extnt_cnt: buffer to hold port extent count response * @extnt_size: buffer to hold port extent size response. * * This function calls the port to read the host allocated extents * for a particular type. 
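 *
 * Hypothetical usage sketch (mirrors the prototype below; not a call made
 * from any particular place):
 *
 *	uint16_t cnt, size;
 *	int rc;
 *
 *	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size);
 *
 * On success, cnt holds the number of extents the port has allocated to
 * this function and size holds the element count per extent.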
**/ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, uint16_t *extnt_cnt, uint16_t *extnt_size) { bool emb; int rc = 0; uint16_t curr_blks = 0; uint32_t req_len, emb_len; uint32_t alloc_len, mbox_tmo; struct list_head *blk_list_head; struct lpfc_rsrc_blks *rsrc_blk; LPFC_MBOXQ_t *mbox; void *virtaddr = NULL; struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; union lpfc_sli4_cfg_shdr *shdr; switch (type) { case LPFC_RSC_TYPE_FCOE_VPI: blk_list_head = &phba->lpfc_vpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_XRI: blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; break; case LPFC_RSC_TYPE_FCOE_VFI: blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; break; case LPFC_RSC_TYPE_FCOE_RPI: blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; break; default: return -EIO; } /* Count the number of extents currently allocatd for this type. */ list_for_each_entry(rsrc_blk, blk_list_head, list) { if (curr_blks == 0) { /* * The GET_ALLOCATED mailbox does not return the size, * just the count. The size should be just the size * stored in the current allocated block and all sizes * for an extent type are the same so set the return * value now. */ *extnt_size = rsrc_blk->rsrc_size; } curr_blks++; } /* * Calculate the size of an embedded mailbox. The uint32_t * accounts for extents-specific word. */ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - sizeof(uint32_t); /* * Presume the allocation and response will fit into an embedded * mailbox. If not true, reconfigure to a non-embedded mailbox. */ emb = LPFC_SLI4_MBX_EMBED; req_len = emb_len; if (req_len > emb_len) { req_len = curr_blks * sizeof(uint16_t) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); emb = LPFC_SLI4_MBX_NEMBED; } mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, req_len, emb); if (alloc_len < req_len) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2983 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); rc = -ENOMEM; goto err_exit; } rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); if (unlikely(rc)) { rc = -EIO; goto err_exit; } if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } if (unlikely(rc)) { rc = -EIO; goto err_exit; } /* * Figure out where the response is located. Then get local pointers * to the response data. The port does not guarantee to respond to * all extents counts request so update the local variable with the * allocated count from the port. 
*/ if (emb == LPFC_SLI4_MBX_EMBED) { rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; shdr = &rsrc_ext->header.cfg_shdr; *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); } else { virtaddr = mbox->sge_array->addr[0]; n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; shdr = &n_rsrc->cfg_shdr; *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); } if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2984 Failed to read allocated resources " "for type %d - Status 0x%x Add'l Status 0x%x.\n", type, bf_get(lpfc_mbox_hdr_status, &shdr->response), bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); rc = -EIO; goto err_exit; } err_exit: lpfc_sli4_mbox_cmd_free(phba, mbox); return rc; } /** * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block * @phba: pointer to lpfc hba data structure. * @sgl_list: linked link of sgl buffers to post * @cnt: number of linked list buffers * * This routine walks the list of buffers that have been allocated and * repost them to the port by using SGL block post. This is needed after a * pci_function_reset/warm_start or start. It attempts to construct blocks * of buffer sgls which contains contiguous xris and uses the non-embedded * SGL block post mailbox commands to post them to the port. For single * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post * mailbox command for posting. * * Returns: 0 = success, non-zero failure. **/ static int lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, struct list_head *sgl_list, int cnt) { struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; int status, total_cnt; int post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); LIST_HEAD(allc_sgl_list); LIST_HEAD(post_sgl_list); LIST_HEAD(free_sgl_list); spin_lock_irq(&phba->hbalock); spin_lock(&phba->sli4_hba.sgl_list_lock); list_splice_init(sgl_list, &allc_sgl_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); total_cnt = cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); block_cnt++; if ((last_xritag != NO_XRI) && (sglq_entry->sli4_xritag != last_xritag + 1)) { /* a hole in xri block, form a sgl posting block */ list_splice_init(&prep_sgl_list, &blck_sgl_list); post_cnt = block_cnt - 1; /* prepare list for next posting block */ list_add_tail(&sglq_entry->list, &prep_sgl_list); block_cnt = 1; } else { /* prepare list for next posting block */ list_add_tail(&sglq_entry->list, &prep_sgl_list); /* enough sgls for non-embed sgl mbox command */ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { list_splice_init(&prep_sgl_list, &blck_sgl_list); post_cnt = block_cnt; block_cnt = 0; } } num_posted++; /* keep track of last sgl's xritag */ last_xritag = sglq_entry->sli4_xritag; /* end of repost sgl list condition for buffers */ if (num_posted == total_cnt) { if (post_cnt == 0) { list_splice_init(&prep_sgl_list, &blck_sgl_list); post_cnt = block_cnt; } else if (block_cnt == 1) { status = lpfc_sli4_post_sgl(phba, sglq_entry->phys, 0, sglq_entry->sli4_xritag); if (!status) { /* successful, put sgl to posted list */ list_add_tail(&sglq_entry->list, &post_sgl_list); } else { /* Failure, put sgl to free list */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3159 Failed to post " "sgl, xritag:x%x\n", sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, &free_sgl_list); 
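					/* The single-sgl post above failed,
					 * so this XRI no longer counts
					 * toward the buffers actually
					 * reposted (total_cnt is the
					 * function's return value).
					 */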
total_cnt--; } } } /* continue until a nembed page worth of sgls */ if (post_cnt == 0) continue; /* post the buffer list sgls as a block */ status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, post_cnt); if (!status) { /* success, put sgl list to posted sgl list */ list_splice_init(&blck_sgl_list, &post_sgl_list); } else { /* Failure, put sgl list to free sgl list */ sglq_entry_first = list_first_entry(&blck_sgl_list, struct lpfc_sglq, list); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3160 Failed to post sgl-list, " "xritag:x%x-x%x\n", sglq_entry_first->sli4_xritag, (sglq_entry_first->sli4_xritag + post_cnt - 1)); list_splice_init(&blck_sgl_list, &free_sgl_list); total_cnt -= post_cnt; } /* don't reset xirtag due to hole in xri block */ if (block_cnt == 0) last_xritag = NO_XRI; /* reset sgl post count for next round of posting */ post_cnt = 0; } /* free the sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); /* push sgls posted to the available list */ if (!list_empty(&post_sgl_list)) { spin_lock_irq(&phba->hbalock); spin_lock(&phba->sli4_hba.sgl_list_lock); list_splice_init(&post_sgl_list, sgl_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3161 Failure to post sgl to port,status %x " "blkcnt %d totalcnt %d postcnt %d\n", status, block_cnt, total_cnt, post_cnt); return -EIO; } /* return the number of XRIs actually posted */ return total_cnt; } /** * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls * @phba: pointer to lpfc hba data structure. * * This routine walks the list of nvme buffers that have been allocated and * repost them to the port by using SGL block post. This is needed after a * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. * * Returns: 0 = success, non-zero failure. **/ static int lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) { LIST_HEAD(post_nblist); int num_posted, rc = 0; /* get all NVME buffers need to repost to a local list */ lpfc_io_buf_flush(phba, &post_nblist); /* post the list of nvme buffer sgls to port if available */ if (!list_empty(&post_nblist)) { num_posted = lpfc_sli4_post_io_sgl_list( phba, &post_nblist, phba->sli4_hba.io_xri_cnt); /* failed to post any nvme buffer, return error */ if (num_posted == 0) rc = -EIO; } return rc; } static void lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { uint32_t len; len = sizeof(struct lpfc_mbx_set_host_data) - sizeof(struct lpfc_sli4_cfg_mhdr); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_SET_HOST_DATA, len, LPFC_SLI4_MBX_EMBED); mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; mbox->u.mqe.un.set_host_data.param_len = LPFC_HOST_OS_DRIVER_VERSION_SIZE; snprintf(mbox->u.mqe.un.set_host_data.un.data, LPFC_HOST_OS_DRIVER_VERSION_SIZE, "Linux %s v"LPFC_DRIVER_VERSION, (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); } int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq, int count, int idx) { int rc, i; struct lpfc_rqe hrqe; struct lpfc_rqe drqe; struct lpfc_rqb *rqbp; unsigned long flags; struct rqb_dmabuf *rqb_buffer; LIST_HEAD(rqb_buf_list); rqbp = hrq->rqbp; for (i = 0; i < count; i++) { spin_lock_irqsave(&phba->hbalock, flags); /* IF RQ is already full, don't bother */ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { spin_unlock_irqrestore(&phba->hbalock, flags); break; } spin_unlock_irqrestore(&phba->hbalock, flags); rqb_buffer = rqbp->rqb_alloc_buffer(phba); if (!rqb_buffer) break; rqb_buffer->hrq = hrq; rqb_buffer->drq = drq; rqb_buffer->idx = idx; list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); } spin_lock_irqsave(&phba->hbalock, flags); while (!list_empty(&rqb_buf_list)) { list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, hbuf.list); hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); if (rc < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6421 Cannot post to HRQ %d: %x %x %x " "DRQ %x %x\n", hrq->queue_id, hrq->host_index, hrq->hba_index, hrq->entry_count, drq->host_index, drq->hba_index); rqbp->rqb_free_buffer(phba, rqb_buffer); } else { list_add_tail(&rqb_buffer->hbuf.list, &rqbp->rqb_buffer_list); rqbp->buffer_count++; } } spin_unlock_irqrestore(&phba->hbalock, flags); return 1; } static void lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { union lpfc_sli4_cfg_shdr *shdr; u32 shdr_status, shdr_add_status; shdr = (union lpfc_sli4_cfg_shdr *) &pmb->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX, "4622 SET_FEATURE (x%x) mbox failed, " "status x%x add_status x%x, mbx status x%x\n", LPFC_SET_LD_SIGNAL, shdr_status, shdr_add_status, pmb->u.mb.mbxStatus); phba->degrade_activate_threshold = 0; phba->degrade_deactivate_threshold = 0; phba->fec_degrade_interval = 0; goto out; } phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7; phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8; phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10; lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT, "4624 Success: da x%x dd x%x interval x%x\n", phba->degrade_activate_threshold, phba->degrade_deactivate_threshold, phba->fec_degrade_interval); out: mempool_free(pmb, phba->mbox_mem_pool); } int lpfc_read_lds_params(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; int rc; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL); mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mboxq, phba->mbox_mem_pool); return -EIO; } return 0; } static void lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; union lpfc_sli4_cfg_shdr *shdr; u32 shdr_status, shdr_add_status; u32 sig, acqe; /* Two outcomes. (1) Set featurs was successul and EDC negotiation * is done. 
(2) Mailbox failed and send FPIN support only. */ shdr = (union lpfc_sli4_cfg_shdr *) &pmb->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, "2516 CGN SET_FEATURE mbox failed with " "status x%x add_status x%x, mbx status x%x " "Reset Congestion to FPINs only\n", shdr_status, shdr_add_status, pmb->u.mb.mbxStatus); /* If there is a mbox error, move on to RDF */ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; goto out; } /* Zero out Congestion Signal ACQE counter */ phba->cgn_acqe_cnt = 0; acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq, &pmb->u.mqe.un.set_feature); sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq, &pmb->u.mqe.un.set_feature); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "4620 SET_FEATURES Success: Freq: %ds %dms " " Reg: x%x x%x\n", acqe, sig, phba->cgn_reg_signal, phba->cgn_reg_fpin); out: mempool_free(pmb, phba->mbox_mem_pool); /* Register for FPIN events from the fabric now that the * EDC common_set_features has completed. */ lpfc_issue_els_rdf(vport, 0); } int lpfc_config_cgn_signal(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; u32 rc; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) goto out_rdf; lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL); mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "4621 SET_FEATURES: FREQ sig x%x acqe x%x: " "Reg: x%x x%x\n", phba->cgn_sig_freq, lpfc_acqe_cgn_frequency, phba->cgn_reg_signal, phba->cgn_reg_fpin); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out; return 0; out: mempool_free(mboxq, phba->mbox_mem_pool); out_rdf: /* If there is a mbox error, move on to RDF */ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; lpfc_issue_els_rdf(phba->pport, 0); return -EIO; } /** * lpfc_init_idle_stat_hb - Initialize idle_stat tracking * @phba: pointer to lpfc hba data structure. * * This routine initializes the per-eq idle_stat to dynamically dictate * polling decisions. 
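 *
 * The per-EQ decision below, restated here (each EQ is visited once via
 * its primary CPU):
 *
 *	if (phba->nvmet_support ||
 *	    phba->cmf_active_mode != LPFC_CFG_OFF ||
 *	    phba->intr_type != MSIX)
 *		eq->poll_mode = LPFC_QUEUE_WORK;
 *	else
 *		eq->poll_mode = LPFC_THREADED_IRQ;
 *
 * with the idle baseline seeded from get_cpu_idle_time().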
* * Return codes: * None **/ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) { int i; struct lpfc_sli4_hdw_queue *hdwq; struct lpfc_queue *eq; struct lpfc_idle_stat *idle_stat; u64 wall; for_each_present_cpu(i) { hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; eq = hdwq->hba_eq; /* Skip if we've already handled this eq's primary CPU */ if (eq->chann != i) continue; idle_stat = &phba->sli4_hba.idle_stat[i]; idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1); idle_stat->prev_wall = wall; if (phba->nvmet_support || phba->cmf_active_mode != LPFC_CFG_OFF || phba->intr_type != MSIX) eq->poll_mode = LPFC_QUEUE_WORK; else eq->poll_mode = LPFC_THREADED_IRQ; } if (!phba->nvmet_support && phba->intr_type == MSIX) schedule_delayed_work(&phba->idle_stat_delay_work, msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); } static void lpfc_sli4_dip(struct lpfc_hba *phba) { uint32_t if_type; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type == LPFC_SLI_INTF_IF_TYPE_2 || if_type == LPFC_SLI_INTF_IF_TYPE_6) { struct lpfc_register reg_data; if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &reg_data.word0)) return; if (bf_get(lpfc_sliport_status_dip, &reg_data)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2904 Firmware Dump Image Present" " on Adapter"); } } /** * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor * @rx_monitor: Pointer to lpfc_rx_info_monitor object * @entries: Number of rx_info_entry objects to allocate in ring * * Return: * 0 - Success * ENOMEM - Failure to kmalloc **/ int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor, u32 entries) { rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry), GFP_KERNEL); if (!rx_monitor->ring) return -ENOMEM; rx_monitor->head_idx = 0; rx_monitor->tail_idx = 0; spin_lock_init(&rx_monitor->lock); rx_monitor->entries = entries; return 0; } /** * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor * @rx_monitor: Pointer to lpfc_rx_info_monitor object * * Called after cancellation of cmf_timer. **/ void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor) { kfree(rx_monitor->ring); rx_monitor->ring = NULL; rx_monitor->entries = 0; rx_monitor->head_idx = 0; rx_monitor->tail_idx = 0; } /** * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring * @rx_monitor: Pointer to lpfc_rx_info_monitor object * @entry: Pointer to rx_info_entry * * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr. * * This is called from lpfc_cmf_timer, which is in timer/softirq context. * * In cases of old data overflow, we do a best effort of FIFO order. 
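 *
 * Small worked example of the overwrite policy (hypothetical ring of 4
 * entries): with head_idx = 0 and tail_idx = 3, the next record is written
 * to slot 3, tail_idx becomes (3 + 1) % 4 = 0, which now equals head_idx,
 * so head_idx is advanced to 1 and the oldest entry is dropped.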
**/ void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor, struct rx_info_entry *entry) { struct rx_info_entry *ring = rx_monitor->ring; u32 *head_idx = &rx_monitor->head_idx; u32 *tail_idx = &rx_monitor->tail_idx; spinlock_t *ring_lock = &rx_monitor->lock; u32 ring_size = rx_monitor->entries; spin_lock(ring_lock); memcpy(&ring[*tail_idx], entry, sizeof(*entry)); *tail_idx = (*tail_idx + 1) % ring_size; /* Best effort of FIFO saved data */ if (*tail_idx == *head_idx) *head_idx = (*head_idx + 1) % ring_size; spin_unlock(ring_lock); } /** * lpfc_rx_monitor_report - Read out rx_monitor's ring * @phba: Pointer to lpfc_hba object * @rx_monitor: Pointer to lpfc_rx_info_monitor object * @buf: Pointer to char buffer that will contain rx monitor info data * @buf_len: Length buf including null char * @max_read_entries: Maximum number of entries to read out of ring * * Used to dump/read what's in rx_monitor's ring buffer. * * If buf is NULL || buf_len == 0, then it is implied that we want to log the * information to kmsg instead of filling out buf. * * Return: * Number of entries read out of the ring **/ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, struct lpfc_rx_info_monitor *rx_monitor, char *buf, u32 buf_len, u32 max_read_entries) { struct rx_info_entry *ring = rx_monitor->ring; struct rx_info_entry *entry; u32 *head_idx = &rx_monitor->head_idx; u32 *tail_idx = &rx_monitor->tail_idx; spinlock_t *ring_lock = &rx_monitor->lock; u32 ring_size = rx_monitor->entries; u32 cnt = 0; char tmp[DBG_LOG_STR_SZ] = {0}; bool log_to_kmsg = (!buf || !buf_len) ? true : false; if (!log_to_kmsg) { /* clear the buffer to be sure */ memset(buf, 0, buf_len); scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s" "%-8s%-8s%-8s%-16s\n", "MaxBPI", "Tot_Data_CMF", "Tot_Data_Cmd", "Tot_Data_Cmpl", "Lat(us)", "Avg_IO", "Max_IO", "Bsy", "IO_cnt", "Info", "BWutil(ms)"); } /* Needs to be _irq because record is called from timer interrupt * context */ spin_lock_irq(ring_lock); while (*head_idx != *tail_idx) { entry = &ring[*head_idx]; /* Read out this entry's data. */ if (!log_to_kmsg) { /* If !log_to_kmsg, then store to buf. */ scnprintf(tmp, sizeof(tmp), "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu" "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n", *head_idx, entry->max_bytes_per_interval, entry->cmf_bytes, entry->total_bytes, entry->rcv_bytes, entry->avg_io_latency, entry->avg_io_size, entry->max_read_cnt, entry->cmf_busy, entry->io_cnt, entry->cmf_info, entry->timer_utilization, entry->timer_interval); /* Check for buffer overflow */ if ((strlen(buf) + strlen(tmp)) >= buf_len) break; /* Append entry's data to buffer */ strlcat(buf, tmp, buf_len); } else { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "4410 %02u: MBPI %llu Xmit %llu " "Cmpl %llu Lat %llu ASz %llu Info %02u " "BWUtil %u Int %u slot %u\n", cnt, entry->max_bytes_per_interval, entry->total_bytes, entry->rcv_bytes, entry->avg_io_latency, entry->avg_io_size, entry->cmf_info, entry->timer_utilization, entry->timer_interval, *head_idx); } *head_idx = (*head_idx + 1) % ring_size; /* Don't feed more than max_read_entries */ cnt++; if (cnt >= max_read_entries) break; } spin_unlock_irq(ring_lock); return cnt; } /** * lpfc_cmf_setup - Initialize idle_stat tracking * @phba: Pointer to HBA context object. * * This is called from HBA setup during driver load or when the HBA * comes online. this does all the initialization to support CMF and MI. 
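 *
 * Illustrative sketch, not from this driver: lpfc_rx_monitor_report() above
 * drains the ring from head to tail, formatting each entry and stopping at
 * either a full output buffer or a caller-imposed entry cap.  A simplified
 * stand-alone version (int payload, plain snprintf/strcat, no locking):
 */

#include <stdio.h>
#include <string.h>

#define RING_SIZE 8

struct int_ring {
	int entries[RING_SIZE];
	unsigned int head, tail;
};

/*
 * Format entries from head to tail into buf, never exceeding buf_len and
 * never reading more than max_entries.  Returns the number of entries
 * consumed; head is advanced past them.
 */
static unsigned int ring_report(struct int_ring *r, char *buf, size_t buf_len,
				unsigned int max_entries)
{
	char tmp[32];
	unsigned int cnt = 0;

	buf[0] = '\0';
	while (r->head != r->tail && cnt < max_entries) {
		snprintf(tmp, sizeof(tmp), "%03u: %d\n", r->head,
			 r->entries[r->head]);
		/* Stop before overflowing the caller's buffer. */
		if (strlen(buf) + strlen(tmp) + 1 > buf_len)
			break;
		strcat(buf, tmp);
		r->head = (r->head + 1) % RING_SIZE;
		cnt++;
	}
	return cnt;
}

int main(void)
{
	struct int_ring r = { { 10, 20, 30 }, 0, 3 };
	char buf[128];
	unsigned int n = ring_report(&r, buf, sizeof(buf), 2);

	printf("read %u entries:\n%s", n, buf);
	return 0;
}

/*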
**/ static int lpfc_cmf_setup(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; struct lpfc_dmabuf *mp; struct lpfc_pc_sli4_params *sli4_params; int rc, cmf, mi_ver; rc = lpfc_sli4_refresh_params(phba); if (unlikely(rc)) return rc; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; sli4_params = &phba->sli4_hba.pc_sli4_params; /* Always try to enable MI feature if we can */ if (sli4_params->mi_ver) { lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); mi_ver = bf_get(lpfc_mbx_set_feature_mi, &mboxq->u.mqe.un.set_feature); if (rc == MBX_SUCCESS) { if (mi_ver) { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "6215 MI is enabled\n"); sli4_params->mi_ver = mi_ver; } else { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "6338 MI is disabled\n"); sli4_params->mi_ver = 0; } } else { /* mi_ver is already set from GET_SLI4_PARAMETERS */ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, "6245 Enable MI Mailbox x%x (x%x/x%x) " "failed, rc:x%x mi:x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), lpfc_sli_config_mbox_subsys_get (phba, mboxq), lpfc_sli_config_mbox_opcode_get (phba, mboxq), rc, sli4_params->mi_ver); } } else { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "6217 MI is disabled\n"); } /* Ensure FDMI is enabled for MI if enable_mi is set */ if (sli4_params->mi_ver) phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; /* Always try to enable CMF feature if we can */ if (sli4_params->cmf) { lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); cmf = bf_get(lpfc_mbx_set_feature_cmf, &mboxq->u.mqe.un.set_feature); if (rc == MBX_SUCCESS && cmf) { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "6218 CMF is enabled: mode %d\n", phba->cmf_active_mode); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT | LOG_INIT, "6219 Enable CMF Mailbox x%x (x%x/x%x) " "failed, rc:x%x dd:x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), lpfc_sli_config_mbox_subsys_get (phba, mboxq), lpfc_sli_config_mbox_opcode_get (phba, mboxq), rc, cmf); sli4_params->cmf = 0; phba->cmf_active_mode = LPFC_CFG_OFF; goto no_cmf; } /* Allocate Congestion Information Buffer */ if (!phba->cgn_i) { mp = kmalloc(sizeof(*mp), GFP_KERNEL); if (mp) mp->virt = dma_alloc_coherent (&phba->pcidev->dev, sizeof(struct lpfc_cgn_info), &mp->phys, GFP_KERNEL); if (!mp || !mp->virt) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2640 Failed to alloc memory " "for Congestion Info\n"); kfree(mp); sli4_params->cmf = 0; phba->cmf_active_mode = LPFC_CFG_OFF; goto no_cmf; } phba->cgn_i = mp; /* initialize congestion buffer info */ lpfc_init_congestion_buf(phba); lpfc_init_congestion_stat(phba); /* Zero out Congestion Signal counters */ atomic64_set(&phba->cgn_acqe_stat.alarm, 0); atomic64_set(&phba->cgn_acqe_stat.warn, 0); } rc = lpfc_sli4_cgn_params_read(phba); if (rc < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "6242 Error reading Cgn Params (%d)\n", rc); /* Ensure CGN Mode is off */ sli4_params->cmf = 0; } else if (!rc) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "6243 CGN Event empty object.\n"); /* Ensure CGN Mode is off */ sli4_params->cmf = 0; } } else { no_cmf: lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "6220 CMF is disabled\n"); } /* Only register congestion buffer with firmware if BOTH * CMF and E2E are enabled. 
*/ if (sli4_params->cmf && sli4_params->mi_ver) { rc = lpfc_reg_congestion_buf(phba); if (rc) { dma_free_coherent(&phba->pcidev->dev, sizeof(struct lpfc_cgn_info), phba->cgn_i->virt, phba->cgn_i->phys); kfree(phba->cgn_i); phba->cgn_i = NULL; /* Ensure CGN Mode is off */ phba->cmf_active_mode = LPFC_CFG_OFF; sli4_params->cmf = 0; return 0; } } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "6470 Setup MI version %d CMF %d mode %d\n", sli4_params->mi_ver, sli4_params->cmf, phba->cmf_active_mode); mempool_free(mboxq, phba->mbox_mem_pool); /* Initialize atomic counters */ atomic_set(&phba->cgn_fabric_warn_cnt, 0); atomic_set(&phba->cgn_fabric_alarm_cnt, 0); atomic_set(&phba->cgn_sync_alarm_cnt, 0); atomic_set(&phba->cgn_sync_warn_cnt, 0); atomic_set(&phba->cgn_driver_evt_cnt, 0); atomic_set(&phba->cgn_latency_evt_cnt, 0); atomic64_set(&phba->cgn_latency_evt, 0); phba->cmf_interval_rate = LPFC_CMF_INTERVAL; /* Allocate RX Monitor Buffer */ if (!phba->rx_monitor) { phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor), GFP_KERNEL); if (!phba->rx_monitor) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2644 Failed to alloc memory " "for RX Monitor Buffer\n"); return -ENOMEM; } /* Instruct the rx_monitor object to instantiate its ring */ if (lpfc_rx_monitor_create_ring(phba->rx_monitor, LPFC_MAX_RXMONITOR_ENTRY)) { kfree(phba->rx_monitor); phba->rx_monitor = NULL; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2645 Failed to alloc memory " "for RX Monitor's Ring\n"); return -ENOMEM; } } return 0; } static int lpfc_set_host_tm(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; uint32_t len, rc; struct timespec64 cur_time; struct tm broken; uint32_t month, day, year; uint32_t hour, minute, second; struct lpfc_mbx_set_host_date_time *tm; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; len = sizeof(struct lpfc_mbx_set_host_data) - sizeof(struct lpfc_sli4_cfg_mhdr); lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_SET_HOST_DATA, len, LPFC_SLI4_MBX_EMBED); mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME; mboxq->u.mqe.un.set_host_data.param_len = sizeof(struct lpfc_mbx_set_host_date_time); tm = &mboxq->u.mqe.un.set_host_data.un.tm; ktime_get_real_ts64(&cur_time); time64_to_tm(cur_time.tv_sec, 0, &broken); month = broken.tm_mon + 1; day = broken.tm_mday; year = broken.tm_year - 100; hour = broken.tm_hour; minute = broken.tm_min; second = broken.tm_sec; bf_set(lpfc_mbx_set_host_month, tm, month); bf_set(lpfc_mbx_set_host_day, tm, day); bf_set(lpfc_mbx_set_host_year, tm, year); bf_set(lpfc_mbx_set_host_hour, tm, hour); bf_set(lpfc_mbx_set_host_min, tm, minute); bf_set(lpfc_mbx_set_host_sec, tm, second); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); mempool_free(mboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_sli4_hba_setup - SLI4 device initialization PCI function * @phba: Pointer to HBA context object. * * This function is the main SLI4 device initialization PCI function. This * function is called by the HBA initialization code, HBA reset code and * HBA error attention handler code. Caller is not required to hold any * locks. 
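 *
 * Illustrative sketch, not from this driver: lpfc_set_host_tm() above packs
 * the current UTC time into month/day/two-digit-year/hour/minute/second
 * fields.  The struct tm adjustments are the interesting part (tm_mon is
 * 0-based and tm_year counts from 1900, so "year - 100" is years since
 * 2000).  struct host_tm_fields and fill_host_tm() are made-up names.
 */

#include <stdio.h>
#include <time.h>

/* Hypothetical flat representation of the date/time fields being set. */
struct host_tm_fields {
	unsigned int month, day, year;	/* year is two digits, since 2000 */
	unsigned int hour, minute, second;
};

static void fill_host_tm(struct host_tm_fields *f)
{
	time_t now = time(NULL);
	struct tm *broken = gmtime(&now);	/* UTC, like time64_to_tm(..., 0, ...) */

	f->month  = (unsigned int)(broken->tm_mon + 1);	/* tm_mon is 0..11 */
	f->day    = (unsigned int)broken->tm_mday;
	f->year   = (unsigned int)(broken->tm_year - 100);/* tm_year counts from 1900 */
	f->hour   = (unsigned int)broken->tm_hour;
	f->minute = (unsigned int)broken->tm_min;
	f->second = (unsigned int)broken->tm_sec;
}

int main(void)
{
	struct host_tm_fields f;

	fill_host_tm(&f);
	printf("%02u/%02u/%02u %02u:%02u:%02u\n",
	       f.month, f.day, f.year, f.hour, f.minute, f.second);
	return 0;
}

/*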
**/ int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { int rc, i, cnt, len, dd; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; uint32_t vpd_size; uint32_t ftr_rsp = 0; struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; struct lpfc_rqb *rqbp; u32 flg; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); if (unlikely(rc)) return -ENODEV; /* Check the HBA Host Status Register for readyness */ rc = lpfc_sli4_post_status_check(phba); if (unlikely(rc)) return -ENODEV; else { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ACTIVE; flg = phba->sli.sli_flag; spin_unlock_irq(&phba->hbalock); /* Allow a little time after setting SLI_ACTIVE for any polled * MBX commands to complete via BSG. */ for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { msleep(20); spin_lock_irq(&phba->hbalock); flg = phba->sli.sli_flag; spin_unlock_irq(&phba->hbalock); } } phba->hba_flag &= ~HBA_SETUP; lpfc_sli4_dip(phba); /* * Allocate a single mailbox container for initializing the * port. */ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; /* Issue READ_REV to collect vpd and FW information. */ vpd_size = SLI4_PAGE_SIZE; vpd = kzalloc(vpd_size, GFP_KERNEL); if (!vpd) { rc = -ENOMEM; goto out_free_mbox; } rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); if (unlikely(rc)) { kfree(vpd); goto out_free_mbox; } mqe = &mboxq->u.mqe; phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { phba->hba_flag |= HBA_FCOE_MODE; phba->fcp_embed_io = 0; /* SLI4 FC support only */ } else { phba->hba_flag &= ~HBA_FCOE_MODE; } if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == LPFC_DCBX_CEE_MODE) phba->hba_flag |= HBA_FIP_SUPPORT; else phba->hba_flag &= ~HBA_FIP_SUPPORT; phba->hba_flag &= ~HBA_IOQ_FLUSH; if (phba->sli_rev != LPFC_SLI_REV4) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0376 READ_REV Error. SLI Level %d " "FCoE enabled %d\n", phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); rc = -EIO; kfree(vpd); goto out_free_mbox; } rc = lpfc_set_host_tm(phba); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, "6468 Set host date / time: Status x%x:\n", rc); /* * Continue initialization with default values even if driver failed * to read FCoE param config regions, only read parameters if the * board is FCoE */ if (phba->hba_flag & HBA_FCOE_MODE && lpfc_sli4_read_fcoe_params(phba)) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, "2570 Failed to read FCoE parameters\n"); /* * Retrieve sli4 device physical port name, failure of doing it * is considered as non-fatal. */ rc = lpfc_sli4_retrieve_pport_name(phba); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "3080 Successful retrieving SLI4 device " "physical port name: %s.\n", phba->Port); rc = lpfc_sli4_get_ctl_attr(phba); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "8351 Successful retrieving SLI4 device " "CTL ATTR\n"); /* * Evaluate the read rev and vpd data. Populate the driver * state with the results. If this routine fails, the failure * is not fatal as the driver will use generic values. */ rc = lpfc_parse_vpd(phba, vpd, vpd_size); if (unlikely(!rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0377 Error %d parsing vpd. 
" "Using defaults.\n", rc); rc = 0; } kfree(vpd); /* Save information as VPD data */ phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; /* * This is because first G7 ASIC doesn't support the standard * 0x5a NVME cmd descriptor type/subtype */ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_6) && (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && (phba->vpd.rev.smRev == 0) && (phba->cfg_nvme_embed_cmd == 1)) phba->cfg_nvme_embed_cmd = 0; phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, &mqe->un.read_rev); phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, &mqe->un.read_rev); phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, &mqe->un.read_rev); phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, &mqe->un.read_rev); phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0380 READ_REV Status x%x " "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", mboxq->vport ? mboxq->vport->vpi : 0, bf_get(lpfc_mqe_status, mqe), phba->vpd.rev.opFwName, phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_0) { lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc == MBX_SUCCESS) { phba->hba_flag |= HBA_RECOVERABLE_UE; /* Set 1Sec interval to detect UE */ phba->eratt_poll_interval = 1; phba->sli4_hba.ue_to_sr = bf_get( lpfc_mbx_set_feature_UESR, &mboxq->u.mqe.un.set_feature); phba->sli4_hba.ue_to_rp = bf_get( lpfc_mbx_set_feature_UERP, &mboxq->u.mqe.un.set_feature); } } if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { /* Enable MDS Diagnostics only if the SLI Port supports it */ lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) phba->mds_diags_support = 0; } /* * Discover the port's supported feature set and match it against the * hosts requests. */ lpfc_request_features(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (unlikely(rc)) { rc = -EIO; goto out_free_mbox; } /* Disable VMID if app header is not supported */ if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr, &mqe->un.req_ftrs))) { bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0); phba->cfg_vmid_app_header = 0; lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI, "1242 vmid feature not supported\n"); } /* * The port must support FCP initiator mode as this is the * only mode running in the host. 
*/ if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "0378 No support for fcpi mode.\n"); ftr_rsp++; } /* Performance Hints are ONLY for FCoE */ if (phba->hba_flag & HBA_FCOE_MODE) { if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; else phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; } /* * If the port cannot support the host's requested features * then turn off the global config parameters to disable the * feature in the driver. This is not a fatal error. */ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { phba->cfg_enable_bg = 0; phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; ftr_rsp++; } } if (phba->max_vpi && phba->cfg_enable_npiv && !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) ftr_rsp++; if (ftr_rsp) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "0379 Feature Mismatch Data: x%08x %08x " "x%x x%x x%x\n", mqe->un.req_ftrs.word2, mqe->un.req_ftrs.word3, phba->cfg_enable_bg, phba->cfg_enable_npiv, phba->max_vpi); if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) phba->cfg_enable_bg = 0; if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) phba->cfg_enable_npiv = 0; } /* These SLI3 features are assumed in SLI4 */ spin_lock_irq(&phba->hbalock); phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); spin_unlock_irq(&phba->hbalock); /* Always try to enable dual dump feature if we can */ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6448 Dual Dump is enabled\n"); else lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " "rc:x%x dd:x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), lpfc_sli_config_mbox_subsys_get( phba, mboxq), lpfc_sli_config_mbox_opcode_get( phba, mboxq), rc, dd); /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. */ rc = lpfc_sli4_alloc_resource_identifiers(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2920 Failed to alloc Resource IDs " "rc = x%x\n", rc); goto out_free_mbox; } lpfc_set_host_data(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "2134 Failed to set host os driver version %x", rc); } /* Read the port's service parameters. */ rc = lpfc_read_sparam(phba, mboxq, vport->vpi); if (rc) { phba->link_state = LPFC_HBA_ERROR; rc = -ENOMEM; goto out_free_mbox; } mboxq->vport = vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; if (rc == MBX_SUCCESS) { memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); rc = 0; } /* * This memory was allocated by the lpfc_read_sparam routine but is * no longer needed. It is released and ctx_buf NULLed to prevent * unintended pointer access as the mbox is reused. 
*/ lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mboxq->ctx_buf = NULL; if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0382 READ_SPARAM command failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, mqe)); phba->link_state = LPFC_HBA_ERROR; rc = -EIO; goto out_free_mbox; } lpfc_update_vport_wwn(vport); /* Update the fc_host data structures with new wwn. */ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3089 Failed to allocate queues\n"); rc = -ENODEV; goto out_free_mbox; } /* Set up all the queues to the device */ rc = lpfc_sli4_queue_setup(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0381 Error %d during queue setup.\n ", rc); goto out_stop_timers; } /* Initialize the driver internal SLI layer lists. */ lpfc_sli4_setup(phba); lpfc_sli4_queue_init(phba); /* update host els xri-sgl sizes and mappings */ rc = lpfc_sli4_els_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1400 Failed to update xri-sgl size and " "mapping: %d\n", rc); goto out_destroy_queue; } /* register the els sgl pool to the port */ rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, phba->sli4_hba.els_xri_cnt); if (unlikely(rc < 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0582 Error %d during els sgl post " "operation\n", rc); rc = -ENODEV; goto out_destroy_queue; } phba->sli4_hba.els_xri_cnt = rc; if (phba->nvmet_support) { /* update host nvmet xri-sgl sizes and mappings */ rc = lpfc_sli4_nvmet_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6308 Failed to update nvmet-sgl size " "and mapping: %d\n", rc); goto out_destroy_queue; } /* register the nvmet sgl pool to the port */ rc = lpfc_sli4_repost_sgl_list( phba, &phba->sli4_hba.lpfc_nvmet_sgl_list, phba->sli4_hba.nvmet_xri_cnt); if (unlikely(rc < 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3117 Error %d during nvmet " "sgl post\n", rc); rc = -ENODEV; goto out_destroy_queue; } phba->sli4_hba.nvmet_xri_cnt = rc; /* We allocate an iocbq for every receive context SGL. * The additional allocation is for abort and ls handling. */ cnt = phba->sli4_hba.nvmet_xri_cnt + phba->sli4_hba.max_cfg_param.max_xri; } else { /* update host common xri-sgl sizes and mappings */ rc = lpfc_sli4_io_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6082 Failed to update nvme-sgl size " "and mapping: %d\n", rc); goto out_destroy_queue; } /* register the allocated common sgl pool to the port */ rc = lpfc_sli4_repost_io_sgl_list(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6116 Error %d during nvme sgl post " "operation\n", rc); /* Some NVME buffers were moved to abort nvme list */ /* A pci function reset will repost them */ rc = -ENODEV; goto out_destroy_queue; } /* Each lpfc_io_buf job structure has an iocbq element. * This cnt provides for abort, els, ct and ls requests. 
*/ cnt = phba->sli4_hba.max_cfg_param.max_xri; } if (!phba->sli.iocbq_lookup) { /* Initialize and populate the iocb list per host */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2821 initialize iocb list with %d entries\n", cnt); rc = lpfc_init_iocb_list(phba, cnt); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1413 Failed to init iocb list.\n"); goto out_destroy_queue; } } if (phba->nvmet_support) lpfc_nvmet_create_targetport(phba); if (phba->nvmet_support && phba->cfg_nvmet_mrq) { /* Post initial buffers to all RQs created */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; INIT_LIST_HEAD(&rqbp->rqb_buffer_list); rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; rqbp->buffer_count = 0; lpfc_post_rq_buffer( phba, phba->sli4_hba.nvmet_mrq_hdr[i], phba->sli4_hba.nvmet_mrq_data[i], phba->cfg_nvmet_mrq_post, i); } } /* Post the rpi header region to the device. */ rc = lpfc_sli4_post_all_rpi_hdrs(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0393 Error %d during rpi post operation\n", rc); rc = -ENODEV; goto out_free_iocblist; } lpfc_sli4_node_prep(phba); if (!(phba->hba_flag & HBA_FCOE_MODE)) { if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { /* * The FC Port needs to register FCFI (index 0) */ lpfc_reg_fcfi(phba, mboxq); mboxq->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) goto out_unset_queue; rc = 0; phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); } else { /* We are a NVME Target mode with MRQ > 1 */ /* First register the FCFI */ lpfc_reg_fcfi_mrq(phba, mboxq, 0); mboxq->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) goto out_unset_queue; rc = 0; phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, &mboxq->u.mqe.un.reg_fcfi_mrq); /* Next register the MRQs */ lpfc_reg_fcfi_mrq(phba, mboxq, 1); mboxq->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) goto out_unset_queue; rc = 0; } /* Check if the port is configured to be disabled */ lpfc_sli_read_link_ste(phba); } /* Don't post more new bufs if repost already recovered * the nvme sgls. 
*/ if (phba->nvmet_support == 0) { if (phba->sli4_hba.io_xri_cnt == 0) { len = lpfc_new_io_buf( phba, phba->sli4_hba.io_xri_max); if (len == 0) { rc = -ENOMEM; goto out_unset_queue; } if (phba->cfg_xri_rebalancing) lpfc_create_multixri_pools(phba); } } else { phba->cfg_xri_rebalancing = 0; } /* Allow asynchronous mailbox command to go through */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); /* Post receive buffers to the device */ lpfc_sli4_rb_setup(phba); /* Reset HBA FCF states after HBA reset */ phba->fcf.fcf_flag = 0; phba->fcf.current_rec.flag = 0; /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); phba->last_completion_time = jiffies; /* start eq_delay heartbeat */ if (phba->cfg_auto_imax) queue_delayed_work(phba->wq, &phba->eq_delay_work, msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); /* start per phba idle_stat_delay heartbeat */ lpfc_init_idle_stat_hb(phba); /* Start error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); /* * The port is ready, set the host's link state to LINK_DOWN * in preparation for link interrupts. */ spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_DOWN; /* Check if physical ports are trunked */ if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) phba->trunk_link.link0.state = LPFC_LINK_DOWN; if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) phba->trunk_link.link1.state = LPFC_LINK_DOWN; if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) phba->trunk_link.link2.state = LPFC_LINK_DOWN; if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) phba->trunk_link.link3.state = LPFC_LINK_DOWN; spin_unlock_irq(&phba->hbalock); /* Arm the CQs and then EQs on device */ lpfc_sli4_arm_cqeq_intr(phba); /* Indicate device interrupt mode */ phba->sli4_hba.intr_enable = 1; /* Setup CMF after HBA is initialized */ lpfc_cmf_setup(phba); if (!(phba->hba_flag & HBA_FCOE_MODE) && (phba->hba_flag & LINK_DISABLED)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3103 Adapter Link is disabled.\n"); lpfc_down_link(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3104 Adapter failed to issue " "DOWN_LINK mbox cmd, rc:x%x\n", rc); goto out_io_buff_free; } } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { /* don't perform init_link on SLI4 FC port loopback test */ if (!(phba->link_flag & LS_LOOPBACK_MODE)) { rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); if (rc) goto out_io_buff_free; } } mempool_free(mboxq, phba->mbox_mem_pool); /* Enable RAS FW log support */ lpfc_sli4_ras_setup(phba); phba->hba_flag |= HBA_SETUP; return rc; out_io_buff_free: /* Free allocated IO Buffers */ lpfc_io_free(phba); out_unset_queue: /* Unset all the queues set up in this routine when error out */ lpfc_sli4_queue_unset(phba); out_free_iocblist: lpfc_free_iocb_list(phba); out_destroy_queue: lpfc_sli4_queue_destroy(phba); out_stop_timers: lpfc_stop_hba_timers(phba); out_free_mbox: mempool_free(mboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_mbox_timeout - Timeout call back function for mbox timer * @t: Context to fetch pointer to hba structure from. * * This is the callback function for mailbox timer. 
The mailbox * timer is armed when a new mailbox command is issued and the timer * is deleted when the mailbox complete. The function is called by * the kernel timer code when a mailbox does not complete within * expected time. This function wakes up the worker thread to * process the mailbox timeout and returns. All the processing is * done by the worker thread function lpfc_mbox_timeout_handler. **/ void lpfc_mbox_timeout(struct timer_list *t) { struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); unsigned long iflag; uint32_t tmo_posted; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; if (!tmo_posted) phba->pport->work_port_events |= WORKER_MBOX_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); if (!tmo_posted) lpfc_worker_wake_up(phba); return; } /** * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions * are pending * @phba: Pointer to HBA context object. * * This function checks if any mailbox completions are present on the mailbox * completion queue. **/ static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) { uint32_t idx; struct lpfc_queue *mcq; struct lpfc_mcqe *mcqe; bool pending_completions = false; uint8_t qe_valid; if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) return false; /* Check for completions on mailbox completion queue */ mcq = phba->sli4_hba.mbx_cq; idx = mcq->hba_index; qe_valid = mcq->qe_valid; while (bf_get_le32(lpfc_cqe_valid, (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); if (bf_get_le32(lpfc_trailer_completed, mcqe) && (!bf_get_le32(lpfc_trailer_async, mcqe))) { pending_completions = true; break; } idx = (idx + 1) % mcq->entry_count; if (mcq->hba_index == idx) break; /* if the index wrapped around, toggle the valid bit */ if (phba->sli4_hba.pc_sli4_params.cqav && !idx) qe_valid = (qe_valid) ? 0 : 1; } return pending_completions; } /** * lpfc_sli4_process_missed_mbox_completions - process mbox completions * that were missed. * @phba: Pointer to HBA context object. * * For sli4, it is possible to miss an interrupt. As such mbox completions * maybe missed causing erroneous mailbox timeouts to occur. This function * checks to see if mbox completions are on the mailbox completion queue * and will process all the completions associated with the eq for the * mailbox completion queue. 
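 *
 * Illustrative sketch, not from this driver: lpfc_sli4_mbox_completions_pending()
 * above walks the completion queue comparing each entry's valid bit with an
 * expected value that flips every time the index wraps.  A toy stand-alone
 * version of that phase-bit scan (single-word "CQEs", no hardware):
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH 4

/* Toy CQE: bit 0 is the valid (phase) bit, the upper bits carry a payload. */
struct toy_cq {
	uint32_t cqe[CQ_DEPTH];
	unsigned int idx;		/* next entry to look at */
	unsigned int expected_valid;	/* flips on every wrap */
};

/* Consume the next entry if it carries the expected valid bit. */
static bool cq_has_entry(struct toy_cq *cq, unsigned int *payload)
{
	uint32_t e = cq->cqe[cq->idx];

	if ((e & 1) != cq->expected_valid)
		return false;		/* producer has not written it yet */

	*payload = (unsigned int)(e >> 1);
	cq->idx = (cq->idx + 1) % CQ_DEPTH;
	if (cq->idx == 0)		/* wrapped: the meaning of "valid" flips */
		cq->expected_valid ^= 1;
	return true;
}

int main(void)
{
	struct toy_cq cq = {
		.cqe = { (7 << 1) | 1, (9 << 1) | 1, 0, 0 },
		.idx = 0,
		.expected_valid = 1,
	};
	unsigned int payload;

	while (cq_has_entry(&cq, &payload))
		printf("completion payload %u\n", payload);
	return 0;
}

/*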
**/ static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) { struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; uint32_t eqidx; struct lpfc_queue *fpeq = NULL; struct lpfc_queue *eq; bool mbox_pending; if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) return false; /* Find the EQ associated with the mbox CQ */ if (sli4_hba->hdwq) { for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { fpeq = eq; break; } } } if (!fpeq) return false; /* Turn off interrupts from this EQ */ sli4_hba->sli4_eq_clr_intr(fpeq); /* Check to see if a mbox completion is pending */ mbox_pending = lpfc_sli4_mbox_completions_pending(phba); /* * If a mbox completion is pending, process all the events on EQ * associated with the mbox completion queue (this could include * mailbox commands, async events, els commands, receive queue data * and fcp commands) */ if (mbox_pending) /* process and rearm the EQ */ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, LPFC_QUEUE_WORK); else /* Always clear and re-arm the EQ */ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); return mbox_pending; } /** * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout * @phba: Pointer to HBA context object. * * This function is called from worker thread when a mailbox command times out. * The caller is not required to hold any locks. This function will reset the * HBA and recover all the pending commands. **/ void lpfc_mbox_timeout_handler(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; MAILBOX_t *mb = NULL; struct lpfc_sli *psli = &phba->sli; /* If the mailbox completed, process the completion */ lpfc_sli4_process_missed_mbox_completions(phba); if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) return; if (pmbox != NULL) mb = &pmbox->u.mb; /* Check the pmbox pointer first. There is a race condition * between the mbox timeout handler getting executed in the * worklist and the mailbox actually completing. When this * race condition occurs, the mbox_active will be NULL. */ spin_lock_irq(&phba->hbalock); if (pmbox == NULL) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "0353 Active Mailbox cleared - mailbox timeout " "exiting\n"); spin_unlock_irq(&phba->hbalock); return; } /* Mbox cmd <mbxCommand> timeout */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", mb->mbxCommand, phba->pport->port_state, phba->sli.sli_flag, phba->sli.mbox_active); spin_unlock_irq(&phba->hbalock); /* Setting state unknown so lpfc_sli_abort_iocb_ring * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing * it to fail all outstanding SCSI IO. */ set_bit(MBX_TMO_ERR, &phba->bit_flags); spin_lock_irq(&phba->pport->work_port_lock); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; spin_unlock_irq(&phba->pport->work_port_lock); spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_UNKNOWN; psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0345 Resetting board due to mailbox timeout\n"); /* Reset the HBA device */ lpfc_reset_hba(phba); } /** * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware * @phba: Pointer to HBA context object. * @pmbox: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. 
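 *
 * Illustrative sketch, not from this driver: the SLI layer allows only one
 * active mailbox command at a time.  A second NOWAIT submission is queued
 * and reported busy, while a POLL submission behind an active command is
 * refused outright.  A stripped-down model of that gatekeeping (hypothetical
 * names, no locking):
 */

#include <stdbool.h>
#include <stdio.h>

enum submit_mode { SUBMIT_POLL, SUBMIT_NOWAIT };
enum submit_rc   { RC_SUCCESS, RC_BUSY, RC_ERROR };

struct mbox_ctrl {
	bool active;	/* a command currently owns the hardware */
	int  queued;	/* commands waiting to be posted later */
};

static enum submit_rc mbox_submit(struct mbox_ctrl *c, enum submit_mode mode)
{
	if (c->active) {
		if (mode == SUBMIT_POLL)
			return RC_ERROR;	/* cannot poll behind another command */
		c->queued++;			/* defer; a worker posts it later */
		return RC_BUSY;
	}
	c->active = true;			/* take the single token */
	return RC_SUCCESS;
}

int main(void)
{
	struct mbox_ctrl c = { false, 0 };

	printf("first:  %d\n", mbox_submit(&c, SUBMIT_NOWAIT));	/* RC_SUCCESS */
	printf("second: %d\n", mbox_submit(&c, SUBMIT_NOWAIT));	/* RC_BUSY    */
	printf("poll:   %d\n", mbox_submit(&c, SUBMIT_POLL));	/* RC_ERROR   */
	return 0;
}

/*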
* * This function is called by discovery code and HBA management code * to submit a mailbox command to firmware with SLI-3 interface spec. This * function gets the hbalock to protect the data structures. * The mailbox command can be submitted in polling mode, in which case * this function will wait in a polling loop for the completion of the * mailbox. * If the mailbox is submitted in no_wait mode (not polling) the * function will submit the command and returns immediately without waiting * for the mailbox completion. The no_wait is supported only when HBA * is in SLI2/SLI3 mode - interrupts are enabled. * The SLI interface allows only one mailbox pending at a time. If the * mailbox is issued in polling mode and there is already a mailbox * pending, then the function will return an error. If the mailbox is issued * in NO_WAIT mode and there is a mailbox pending already, the function * will return MBX_BUSY after queuing the mailbox into mailbox queue. * The sli layer owns the mailbox object until the completion of mailbox * command if this function return MBX_BUSY or MBX_SUCCESS. For all other * return codes the caller owns the mailbox command after the return of * the function. **/ static int lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) { MAILBOX_t *mbx; struct lpfc_sli *psli = &phba->sli; uint32_t status, evtctr; uint32_t ha_copy, hc_copy; int i; unsigned long timeout; unsigned long drvr_flag = 0; uint32_t word0, ldata; void __iomem *to_slim; int processing_queue = 0; spin_lock_irqsave(&phba->hbalock, drvr_flag); if (!pmbox) { phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; /* processing mbox queue from intr_handler */ if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); return MBX_SUCCESS; } processing_queue = 1; pmbox = lpfc_mbox_get(phba); if (!pmbox) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); return MBX_SUCCESS; } } if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { if(!pmbox->vport) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, "1806 Mbox x%x failed. No vport\n", pmbox->u.mb.mbxCommand); dump_stack(); goto out_not_finished; } } /* If the PCI channel is in offline state, do not post mbox. */ if (unlikely(pci_channel_offline(phba->pcidev))) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } /* If HBA has a deferred error attention, fail the iocb. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } psli = &phba->sli; mbx = &pmbox->u.mb; status = MBX_SUCCESS; if (phba->link_state == LPFC_HBA_ERROR) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):0311 Mailbox command x%x cannot " "issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { if (lpfc_readl(phba->HCregaddr, &hc_copy) || !(hc_copy & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2528 Mailbox command x%x cannot " "issue Data: x%x x%x\n", pmbox->vport ? 
pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } } if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { /* Polling for a mbox command when another one is already active * is not allowed in SLI. Also, the driver must have established * SLI2 mode to queue and process multiple mbox commands. */ if (flag & MBX_POLL) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2529 Mailbox command x%x " "cannot issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2530 Mailbox command x%x " "cannot issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } /* Another mailbox command is still being processed, queue this * command to be processed later. */ lpfc_mbox_put(phba, pmbox); /* Mbox cmd issue - BUSY */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0308 Mbox cmd issue - BUSY Data: " "x%x x%x x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0xffffff, mbx->mbxCommand, phba->pport ? phba->pport->port_state : 0xff, psli->sli_flag, flag); psli->slistat.mbox_busy++; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); if (pmbox->vport) { lpfc_debugfs_disc_trc(pmbox->vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX Bsy vport: cmd:x%x mb:x%x x%x", (uint32_t)mbx->mbxCommand, mbx->un.varWords[0], mbx->un.varWords[1]); } else { lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_MBOX, "MBOX Bsy: cmd:x%x mb:x%x x%x", (uint32_t)mbx->mbxCommand, mbx->un.varWords[0], mbx->un.varWords[1]); } return MBX_BUSY; } psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; /* If we are not polling, we MUST be in SLI2 mode */ if (flag != MBX_POLL) { if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && (mbx->mbxCommand != MBX_KILL_BOARD)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2531 Mailbox command x%x " "cannot issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } /* timeout active mbox command */ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000); mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd <cmd> issue */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " "x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, mbx->mbxCommand, phba->pport ? phba->pport->port_state : 0xff, psli->sli_flag, flag); if (mbx->mbxCommand != MBX_HEARTBEAT) { if (pmbox->vport) { lpfc_debugfs_disc_trc(pmbox->vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX Send vport: cmd:x%x mb:x%x x%x", (uint32_t)mbx->mbxCommand, mbx->un.varWords[0], mbx->un.varWords[1]); } else { lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_MBOX, "MBOX Send: cmd:x%x mb:x%x x%x", (uint32_t)mbx->mbxCommand, mbx->un.varWords[0], mbx->un.varWords[1]); } } psli->slistat.mbox_cmd++; evtctr = psli->slistat.mbox_event; /* next set own bit for the adapter and copy over command word */ mbx->mbxOwner = OWN_CHIP; if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* Populate mbox extension offset word. 
*/ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { *(((uint32_t *)mbx) + pmbox->mbox_offset_word) = (uint8_t *)phba->mbox_ext - (uint8_t *)phba->mbox; } /* Copy the mailbox extension data */ if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, (uint8_t *)phba->mbox_ext, pmbox->in_ext_byte_len); } /* Copy command data to host SLIM area */ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); } else { /* Populate mbox extension offset word. */ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) *(((uint32_t *)mbx) + pmbox->mbox_offset_word) = MAILBOX_HBA_EXT_OFFSET; /* Copy the mailbox extension data */ if (pmbox->in_ext_byte_len && pmbox->ctx_buf) lpfc_memcpy_to_slim(phba->MBslimaddr + MAILBOX_HBA_EXT_OFFSET, pmbox->ctx_buf, pmbox->in_ext_byte_len); if (mbx->mbxCommand == MBX_CONFIG_PORT) /* copy command data into host mbox for cmpl */ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); /* First copy mbox command data to HBA SLIM, skip past first word */ to_slim = phba->MBslimaddr + sizeof (uint32_t); lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], MAILBOX_CMD_SIZE - sizeof (uint32_t)); /* Next copy over first word, with mbxOwner set */ ldata = *((uint32_t *)mbx); to_slim = phba->MBslimaddr; writel(ldata, to_slim); readl(to_slim); /* flush */ if (mbx->mbxCommand == MBX_CONFIG_PORT) /* switch over to host mailbox */ psli->sli_flag |= LPFC_SLI_ACTIVE; } wmb(); switch (flag) { case MBX_NOWAIT: /* Set up reference to mailbox command */ psli->mbox_active = pmbox; /* Interrupt board to do it */ writel(CA_MBATT, phba->CAregaddr); readl(phba->CAregaddr); /* flush */ /* Don't wait for it to finish, just return */ break; case MBX_POLL: /* Set up null reference to mailbox command */ psli->mbox_active = NULL; /* Interrupt board to do it */ writel(CA_MBATT, phba->CAregaddr); readl(phba->CAregaddr); /* flush */ if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First read mbox status word */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); } else { /* First read mbox status word */ if (lpfc_readl(phba->MBslimaddr, &word0)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } } /* Read the HBA Host Attention Register */ if (lpfc_readl(phba->HAregaddr, &ha_copy)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000) + jiffies; i = 0; /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->link_state > LPFC_WARM_START))) { if (time_after(jiffies, timeout)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } /* Check if we took a mbox interrupt while we were polling */ if (((word0 & OWN_CHIP) != OWN_CHIP) && (evtctr != psli->slistat.mbox_event)) break; if (i++ > 10) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); msleep(1); spin_lock_irqsave(&phba->hbalock, drvr_flag); } if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First copy command data */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); if (mbx->mbxCommand == MBX_CONFIG_PORT) { MAILBOX_t *slimmb; uint32_t slimword0; /* Check real SLIM for any errors */ slimword0 = readl(phba->MBslimaddr); slimmb = (MAILBOX_t *) & slimword0; if (((slimword0 & OWN_CHIP) != OWN_CHIP) && slimmb->mbxStatus) { psli->sli_flag &= ~LPFC_SLI_ACTIVE; word0 = slimword0; } } } else { /* First copy command data */ word0 = readl(phba->MBslimaddr); } /* Read the HBA Host 
Attention Register */ if (lpfc_readl(phba->HAregaddr, &ha_copy)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); goto out_not_finished; } } if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { lpfc_sli_pcimem_bcopy(phba->mbox_ext, pmbox->ctx_buf, pmbox->out_ext_byte_len); } } else { /* First copy command data */ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, MAILBOX_CMD_SIZE); /* Copy the mailbox extension data */ if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { lpfc_memcpy_from_slim( pmbox->ctx_buf, phba->MBslimaddr + MAILBOX_HBA_EXT_OFFSET, pmbox->out_ext_byte_len); } } writel(HA_MBATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; status = mbx->mbxStatus; } spin_unlock_irqrestore(&phba->hbalock, drvr_flag); return status; out_not_finished: if (processing_queue) { pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; lpfc_mbox_cmpl_put(phba, pmbox); } return MBX_NOT_FINISHED; } /** * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command * @phba: Pointer to HBA context object. * * The function blocks the posting of SLI4 asynchronous mailbox commands from * the driver internal pending mailbox queue. It will then try to wait out the * possible outstanding mailbox command before return. * * Returns: * 0 - the outstanding mailbox command completed; otherwise, the wait for * the outstanding mailbox command timed out. **/ static int lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *mboxq; int rc = 0; unsigned long timeout = 0; u32 sli_flag; u8 cmd, subsys, opcode; /* Mark the asynchronous mailbox command posting as blocked */ spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. */ if (phba->sli.mbox_active) timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, phba->sli.mbox_active) * 1000) + jiffies; spin_unlock_irq(&phba->hbalock); /* Make sure the mailbox is really active */ if (timeout) lpfc_sli4_process_missed_mbox_completions(phba); /* Wait for the outstanding mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) { /* Timeout, mark the outstanding cmd not complete */ /* Sanity check sli.mbox_active has not completed or * cancelled from another context during last 2ms sleep, * so take hbalock to be sure before logging. */ spin_lock_irq(&phba->hbalock); if (phba->sli.mbox_active) { mboxq = phba->sli.mbox_active; cmd = mboxq->u.mb.mbxCommand; subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq); sli_flag = psli->sli_flag; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2352 Mailbox command x%x " "(x%x/x%x) sli_flag x%x could " "not complete\n", cmd, subsys, opcode, sli_flag); } else { spin_unlock_irq(&phba->hbalock); } rc = 1; break; } } /* Can not cleanly block async mailbox command, fails it */ if (rc) { spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); } return rc; } /** * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command * @phba: Pointer to HBA context object. 
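 *
 * Illustrative sketch, not from this driver: lpfc_sli4_async_mbox_block()
 * above waits, in 2 ms steps bounded by the command timeout, for any
 * in-flight mailbox to drain after blocking new submissions.  The bounded
 * wait loop in plain C (POSIX nanosleep, made-up names):
 */

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll `*done` every 2 ms for up to timeout_ms; return true if it became set. */
static bool wait_for_done(const volatile bool *done, unsigned int timeout_ms)
{
	struct timespec step = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
	unsigned int waited = 0;

	while (!*done) {
		if (waited >= timeout_ms)
			return false;	/* give up: the command never completed */
		nanosleep(&step, NULL);
		waited += 2;
	}
	return true;
}

int main(void)
{
	volatile bool done = true;	/* pretend nothing was outstanding */

	printf("drained: %s\n", wait_for_done(&done, 100) ? "yes" : "timed out");
	return 0;
}

/*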
* * The function unblocks and resume posting of SLI4 asynchronous mailbox * commands from the driver internal pending mailbox queue. It makes sure * that there is no outstanding mailbox command before resuming posting * asynchronous mailbox commands. If, for any reason, there is outstanding * mailbox command, it will try to wait it out before resuming asynchronous * mailbox command posting. **/ static void lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { /* Asynchronous mailbox posting is not blocked, do nothing */ spin_unlock_irq(&phba->hbalock); return; } /* Outstanding synchronous mailbox command is guaranteed to be done, * successful or timeout, after timing-out the outstanding mailbox * command shall always be removed, so just unblock posting async * mailbox command and resume */ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); /* wake up worker thread to post asynchronous mailbox command */ lpfc_worker_wake_up(phba); } /** * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready * @phba: Pointer to HBA context object. * @mboxq: Pointer to mailbox object. * * The function waits for the bootstrap mailbox register ready bit from * port for twice the regular mailbox command timeout value. * * 0 - no timeout on waiting for bootstrap mailbox register ready. * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port * is in an unrecoverable state. **/ static int lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { uint32_t db_ready; unsigned long timeout; struct lpfc_register bmbx_reg; struct lpfc_register portstat_reg = {-1}; /* Sanity check - there is no point to wait if the port is in an * unrecoverable state. */ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0) || lpfc_sli4_unrecoverable_port(&portstat_reg)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3858 Skipping bmbx ready because " "Port Status x%x\n", portstat_reg.word0); return MBXERR_ERROR; } } timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) * 1000) + jiffies; do { bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); if (!db_ready) mdelay(2); if (time_after(jiffies, timeout)) return MBXERR_ERROR; } while (!db_ready); return 0; } /** * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox * @phba: Pointer to HBA context object. * @mboxq: Pointer to mailbox object. * * The function posts a mailbox to the port. The mailbox is expected * to be comletely filled in and ready for the port to operate on it. * This routine executes a synchronous completion operation on the * mailbox by polling for its completion. * * The caller must not be holding any locks when calling this routine. * * Returns: * MBX_SUCCESS - mailbox posted successfully * Any of the MBX error values. **/ static int lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { int rc = MBX_SUCCESS; unsigned long iflag; uint32_t mcqe_status; uint32_t mbx_cmnd; struct lpfc_sli *psli = &phba->sli; struct lpfc_mqe *mb = &mboxq->u.mqe; struct lpfc_bmbx_create *mbox_rgn; struct dma_address *dma_address; /* * Only one mailbox can be active to the bootstrap mailbox region * at a time and there is no queueing provided. 
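 *
 * Illustrative sketch, not from this driver: the bootstrap post that follows
 * writes the mailbox DMA address in two halves and gates every step on the
 * port's ready bit (lpfc_sli4_wait_bmbx_ready() above).  A simulated version
 * of that handshake; fake_doorbell, the bit position and the retry count are
 * assumptions for the example.
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DB_READY_BIT 0x1u

/* Simulated doorbell register; real code reads hardware instead. */
static uint32_t fake_doorbell = DB_READY_BIT;

static uint32_t read_doorbell(void)
{
	return fake_doorbell;
}

/* Poll (bounded) until the port reports the bootstrap mailbox is ready. */
static bool wait_bmbx_ready(unsigned int max_tries)
{
	while (max_tries--) {
		if (read_doorbell() & DB_READY_BIT)
			return true;
		/* real code delays ~2 ms between reads */
	}
	return false;
}

/* Post a 64-bit mailbox address as two writes, each gated on readiness. */
static bool post_bmbx_address(uint64_t dma_addr)
{
	if (!wait_bmbx_ready(1000))
		return false;
	printf("write hi dword 0x%08x\n", (unsigned int)(dma_addr >> 32));
	if (!wait_bmbx_ready(1000))
		return false;
	printf("write lo dword 0x%08x\n", (unsigned int)dma_addr);
	return wait_bmbx_ready(1000);	/* final ready: command was consumed */
}

int main(void)
{
	return post_bmbx_address(0x12345678deadbeefULL) ? 0 : 1;
}

/*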
*/ spin_lock_irqsave(&phba->hbalock, iflag); if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2532 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), psli->sli_flag, MBX_POLL); return MBXERR_ERROR; } /* The server grabs the token and owns it until release */ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = mboxq; spin_unlock_irqrestore(&phba->hbalock, iflag); /* wait for bootstrap mbox register for readyness */ rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); if (rc) goto exit; /* * Initialize the bootstrap memory region to avoid stale data areas * in the mailbox post. Then copy the caller's mailbox contents to * the bmbx mailbox region. */ mbx_cmnd = bf_get(lpfc_mqe_command, mb); memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, sizeof(struct lpfc_mqe)); /* Post the high mailbox dma address to the port and wait for ready. */ dma_address = &phba->sli4_hba.bmbx.dma_address; writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); /* wait for bootstrap mbox register for hi-address write done */ rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); if (rc) goto exit; /* Post the low mailbox dma address to the port. */ writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); /* wait for bootstrap mbox register for low address write done */ rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); if (rc) goto exit; /* * Read the CQ to ensure the mailbox has completed. * If so, update the mailbox status so that the upper layers * can complete the request normally. */ lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, sizeof(struct lpfc_mqe)); mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, sizeof(struct lpfc_mcqe)); mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); /* * When the CQE status indicates a failure and the mailbox status * indicates success then copy the CQE status into the mailbox status * (and prefix it with x4000). */ if (mcqe_status != MB_CQE_STATUS_SUCCESS) { if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) bf_set(lpfc_mqe_status, mb, (LPFC_MBX_ERROR_RANGE | mcqe_status)); rc = MBXERR_ERROR; } else lpfc_sli4_swap_str(phba, mboxq); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" " x%x x%x CQ: x%x x%x x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), bf_get(lpfc_mqe_status, mb), mb->un.mb_words[0], mb->un.mb_words[1], mb->un.mb_words[2], mb->un.mb_words[3], mb->un.mb_words[4], mb->un.mb_words[5], mb->un.mb_words[6], mb->un.mb_words[7], mb->un.mb_words[8], mb->un.mb_words[9], mb->un.mb_words[10], mb->un.mb_words[11], mb->un.mb_words[12], mboxq->mcqe.word0, mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, mboxq->mcqe.trailer); exit: /* We are holding the token, no needed for lock when release */ spin_lock_irqsave(&phba->hbalock, iflag); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = NULL; spin_unlock_irqrestore(&phba->hbalock, iflag); return rc; } /** * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware * @phba: Pointer to HBA context object. 
* @mboxq: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. * * This function is called by discovery code and HBA management code to submit * a mailbox command to firmware with SLI-4 interface spec. * * Return codes the caller owns the mailbox command after the return of the * function. **/ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, uint32_t flag) { struct lpfc_sli *psli = &phba->sli; unsigned long iflags; int rc; /* dump from issue mailbox command if setup */ lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); rc = lpfc_mbox_dev_check(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2544 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), psli->sli_flag, flag); goto out_not_finished; } /* Detect polling mode and jump to a handler */ if (!phba->sli4_hba.intr_enable) { if (flag == MBX_POLL) rc = lpfc_sli4_post_sync_mbox(phba, mboxq); else rc = -EIO; if (rc != MBX_SUCCESS) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "(%d):2541 Mailbox command x%x " "(x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), bf_get(lpfc_mqe_status, &mboxq->u.mqe), bf_get(lpfc_mcqe_status, &mboxq->mcqe), bf_get(lpfc_mcqe_ext_status, &mboxq->mcqe), psli->sli_flag, flag); return rc; } else if (flag == MBX_POLL) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "(%d):2542 Try to issue mailbox command " "x%x (x%x/x%x) synchronously ahead of async " "mailbox command queue: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), psli->sli_flag, flag); /* Try to block the asynchronous mailbox posting */ rc = lpfc_sli4_async_mbox_block(phba); if (!rc) { /* Successfully blocked, now issue sync mbox cmd */ rc = lpfc_sli4_post_sync_mbox(phba, mboxq); if (rc != MBX_SUCCESS) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "(%d):2597 Sync Mailbox command " "x%x (x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), bf_get(lpfc_mqe_status, &mboxq->u.mqe), bf_get(lpfc_mcqe_status, &mboxq->mcqe), bf_get(lpfc_mcqe_ext_status, &mboxq->mcqe), psli->sli_flag, flag); /* Unblock the async mailbox posting afterward */ lpfc_sli4_async_mbox_unblock(phba); } return rc; } /* Now, interrupt mode asynchronous mailbox command */ rc = lpfc_mbox_cmd_check(phba, mboxq); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2543 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), psli->sli_flag, flag); goto out_not_finished; } /* Put the mailbox command to the driver internal FIFO */ psli->slistat.mbox_busy++; spin_lock_irqsave(&phba->hbalock, iflags); lpfc_mbox_put(phba, mboxq); spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0354 Mbox cmd issue - Enqueue Data: " "x%x (x%x/x%x) x%x x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0xffffff, bf_get(lpfc_mqe_command, &mboxq->u.mqe), lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), phba->pport->port_state, psli->sli_flag, MBX_NOWAIT); /* Wake up worker thread to transport mailbox command from head */ lpfc_worker_wake_up(phba); return MBX_BUSY; out_not_finished: return MBX_NOT_FINISHED; } /** * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device * @phba: Pointer to HBA context object. * * This function is called by worker thread to send a mailbox command to * SLI4 HBA firmware. * **/ int lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *mboxq; int rc = MBX_SUCCESS; unsigned long iflags; struct lpfc_mqe *mqe; uint32_t mbx_cmnd; /* Check interrupt mode before post async mailbox command */ if (unlikely(!phba->sli4_hba.intr_enable)) return MBX_NOT_FINISHED; /* Check for mailbox command service token */ spin_lock_irqsave(&phba->hbalock, iflags); if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { spin_unlock_irqrestore(&phba->hbalock, iflags); return MBX_NOT_FINISHED; } if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { spin_unlock_irqrestore(&phba->hbalock, iflags); return MBX_NOT_FINISHED; } if (unlikely(phba->sli.mbox_active)) { spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0384 There is pending active mailbox cmd\n"); return MBX_NOT_FINISHED; } /* Take the mailbox command service token */ psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; /* Get the next mailbox command from head of queue */ mboxq = lpfc_mbox_get(phba); /* If no more mailbox command waiting for post, we're done */ if (!mboxq) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, iflags); return MBX_SUCCESS; } phba->sli.mbox_active = mboxq; spin_unlock_irqrestore(&phba->hbalock, iflags); /* Check device readiness for posting mailbox command */ rc = lpfc_mbox_dev_check(phba); if (unlikely(rc)) /* Driver clean routine will clean up pending mailbox */ goto out_not_finished; /* Prepare the mbox command to be posted */ mqe = &mboxq->u.mqe; mbx_cmnd = bf_get(lpfc_mqe_command, mqe); /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " "x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), phba->pport->port_state, psli->sli_flag); if (mbx_cmnd != MBX_HEARTBEAT) { if (mboxq->vport) { lpfc_debugfs_disc_trc(mboxq->vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX Send vport: cmd:x%x mb:x%x x%x", mbx_cmnd, mqe->un.mb_words[0], mqe->un.mb_words[1]); } else { lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_MBOX, "MBOX Send: cmd:x%x mb:x%x x%x", mbx_cmnd, mqe->un.mb_words[0], mqe->un.mb_words[1]); } } psli->slistat.mbox_cmd++; /* Post the mailbox command to the port */ rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2533 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, mboxq), lpfc_sli_config_mbox_opcode_get(phba, mboxq), psli->sli_flag, MBX_NOWAIT); goto out_not_finished; } return rc; out_not_finished: spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli.mbox_active) { mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; __lpfc_mbox_cmpl_put(phba, mboxq); /* Release the token */ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = NULL; } spin_unlock_irqrestore(&phba->hbalock, iflags); return MBX_NOT_FINISHED; } /** * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command * @phba: Pointer to HBA context object. * @pmbox: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. * * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from * the API jump table function pointer from the lpfc_hba struct. * * Return codes the caller owns the mailbox command after the return of the * function. **/ int lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) { return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); } /** * lpfc_mbox_api_table_setup - Set up mbox api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * * This routine sets up the mbox interface API function jump table in @phba * struct. * Returns: 0 - success, -ENODEV - failure. **/ int lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; phba->lpfc_sli_handle_slow_ring_event = lpfc_sli_handle_slow_ring_event_s3; phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; break; case LPFC_PCI_DEV_OC: phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; phba->lpfc_sli_handle_slow_ring_event = lpfc_sli_handle_slow_ring_event_s4; phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1420 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; } return 0; } /** * __lpfc_sli_ringtx_put - Add an iocb to the txq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to address of newly added command iocb. * * This function is called with hbalock held for SLI3 ports or * the ring lock held for SLI4 ports to add a command * iocb to the txq when SLI layer cannot submit the command iocb * to the ring. 
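 *
 * A minimal caller sketch (illustrative only, not an actual driver path),
 * assuming the SLI4 ring lock is the lock required for this ring:
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 *
 * The lockdep_assert_held() calls in the body document this contract and,
 * with lockdep enabled, enforce it.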
**/ void __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { if (phba->sli_rev == LPFC_SLI_REV4) lockdep_assert_held(&pring->ring_lock); else lockdep_assert_held(&phba->hbalock); /* Insert the caller's iocb in the txq tail for later processing. */ list_add_tail(&piocb->list, &pring->txq); } /** * lpfc_sli_next_iocb - Get the next iocb in the txq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to address of newly added command iocb. * * This function is called with hbalock held before a new * iocb is submitted to the firmware. This function checks * txq to flush the iocbs in txq to Firmware before * submitting new iocbs to the Firmware. * If there are iocbs in the txq which need to be submitted * to firmware, lpfc_sli_next_iocb returns the first element * of the txq after dequeuing it from txq. * If there is no iocb in the txq then the function will return * *piocb and *piocb is set to NULL. Caller needs to check * *piocb to find if there are more commands in the txq. **/ static struct lpfc_iocbq * lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq **piocb) { struct lpfc_iocbq * nextiocb; lockdep_assert_held(&phba->hbalock); nextiocb = lpfc_sli_ringtx_get(phba, pring); if (!nextiocb) { nextiocb = *piocb; *piocb = NULL; } return nextiocb; } /** * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. * @ring_number: SLI ring number to issue iocb on. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT * flag is turned on, the function returns IOCB_ERROR. When the link is down, * this function allows only iocbs for posting buffers. This function finds * next available slot in the command ring and posts the command to the * available slot and writes the port attention register to request HBA start * processing new iocb. If there is no slot available in the ring and * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise * the function returns IOCB_BUSY. * * This function is called with hbalock held. The function will return success * after it successfully submit the iocb to firmware or after adding to the * txq. **/ static int __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; lockdep_assert_held(&phba->hbalock); if (piocb->cmd_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1807 IOCB x%x failed. No vport\n", piocb->iocb.ulpCommand); dump_stack(); return IOCB_ERROR; } /* If the PCI channel is in offline state, do not post iocbs. */ if (unlikely(pci_channel_offline(phba->pcidev))) return IOCB_ERROR; /* If HBA has a deferred error attention, fail the iocb. 
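 * DEFER_ERATT indicates an error-attention condition that has been detected
 * but not yet handled, so new command IOCBs are rejected here rather than
 * posted to the port.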
*/ if (unlikely(phba->hba_flag & DEFER_ERATT)) return IOCB_ERROR; /* * We should never get an IOCB if we are in a < LINK_DOWN state */ if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return IOCB_ERROR; /* * Check to see if we are blocking IOCB processing because of a * outstanding event. */ if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) goto iocb_busy; if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { /* * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF * can be issued if the link is not up. */ switch (piocb->iocb.ulpCommand) { case CMD_QUE_RING_BUF_CN: case CMD_QUE_RING_BUF64_CN: /* * For IOCBs, like QUE_RING_BUF, that have no rsp ring * completion, cmd_cmpl MUST be 0. */ if (piocb->cmd_cmpl) piocb->cmd_cmpl = NULL; fallthrough; case CMD_CREATE_XRI_CR: case CMD_CLOSE_XRI_CN: case CMD_CLOSE_XRI_CX: break; default: goto iocb_busy; } /* * For FCP commands, we must be in a state where we can process link * attention events. */ } else if (unlikely(pring->ringno == LPFC_FCP_RING && !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { goto iocb_busy; } while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); if (iocb) lpfc_sli_update_ring(phba, pring); else lpfc_sli_update_full_ring(phba, pring); if (!piocb) return IOCB_SUCCESS; goto out_busy; iocb_busy: pring->stats.iocb_cmd_delay++; out_busy: if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, pring, piocb); return IOCB_SUCCESS; } return IOCB_BUSY; } /** * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb * @phba: Pointer to HBA context object. * @ring_number: SLI ring number to issue wqe on. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to * send an iocb command to an HBA with SLI-3 interface spec. * * This function takes the hbalock before invoking the lockless version. * The function will return success after it successfully submit the wqe to * firmware or after adding to the txq. **/ static int __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { unsigned long iflags; int rc; spin_lock_irqsave(&phba->hbalock, iflags); rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&phba->hbalock, iflags); return rc; } /** * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe * @phba: Pointer to HBA context object. * @ring_number: SLI ring number to issue wqe on. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue * an wqe command to an HBA with SLI-4 interface spec. * * This function is a lockless version. The function will return success * after it successfully submit the wqe to firmware or after adding to the * txq. 
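 *
 * In outline (mirroring the body below) the fast path is simply:
 *
 *	lpfc_prep_embed_io(phba, piocb->io_buf);
 *	lpfc_sli4_issue_wqe(phba, piocb->io_buf->hdwq, piocb);
 *
 * i.e. the WQE is built from the already-mapped I/O buffer and then posted
 * to the owning hardware queue.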
**/ static int __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_io_buf *lpfc_cmd = piocb->io_buf; lpfc_prep_embed_io(phba, lpfc_cmd); return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb); } void lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq; union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe; struct sli4_sge *sgl; /* 128 byte wqe support here */ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; if (phba->fcp_embed_io) { struct fcp_cmnd *fcp_cmnd; u32 *ptr; fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Word 0-2 - FCP_CMND */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; wqe->generic.bde.addrHigh = 0; wqe->generic.bde.addrLow = 88; /* Word 22 */ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); /* Word 22-29 FCP CMND Payload */ ptr = &wqe->words[22]; memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); } else { /* Word 0-2 - Inline BDE */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd); wqe->generic.bde.addrHigh = sgl->addr_hi; wqe->generic.bde.addrLow = sgl->addr_lo; /* Word 10 */ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); } /* add the VMID tags as per switch response */ if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) { if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) { bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, (piocb->vmid_tag.cs_ctl_vmid)); } else if (phba->cfg_vmid_app_header) { bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); wqe->words[31] = piocb->vmid_tag.app_id; } } } /** * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. * @ring_number: SLI ring number to issue iocb on. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue * an iocb command to an HBA with SLI-4 interface spec. * * This function is called with ringlock held. The function will return success * after it successfully submit the iocb to firmware or after adding to the * txq. **/ static int __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_sglq *sglq; union lpfc_wqe128 *wqe; struct lpfc_queue *wq; struct lpfc_sli_ring *pring; u32 ulp_command = get_job_cmnd(phba, piocb); /* Get the WQ */ if ((piocb->cmd_flag & LPFC_IO_FCP) || (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; } else { wq = phba->sli4_hba.els_wq; } /* Get corresponding ring */ pring = wq->pring; /* * The WQE can be either 64 or 128 bytes, */ lockdep_assert_held(&pring->ring_lock); wqe = &piocb->wqe; if (piocb->sli4_xritag == NO_XRI) { if (ulp_command == CMD_ABORT_XRI_CX) sglq = NULL; else { sglq = __lpfc_sli_get_els_sglq(phba, piocb); if (!sglq) { if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, pring, piocb); return IOCB_SUCCESS; } else { return IOCB_BUSY; } } } } else if (piocb->cmd_flag & LPFC_IO_FCP) { /* These IO's already have an XRI and a mapped sgl. 
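 * so no ELS sglq needs to be allocated on this path; the WQE already
 * carries the XRI that was assigned when the I/O was set up.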
*/ sglq = NULL; } else { /* * This is a continuation of a commandi,(CX) so this * sglq is on the active list */ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); if (!sglq) return IOCB_ERROR; } if (sglq) { piocb->sli4_lxritag = sglq->sli4_lxritag; piocb->sli4_xritag = sglq->sli4_xritag; /* ABTS sent by initiator to CT exchange, the * RX_ID field will be filled with the newly * allocated responder XRI. */ if (ulp_command == CMD_XMIT_BLS_RSP64_CX && piocb->abort_bls == LPFC_ABTS_UNSOL_INT) bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, piocb->sli4_xritag); bf_set(wqe_xri_tag, &wqe->generic.wqe_com, piocb->sli4_xritag); if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI) return IOCB_ERROR; } if (lpfc_sli4_wq_put(wq, wqe)) return IOCB_ERROR; lpfc_sli_ringtxcmpl_put(phba, pring, piocb); return 0; } /* * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o * * This routine wraps the actual fcp i/o function for issusing WQE for sli-4 * or IOCB for sli-3 function. * pointer from the lpfc_hba struct. * * Return codes: * IOCB_ERROR - Error * IOCB_SUCCESS - Success * IOCB_BUSY - Busy **/ int lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag); } /* * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb * * This routine wraps the actual lockless version for issusing IOCB function * pointer from the lpfc_hba struct. * * Return codes: * IOCB_ERROR - Error * IOCB_SUCCESS - Success * IOCB_BUSY - Busy **/ int __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); } static void __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, u32 elscmd, u8 tmo, u8 expect_rsp) { struct lpfc_hba *phba = vport->phba; IOCB_t *cmd; cmd = &cmdiocbq->iocb; memset(cmd, 0, sizeof(*cmd)); cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys); cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; if (expect_rsp) { cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); cmd->un.elsreq64.remoteID = did; /* DID */ cmd->ulpCommand = CMD_ELS_REQUEST64_CR; cmd->ulpTimeout = tmo; } else { cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); cmd->un.genreq64.xmit_els_remoteID = did; /* DID */ cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; cmd->ulpPU = PARM_NPIV_DID; } cmd->ulpBdeCount = 1; cmd->ulpLe = 1; cmd->ulpClass = CLASS3; /* If we have NPIV enabled, we want to send ELS traffic by VPI. 
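 * The VPI is carried in ulpContext and the CT field selects the context
 * type (VPI, or INVALID_RPI for the ECHO command), as programmed below.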
*/ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { if (expect_rsp) { cmd->un.elsreq64.myID = vport->fc_myDID; /* For ELS_REQUEST64_CR, use the VPI by default */ cmd->ulpContext = phba->vpi_ids[vport->vpi]; } cmd->ulpCt_h = 0; /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) cmd->ulpCt_l = 0; /* context = invalid RPI */ else cmd->ulpCt_l = 1; /* context = VPI */ } } static void __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, u32 elscmd, u8 tmo, u8 expect_rsp) { struct lpfc_hba *phba = vport->phba; union lpfc_wqe128 *wqe; struct ulp_bde64_le *bde; u8 els_id; wqe = &cmdiocbq->wqe; memset(wqe, 0, sizeof(*wqe)); /* Word 0 - 2 BDE */ bde = (struct ulp_bde64_le *)&wqe->generic.bde; bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); bde->type_size = cpu_to_le32(cmd_size); bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); if (expect_rsp) { bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE); /* Transfer length */ wqe->els_req.payload_len = cmd_size; wqe->els_req.max_response_payload_len = FCELSSIZE; /* DID */ bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did); /* Word 11 - ELS_ID */ switch (elscmd) { case ELS_CMD_PLOGI: els_id = LPFC_ELS_ID_PLOGI; break; case ELS_CMD_FLOGI: els_id = LPFC_ELS_ID_FLOGI; break; case ELS_CMD_LOGO: els_id = LPFC_ELS_ID_LOGO; break; case ELS_CMD_FDISC: if (!vport->fc_myDID) { els_id = LPFC_ELS_ID_FDISC; break; } fallthrough; default: els_id = LPFC_ELS_ID_DEFAULT; break; } bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); } else { /* DID */ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did); /* Transfer length */ wqe->xmit_els_rsp.response_payload_len = cmd_size; bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com, CMD_XMIT_ELS_RSP64_WQE); } bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo); bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag); bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); /* If we have NPIV enabled, we want to send ELS traffic by VPI. * For SLI4, since the driver controls VPIs we also want to include * all ELS pt2pt protocol traffic as well. 
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || (vport->fc_flag & FC_PT2PT)) { if (expect_rsp) { bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID); /* For ELS_REQUEST64_WQE, use the VPI by default */ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, phba->vpi_ids[vport->vpi]); } /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) bf_set(wqe_ct, &wqe->generic.wqe_com, 0); else bf_set(wqe_ct, &wqe->generic.wqe_com, 1); } } void lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, u32 elscmd, u8 tmo, u8 expect_rsp) { phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did, elscmd, tmo, expect_rsp); } static void __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) { IOCB_t *cmd; cmd = &cmdiocbq->iocb; memset(cmd, 0, sizeof(*cmd)); cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64); cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); cmd->ulpContext = rpi; cmd->ulpClass = CLASS3; cmd->ulpCommand = CMD_GEN_REQUEST64_CR; cmd->ulpBdeCount = 1; cmd->ulpLe = 1; cmd->ulpOwner = OWN_CHIP; cmd->ulpTimeout = tmo; } static void __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) { union lpfc_wqe128 *cmdwqe; struct ulp_bde64_le *bde, *bpl; u32 xmit_len = 0, total_len = 0, size, type, i; cmdwqe = &cmdiocbq->wqe; memset(cmdwqe, 0, sizeof(*cmdwqe)); /* Calculate total_len and xmit_len */ bpl = (struct ulp_bde64_le *)bmp->virt; for (i = 0; i < num_entry; i++) { size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; total_len += size; } for (i = 0; i < num_entry; i++) { size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK; if (type != ULP_BDE64_TYPE_BDE_64) break; xmit_len += size; } /* Words 0 - 2 */ bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; bde->addr_low = bpl->addr_low; bde->addr_high = bpl->addr_high; bde->type_size = cpu_to_le32(xmit_len); bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); /* Word 3 */ cmdwqe->gen_req.request_payload_len = xmit_len; /* Word 5 */ bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT); bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1); bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1); /* Word 6 */ bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi); /* Word 7 */ bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo); bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3); bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR); bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI); /* Word 12 */ cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len; } void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) { phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo); } static void __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) { IOCB_t *icmd; icmd = &cmdiocbq->iocb; memset(icmd, 0, sizeof(*icmd)); 
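	/* Build the BPL BDE and the w5 FC header fields (R_CTL, TYPE, F_CTL)
	 * for the XMIT_SEQUENCE64 request; the CR vs CX variant is selected
	 * from cr_cx_cmd below.
	 */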
icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); icmd->un.xseq64.w5.hcsw.Fctl = LA; if (last_seq) icmd->un.xseq64.w5.hcsw.Fctl |= LS; icmd->un.xseq64.w5.hcsw.Dfctl = 0; icmd->un.xseq64.w5.hcsw.Rctl = rctl; icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; icmd->ulpBdeCount = 1; icmd->ulpLe = 1; icmd->ulpClass = CLASS3; switch (cr_cx_cmd) { case CMD_XMIT_SEQUENCE64_CR: icmd->ulpContext = rpi; icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; break; case CMD_XMIT_SEQUENCE64_CX: icmd->ulpContext = ox_id; icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; break; default: break; } } static void __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd) { union lpfc_wqe128 *wqe; struct ulp_bde64 *bpl; wqe = &cmdiocbq->wqe; memset(wqe, 0, sizeof(*wqe)); /* Words 0 - 2 */ bpl = (struct ulp_bde64 *)bmp->virt; wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh; wqe->xmit_sequence.bde.addrLow = bpl->addrLow; wqe->xmit_sequence.bde.tus.w = bpl->tus.w; /* Word 5 */ bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq); bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1); bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl); bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi); bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, CMD_XMIT_SEQUENCE64_WQE); /* Word 7 */ bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); /* Word 9 */ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); /* Word 12 */ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) wqe->xmit_sequence.xmit_len = full_size; else wqe->xmit_sequence.xmit_len = wqe->xmit_sequence.bde.tus.f.bdeSize; } void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) { phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry, rctl, last_seq, cr_cx_cmd); } static void __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, bool ia, bool wqec) { IOCB_t *icmd = NULL; icmd = &cmdiocbq->iocb; memset(icmd, 0, sizeof(*icmd)); /* Word 5 */ icmd->un.acxri.abortContextTag = ulp_context; icmd->un.acxri.abortIoTag = iotag; if (ia) { /* Word 7 */ icmd->ulpCommand = CMD_CLOSE_XRI_CN; } else { /* Word 3 */ icmd->un.acxri.abortType = ABORT_TYPE_ABTS; /* Word 7 */ icmd->ulpClass = ulp_class; icmd->ulpCommand = CMD_ABORT_XRI_CN; } /* Word 7 */ icmd->ulpLe = 1; } static void __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, bool ia, bool wqec) { union lpfc_wqe128 *wqe; wqe = &cmdiocbq->wqe; memset(wqe, 0, sizeof(*wqe)); /* Word 3 */ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); if (ia) bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); else bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); /* Word 7 */ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE); /* Word 8 */ wqe->abort_cmd.wqe_com.abort_tag = ulp_context; /* Word 9 */ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag); /* Word 10 */ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); /* Word 11 */ if (wqec) bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, 
cqid); bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); } void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, bool ia, bool wqec) { phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class, cqid, ia, wqec); } /** * lpfc_sli_api_table_setup - Set up sli api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * * This routine sets up the SLI interface API function jump table in @phba * struct. * Returns: 0 - success, -ENODEV - failure. **/ int lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3; phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3; phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3; phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3; phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3; break; case LPFC_PCI_DEV_OC: phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4; phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4; phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4; phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4; phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1419 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; } return 0; } /** * lpfc_sli4_calc_ring - Calculates which ring to use * @phba: Pointer to HBA context object. * @piocb: Pointer to command iocb. * * For SLI4 only, FCP IO can deferred to one fo many WQs, based on * hba_wqidx, thus we need to calculate the corresponding ring. * Since ABORTS must go on the same WQ of the command they are * aborting, we use command's hba_wqidx. */ struct lpfc_sli_ring * lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { struct lpfc_io_buf *lpfc_cmd; if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { if (unlikely(!phba->sli4_hba.hdwq)) return NULL; /* * for abort iocb hba_wqidx should already * be setup based on what work queue we used. */ if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { lpfc_cmd = piocb->io_buf; piocb->hba_wqidx = lpfc_cmd->hdwq_no; } return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring; } else { if (unlikely(!phba->sli4_hba.els_wq)) return NULL; piocb->hba_wqidx = 0; return phba->sli4_hba.els_wq->pring; } } inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq) { struct lpfc_hba *phba = eq->phba; /* * Unlocking an irq is one of the entry point to check * for re-schedule, but we are good for io submission * path as midlayer does a get_cpu to glue us in. Flush * out the invalidate queue so we can see the updated * value for flag. */ smp_rmb(); if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) /* We will not likely get the completion for the caller * during this iteration but i guess that's fine. * Future io's coming on this eq should be able to * pick it up. As for the case of single io's, they * will be handled through a sched from polling timer * function which is currently triggered every 1msec. 
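 * Note that the EQ is processed with LPFC_QUEUE_NOARM, so this polled pass
 * does not re-arm the queue for interrupt delivery.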
*/ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM, LPFC_QUEUE_WORK); } /** * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. * @ring_number: Ring number * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb * function. This function gets the hbalock and calls * __lpfc_sli_issue_iocb function and will return the error returned * by __lpfc_sli_issue_iocb function. This wrapper is used by * functions which do not hold hbalock. **/ int lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_sli_ring *pring; struct lpfc_queue *eq; unsigned long iflags; int rc; /* If the PCI channel is in offline state, do not post iocbs. */ if (unlikely(pci_channel_offline(phba->pcidev))) return IOCB_ERROR; if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli_prep_wqe(phba, piocb); eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; pring = lpfc_sli4_calc_ring(phba, piocb); if (unlikely(pring == NULL)) return IOCB_ERROR; spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_sli4_poll_eq(eq); } else { /* For now, SLI2/3 will still use hbalock */ spin_lock_irqsave(&phba->hbalock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&phba->hbalock, iflags); } return rc; } /** * lpfc_extra_ring_setup - Extra ring setup function * @phba: Pointer to HBA context object. * * This function is called while driver attaches with the * HBA to setup the extra ring. The extra ring is used * only when driver needs to support target mode functionality * or IP over FC functionalities. * * This function is called with no lock held. SLI3 only. **/ static int lpfc_extra_ring_setup( struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; psli = &phba->sli; /* Adjust cmd/rsp ring iocb entries more evenly */ /* Take some away from the FCP ring */ pring = &psli->sli3_ring[LPFC_FCP_RING]; pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; /* and give them to the extra ring */ pring = &psli->sli3_ring[LPFC_EXTRA_RING]; pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; /* Setup default profile for this ring */ pring->iotag_max = 4096; pring->num_mask = 1; pring->prt[0].profile = 0; /* Mask 0 */ pring->prt[0].rctl = phba->cfg_multi_ring_rctl; pring->prt[0].type = phba->cfg_multi_ring_type; pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; return 0; } static void lpfc_sli_post_recovery_event(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { unsigned long iflags; struct lpfc_work_evt *evtp = &ndlp->recovery_evt; spin_lock_irqsave(&phba->hbalock, iflags); if (!list_empty(&evtp->evt_listp)) { spin_unlock_irqrestore(&phba->hbalock, iflags); return; } /* Incrementing the reference count until the queued work is done. 
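 * lpfc_nlp_get() takes the node reference here; it is expected to be dropped
 * once the queued LPFC_EVT_RECOVER_PORT work has been processed.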
*/ evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (!evtp->evt_arg1) { spin_unlock_irqrestore(&phba->hbalock, iflags); return; } evtp->evt = LPFC_EVT_RECOVER_PORT; list_add_tail(&evtp->evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_worker_wake_up(phba); } /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. * @phba: Pointer to HBA context object. * @iocbq: Pointer to iocb object. * * The async_event handler calls this routine when it receives * an ASYNC_STATUS_CN event from the port. The port generates * this event when an Abort Sequence request to an rport fails * twice in succession. The abort could be originated by the * driver or by the port. The ABTS could have been for an ELS * or FCP IO. The port only generates this event when an ABTS * fails to complete after one retry. */ static void lpfc_sli_abts_err_handler(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { struct lpfc_nodelist *ndlp = NULL; uint16_t rpi = 0, vpi = 0; struct lpfc_vport *vport = NULL; /* The rpi in the ulpContext is vport-sensitive. */ vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; rpi = iocbq->iocb.ulpContext; lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3092 Port generated ABTS async event " "on vpi %d rpi %d status 0x%x\n", vpi, rpi, iocbq->iocb.ulpStatus); vport = lpfc_find_vport_by_vpid(phba, vpi); if (!vport) goto err_exit; ndlp = lpfc_findnode_rpi(vport, rpi); if (!ndlp) goto err_exit; if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) lpfc_sli_abts_recover_port(vport, ndlp); return; err_exit: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3095 Event Context not found, no " "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", vpi, rpi, iocbq->iocb.ulpStatus, iocbq->iocb.ulpContext); } /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. * @phba: pointer to HBA context object. * @ndlp: nodelist pointer for the impacted rport. * @axri: pointer to the wcqe containing the failed exchange. * * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the * port. The port generates this event when an abort exchange request to an * rport fails twice in succession with no reply. The abort could be originated * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. */ void lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct sli4_wcqe_xri_aborted *axri) { uint32_t ext_status = 0; if (!ndlp) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3115 Node Context not found, driver " "ignoring abts err event\n"); return; } lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3116 Port generated FCP XRI ABORT event on " "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], bf_get(lpfc_wcqe_xa_xri, axri), bf_get(lpfc_wcqe_xa_status, axri), axri->parameter); /* * Catch the ABTS protocol failure case. Older OCe FW releases returned * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. */ ext_status = axri->parameter & IOERR_PARAM_MASK; if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) lpfc_sli_post_recovery_event(phba, ndlp); } /** * lpfc_sli_async_event_handler - ASYNC iocb handler function * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @iocbq: Pointer to iocb object. 
* * This function is called by the slow ring event handler * function when there is an ASYNC event iocb in the ring. * This function is called with no lock held. * Currently this function handles only temperature related * ASYNC events. The function decodes the temperature sensor * event message and posts events for the management applications. **/ static void lpfc_sli_async_event_handler(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) { IOCB_t *icmd; uint16_t evt_code; struct temp_event temp_event_data; struct Scsi_Host *shost; uint32_t *iocb_w; icmd = &iocbq->iocb; evt_code = icmd->un.asyncstat.evt_code; switch (evt_code) { case ASYNC_TEMP_WARN: case ASYNC_TEMP_SAFE: temp_event_data.data = (uint32_t) icmd->ulpContext; temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; if (evt_code == ASYNC_TEMP_WARN) { temp_event_data.event_code = LPFC_THRESHOLD_TEMP; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0347 Adapter is very hot, please take " "corrective action. temperature : %d Celsius\n", (uint32_t) icmd->ulpContext); } else { temp_event_data.event_code = LPFC_NORMAL_TEMP; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0340 Adapter temperature is OK now. " "temperature : %d Celsius\n", (uint32_t) icmd->ulpContext); } /* Send temperature change event to applications */ shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), (char *) &temp_event_data, LPFC_NL_VENDOR_ID); break; case ASYNC_STATUS_CN: lpfc_sli_abts_err_handler(phba, iocbq); break; default: iocb_w = (uint32_t *) icmd; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0346 Ring %d handler: unexpected ASYNC_STATUS" " evt_code 0x%x\n" "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", pring->ringno, icmd->un.asyncstat.evt_code, iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); break; } } /** * lpfc_sli4_setup - SLI ring setup function * @phba: Pointer to HBA context object. * * lpfc_sli_setup sets up rings of the SLI interface with * number of iocbs per ring and iotags. This function is * called while driver attach to the HBA and before the * interrupts are enabled. So there is no need for locking. * * This function always returns 0. **/ int lpfc_sli4_setup(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; pring = phba->sli4_hba.els_wq->pring; pring->num_mask = LPFC_MAX_RING_MASK; pring->prt[0].profile = 0; /* Mask 0 */ pring->prt[0].rctl = FC_RCTL_ELS_REQ; pring->prt[0].type = FC_TYPE_ELS; pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event; pring->prt[1].profile = 0; /* Mask 1 */ pring->prt[1].rctl = FC_RCTL_ELS_REP; pring->prt[1].type = FC_TYPE_ELS; pring->prt[1].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event; pring->prt[2].profile = 0; /* Mask 2 */ /* NameServer Inquiry */ pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; /* NameServer */ pring->prt[2].type = FC_TYPE_CT; pring->prt[2].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; pring->prt[3].profile = 0; /* Mask 3 */ /* NameServer response */ pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; /* NameServer */ pring->prt[3].type = FC_TYPE_CT; pring->prt[3].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; return 0; } /** * lpfc_sli_setup - SLI ring setup function * @phba: Pointer to HBA context object. 
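 *
 * The ring geometry chosen here (numCiocb, numRiocb and the per-IOCB sizes)
 * must fit within the SLI2/SLI3 SLIM; the routine checks the running total
 * against MAX_SLIM_IOCB_SIZE once every ring has been sized.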
* * lpfc_sli_setup sets up rings of the SLI interface with * number of iocbs per ring and iotags. This function is * called while driver attach to the HBA and before the * interrupts are enabled. So there is no need for locking. * * This function always returns 0. SLI3 only. **/ int lpfc_sli_setup(struct lpfc_hba *phba) { int i, totiocbsize = 0; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; psli->sli_flag = 0; psli->iocbq_lookup = NULL; psli->iocbq_lookup_len = 0; psli->last_iotag = 0; for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; switch (i) { case LPFC_FCP_RING: /* ring 0 - FCP */ /* numCiocb and numRiocb are used in config_port */ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; pring->iotag_ctr = 0; pring->iotag_max = (phba->cfg_hba_queue_depth * 2); pring->fast_iotag = pring->iotag_max; pring->num_mask = 0; break; case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ /* numCiocb and numRiocb are used in config_port */ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; pring->iotag_max = phba->cfg_hba_queue_depth; pring->num_mask = 0; break; case LPFC_ELS_RING: /* ring 2 - ELS / CT */ /* numCiocb and numRiocb are used in config_port */ pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; pring->fast_iotag = 0; pring->iotag_ctr = 0; pring->iotag_max = 4096; pring->lpfc_sli_rcv_async_status = lpfc_sli_async_event_handler; pring->num_mask = LPFC_MAX_RING_MASK; pring->prt[0].profile = 0; /* Mask 0 */ pring->prt[0].rctl = FC_RCTL_ELS_REQ; pring->prt[0].type = FC_TYPE_ELS; pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event; pring->prt[1].profile = 0; /* Mask 1 */ pring->prt[1].rctl = FC_RCTL_ELS_REP; pring->prt[1].type = FC_TYPE_ELS; pring->prt[1].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event; pring->prt[2].profile = 0; /* Mask 2 */ /* NameServer Inquiry */ pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; /* NameServer */ pring->prt[2].type = FC_TYPE_CT; pring->prt[2].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; pring->prt[3].profile = 0; /* Mask 3 */ /* NameServer response */ pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; /* NameServer */ pring->prt[3].type = FC_TYPE_CT; pring->prt[3].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event; break; } totiocbsize += (pring->sli.sli3.numCiocb * pring->sli.sli3.sizeCiocb) + (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); } if (totiocbsize > MAX_SLIM_IOCB_SIZE) { /* Too many cmd / rsp ring entries in SLI2 SLIM */ printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " "SLI2 SLIM Data: x%x x%lx\n", phba->brd_no, totiocbsize, (unsigned long) MAX_SLIM_IOCB_SIZE); } if (phba->cfg_multi_ring_support == 2) lpfc_extra_ring_setup(phba); return 0; } /** * lpfc_sli4_queue_init - Queue initialization function * @phba: Pointer to HBA context object. * * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each * ring. This function also initializes ring indices of each ring. * This function is called during the initialization of the SLI * interface of an HBA. * This function is called with no lock held and always returns * 1. **/ void lpfc_sli4_queue_init(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; int i; psli = &phba->sli; spin_lock_irq(&phba->hbalock); INIT_LIST_HEAD(&psli->mboxq); INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ for (i = 0; i < phba->cfg_hdw_queue; i++) { pring = phba->sli4_hba.hdwq[i].io_wq->pring; pring->flag = 0; pring->ringno = LPFC_FCP_RING; pring->txcmplq_cnt = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); spin_lock_init(&pring->ring_lock); } pring = phba->sli4_hba.els_wq->pring; pring->flag = 0; pring->ringno = LPFC_ELS_RING; pring->txcmplq_cnt = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); spin_lock_init(&pring->ring_lock); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { pring = phba->sli4_hba.nvmels_wq->pring; pring->flag = 0; pring->ringno = LPFC_ELS_RING; pring->txcmplq_cnt = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); spin_lock_init(&pring->ring_lock); } spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli_queue_init - Queue initialization function * @phba: Pointer to HBA context object. * * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each * ring. This function also initializes ring indices of each ring. * This function is called during the initialization of the SLI * interface of an HBA. * This function is called with no lock held and always returns * 1. 
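 *
 * Each SLI3 ring's txq, txcmplq, iocb_continueq, iocb_continue_saveq and
 * postbufq list heads are initialized here along with the ring_lock,
 * mirroring the SLI4 variant above.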
**/ void lpfc_sli_queue_init(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; int i; psli = &phba->sli; spin_lock_irq(&phba->hbalock); INIT_LIST_HEAD(&psli->mboxq); INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; pring->ringno = i; pring->sli.sli3.next_cmdidx = 0; pring->sli.sli3.local_getidx = 0; pring->sli.sli3.cmdidx = 0; INIT_LIST_HEAD(&pring->iocb_continueq); INIT_LIST_HEAD(&pring->iocb_continue_saveq); INIT_LIST_HEAD(&pring->postbufq); pring->flag = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); spin_lock_init(&pring->ring_lock); } spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system * @phba: Pointer to HBA context object. * * This routine flushes the mailbox command subsystem. It will unconditionally * flush all the mailbox commands in the three possible stages in the mailbox * command sub-system: pending mailbox command queue; the outstanding mailbox * command; and completed mailbox command queue. It is caller's responsibility * to make sure that the driver is in the proper state to flush the mailbox * command sub-system. Namely, the posting of mailbox commands into the * pending mailbox command queue from the various clients must be stopped; * either the HBA is in a state that it will never works on the outstanding * mailbox command (such as in EEH or ERATT conditions) or the outstanding * mailbox command has been completed. **/ static void lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *pmb; unsigned long iflag; /* Disable softirqs, including timers from obtaining phba->hbalock */ local_bh_disable(); /* Flush all the mailbox commands in the mbox system */ spin_lock_irqsave(&phba->hbalock, iflag); /* The pending mailbox command queue */ list_splice_init(&phba->sli.mboxq, &completions); /* The outstanding active mailbox command */ if (psli->mbox_active) { list_add_tail(&psli->mbox_active->list, &completions); psli->mbox_active = NULL; psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; } /* The completed mailbox command queue */ list_splice_init(&phba->sli.mboxq_cmpl, &completions); spin_unlock_irqrestore(&phba->hbalock, iflag); /* Enable softirqs again, done with phba->hbalock */ local_bh_enable(); /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ while (!list_empty(&completions)) { list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba, pmb); } } /** * lpfc_sli_host_down - Vport cleanup function * @vport: Pointer to virtual port object. * * lpfc_sli_host_down is called to clean up the resources * associated with a vport before destroying virtual * port data structures. * This function does following operations: * - Free discovery resources associated with this virtual * port. * - Free iocbs associated with this virtual port in * the txq. * - Send abort for all iocb commands associated with this * vport in txcmplq. * * This function is called with no lock held and always returns 1. 
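 *
 * Txq entries that belong to this vport are moved to a local completion list
 * and then failed with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN via
 * lpfc_sli_cancel_iocbs(), while txcmplq entries are aborted with
 * lpfc_sli_issue_abort_iotag().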
**/ int lpfc_sli_host_down(struct lpfc_vport *vport) { LIST_HEAD(completions); struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; int i; unsigned long flags = 0; uint16_t prev_pring_flag; lpfc_cleanup_discovery_resources(vport); spin_lock_irqsave(&phba->hbalock, flags); /* * Error everything on the txq since these iocbs * have not been given to the FW yet. * Also issue ABTS for everything on the txcmplq */ if (phba->sli_rev != LPFC_SLI_REV4) { for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; prev_pring_flag = pring->flag; /* Only slow rings */ if (pring->ringno == LPFC_ELS_RING) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->vport != vport) continue; list_move_tail(&iocb->list, &completions); } list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { if (iocb->vport != vport) continue; lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); } pring->flag = prev_pring_flag; } } else { list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring) continue; if (pring == phba->sli4_hba.els_wq->pring) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } prev_pring_flag = pring->flag; spin_lock(&pring->ring_lock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->vport != vport) continue; list_move_tail(&iocb->list, &completions); } spin_unlock(&pring->ring_lock); list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { if (iocb->vport != vport) continue; lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); } pring->flag = prev_pring_flag; } } spin_unlock_irqrestore(&phba->hbalock, flags); /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN); return 1; } /** * lpfc_sli_hba_down - Resource cleanup function for the HBA * @phba: Pointer to HBA context object. * * This function cleans up all iocb, buffers, mailbox commands * while shutting down the HBA. This function is called with no * lock held and always returns 1. * This function does the following to cleanup driver resources: * - Free discovery resources for each virtual port * - Cleanup any pending fabric iocbs * - Iterate through the iocb txq and free each entry * in the list. * - Free up any buffer posted to the HBA * - Free mailbox commands in the mailbox queue. **/ int lpfc_sli_hba_down(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli *psli = &phba->sli; struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *buf_ptr; unsigned long flags = 0; int i; /* Shutdown the mailbox command sub-system */ lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); lpfc_hba_down_prep(phba); /* Disable softirqs, including timers from obtaining phba->hbalock */ local_bh_disable(); lpfc_fabric_abort_hba(phba); spin_lock_irqsave(&phba->hbalock, flags); /* * Error everything on the txq since these iocbs * have not been given to the FW yet. 
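 * They can therefore simply be spliced onto a local list and completed with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN further down.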
*/ if (phba->sli_rev != LPFC_SLI_REV4) { for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; /* Only slow rings */ if (pring->ringno == LPFC_ELS_RING) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } list_splice_init(&pring->txq, &completions); } } else { list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring) continue; spin_lock(&pring->ring_lock); list_splice_init(&pring->txq, &completions); spin_unlock(&pring->ring_lock); if (pring == phba->sli4_hba.els_wq->pring) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } } } spin_unlock_irqrestore(&phba->hbalock, flags); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN); spin_lock_irqsave(&phba->hbalock, flags); list_splice_init(&phba->elsbuf, &completions); phba->elsbuf_cnt = 0; phba->elsbuf_prev_cnt = 0; spin_unlock_irqrestore(&phba->hbalock, flags); while (!list_empty(&completions)) { list_remove_head(&completions, buf_ptr, struct lpfc_dmabuf, list); lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); } /* Enable softirqs again, done with phba->hbalock */ local_bh_enable(); /* Return any active mbox cmds */ del_timer_sync(&psli->mbox_tmo); spin_lock_irqsave(&phba->pport->work_port_lock, flags); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); return 1; } /** * lpfc_sli_pcimem_bcopy - SLI memory copy function * @srcp: Source memory pointer. * @destp: Destination memory pointer. * @cnt: Number of words required to be copied. * * This function is used for copying data between driver memory * and the SLI memory. This function also changes the endianness * of each word if native endianness is different from SLI * endianness. This function can be called with or without * lock. **/ void lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) { uint32_t *src = srcp; uint32_t *dest = destp; uint32_t ldata; int i; for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { ldata = *src; ldata = le32_to_cpu(ldata); *dest = ldata; src++; dest++; } } /** * lpfc_sli_bemem_bcopy - SLI memory copy function * @srcp: Source memory pointer. * @destp: Destination memory pointer. * @cnt: Number of words required to be copied. * * This function is used for copying data between a data structure * with big endian representation to local endianness. * This function can be called with or without lock. **/ void lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) { uint32_t *src = srcp; uint32_t *dest = destp; uint32_t ldata; int i; for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { ldata = *src; ldata = be32_to_cpu(ldata); *dest = ldata; src++; dest++; } } /** * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mp: Pointer to driver buffer object. * * This function is called with no lock held. * It always return zero after adding the buffer to the postbufq * buffer list. 
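 *
 * Buffers queued here are later looked up and unlinked either by DMA address
 * (lpfc_sli_ringpostbuf_get()) or by buffer tag
 * (lpfc_sli_ring_taggedbuf_get()).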
**/ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_dmabuf *mp) { /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up later */ spin_lock_irq(&phba->hbalock); list_add_tail(&mp->list, &pring->postbufq); pring->postbufq_cnt++; spin_unlock_irq(&phba->hbalock); return 0; } /** * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer * @phba: Pointer to HBA context object. * * When HBQ is enabled, buffers are searched based on tags. This function * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag * does not conflict with tags of buffer posted for unsolicited events. * The function returns the allocated tag. The function is called with * no locks held. **/ uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) { spin_lock_irq(&phba->hbalock); phba->buffer_tag_count++; /* * Always set the QUE_BUFTAG_BIT to distiguish between * a tag assigned by HBQ. */ phba->buffer_tag_count |= QUE_BUFTAG_BIT; spin_unlock_irq(&phba->hbalock); return phba->buffer_tag_count; } /** * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @tag: Buffer tag. * * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX * iocb is posted to the response ring with the tag of the buffer. * This function searches the pring->postbufq list using the tag * to find buffer associated with CMD_IOCB_RET_XRI64_CX * iocb. If the buffer is found then lpfc_dmabuf object of the * buffer is returned to the caller else NULL is returned. * This function is called with no lock held. **/ struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t tag) { struct lpfc_dmabuf *mp, *next_mp; struct list_head *slp = &pring->postbufq; /* Search postbufq, from the beginning, looking for a match on tag */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { if (mp->buffer_tag == tag) { list_del_init(&mp->list); pring->postbufq_cnt--; spin_unlock_irq(&phba->hbalock); return mp; } } spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0402 Cannot find virtual addr for buffer tag on " "ring %d Data x%lx x%px x%px x%x\n", pring->ringno, (unsigned long) tag, slp->next, slp->prev, pring->postbufq_cnt); return NULL; } /** * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @phys: DMA address of the buffer. * * This function searches the buffer list using the dma_address * of unsolicited event to find the driver's lpfc_dmabuf object * corresponding to the dma_address. The function returns the * lpfc_dmabuf object if a buffer is found else it returns NULL. * This function is called by the ct and els unsolicited event * handlers to get the buffer associated with the unsolicited * event. * * This function is called with no lock held. 
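 * phba->hbalock is taken internally while the postbufq list is searched for a matching DMA address.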
**/ struct lpfc_dmabuf * lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, dma_addr_t phys) { struct lpfc_dmabuf *mp, *next_mp; struct list_head *slp = &pring->postbufq; /* Search postbufq, from the beginning, looking for a match on phys */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { if (mp->phys == phys) { list_del_init(&mp->list); pring->postbufq_cnt--; spin_unlock_irq(&phba->hbalock); return mp; } } spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0410 Cannot find virtual addr for mapped buf on " "ring %d Data x%llx x%px x%px x%x\n", pring->ringno, (unsigned long long)phys, slp->next, slp->prev, pring->postbufq_cnt); return NULL; } /** * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs * @phba: Pointer to HBA context object. * @cmdiocb: Pointer to driver command iocb object. * @rspiocb: Pointer to driver response iocb object. * * This function is the completion handler for the abort iocbs for * ELS commands. This function is called from the ELS ring event * handler with no lock held. This function frees memory resources * associated with the abort iocb. **/ static void lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); u8 cmnd = get_job_cmnd(phba, cmdiocb); if (ulp_status) { /* * Assume that the port already completed and returned, or * will return the iocb. Just Log the message. */ if (phba->sli_rev < LPFC_SLI_REV4) { if (cmnd == CMD_ABORT_XRI_CX && ulp_status == IOSTAT_LOCAL_REJECT && ulp_word4 == IOERR_ABORT_REQUESTED) { goto release_iocb; } } lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, "0327 Cannot abort els iocb x%px " "with io cmd xri %x abort tag : x%x, " "abort status %x abort code %x\n", cmdiocb, get_job_abtsiotag(phba, cmdiocb), (phba->sli_rev == LPFC_SLI_REV4) ? get_wqe_reqtag(cmdiocb) : cmdiocb->iocb.un.acxri.abortContextTag, ulp_status, ulp_word4); } release_iocb: lpfc_sli_release_iocbq(phba, cmdiocb); return; } /** * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command * @phba: Pointer to HBA context object. * @cmdiocb: Pointer to driver command iocb object. * @rspiocb: Pointer to driver response iocb object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for ELS commands * which are aborted. The function frees memory resources used for * the aborted ELS commands. **/ void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp = cmdiocb->ndlp; IOCB_t *irsp; LPFC_MBOXQ_t *mbox; u32 ulp_command, ulp_status, ulp_word4, iotag; ulp_command = get_job_cmnd(phba, cmdiocb); ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); if (phba->sli_rev == LPFC_SLI_REV4) { iotag = get_wqe_reqtag(cmdiocb); } else { irsp = &rspiocb->iocb; iotag = irsp->ulpIoTag; /* It is possible a PLOGI_RJT for NPIV ports to get aborted. * The MBX_REG_LOGIN64 mbox command is freed back to the * mbox_mem_pool here. 
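 * The cleanup below only runs in this non-SLI4 branch, where the mailbox is carried in cmdiocb->context_un.mbox.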
*/ if (cmdiocb->context_un.mbox) { mbox = cmdiocb->context_un.mbox; lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); cmdiocb->context_un.mbox = NULL; } } /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0139 Ignoring ELS cmd code x%x completion Data: " "x%x x%x x%x x%px\n", ulp_command, ulp_status, ulp_word4, iotag, cmdiocb->ndlp); /* * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp * if exchange is busy. */ if (ulp_command == CMD_GEN_REQUEST64_CR) lpfc_ct_free_iocb(phba, cmdiocb); else lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_sli_issue_abort_iotag - Abort function for a command iocb * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @cmdiocb: Pointer to driver command iocb object. * @cmpl: completion function. * * This function issues an abort iocb for the provided command iocb. In case * of unloading, the abort iocb will not be issued for commands on the ELS * ring. Instead, the callback function is changed for those commands * so that nothing happens when they finish. This function is called with * hbalock held and no ring_lock held (SLI4). The function returns IOCB_ABORTING * when the command iocb is itself an abort or is already being aborted. * **/ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *cmdiocb, void *cmpl) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_iocbq *abtsiocbp; int retval = IOCB_ERROR; unsigned long iflags; struct lpfc_nodelist *ndlp = NULL; u32 ulp_command = get_job_cmnd(phba, cmdiocb); u16 ulp_context, iotag; bool ia; /* * There are certain command types we don't want to abort. And we * don't want to abort commands that are already in the process of * being aborted. */ if (ulp_command == CMD_ABORT_XRI_WQE || ulp_command == CMD_ABORT_XRI_CN || ulp_command == CMD_CLOSE_XRI_CN || cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED) return IOCB_ABORTING; if (!pring) { if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; else cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; return retval; } /* * If we're unloading, don't abort iocb on the ELS ring, but change * the callback so that nothing happens when it finishes.
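 * Fabric IOCBs are redirected through fabric_cmd_cmpl and all other ELS IOCBs through cmd_cmpl, both pointing at lpfc_ignore_els_cmpl.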
*/ if ((vport->load_flag & FC_UNLOADING) && pring->ringno == LPFC_ELS_RING) { if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; else cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; return retval; } /* issue ABTS for this IOCB based on iotag */ abtsiocbp = __lpfc_sli_get_iocbq(phba); if (abtsiocbp == NULL) return IOCB_NORESOURCE; /* This signals the response to set the correct status * before calling the completion handler */ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; if (phba->sli_rev == LPFC_SLI_REV4) { ulp_context = cmdiocb->sli4_xritag; iotag = abtsiocbp->iotag; } else { iotag = cmdiocb->iocb.ulpIoTag; if (pring->ringno == LPFC_ELS_RING) { ndlp = cmdiocb->ndlp; ulp_context = ndlp->nlp_rpi; } else { ulp_context = cmdiocb->iocb.ulpContext; } } if (phba->link_state < LPFC_LINK_UP || (phba->sli_rev == LPFC_SLI_REV4 && phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) ia = true; else ia = false; lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag, cmdiocb->iocb.ulpClass, LPFC_WQE_CQ_ID_DEFAULT, ia, false); abtsiocbp->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; if (cmdiocb->cmd_flag & LPFC_IO_FCP) abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); if (cmdiocb->cmd_flag & LPFC_IO_FOF) abtsiocbp->cmd_flag |= LPFC_IO_FOF; if (cmpl) abtsiocbp->cmd_cmpl = cmpl; else abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl; abtsiocbp->vport = vport; if (phba->sli_rev == LPFC_SLI_REV4) { pring = lpfc_sli4_calc_ring(phba, abtsiocbp); if (unlikely(pring == NULL)) goto abort_iotag_exit; /* Note: both hbalock and ring_lock need to be set here */ spin_lock_irqsave(&pring->ring_lock, iflags); retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); spin_unlock_irqrestore(&pring->ring_lock, iflags); } else { retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); } abort_iotag_exit: lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0339 Abort IO XRI x%x, Original iotag x%x, " "abort tag x%x Cmdjob : x%px Abortjob : x%px " "retval x%x\n", ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, retval); if (retval) { cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; __lpfc_sli_release_iocbq(phba, abtsiocbp); } /* * Caller to this routine should check for IOCB_ERROR * and handle it properly. This routine no longer removes * iocb off txcmplq and call compl in case of IOCB_ERROR. */ return retval; } /** * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. * @phba: pointer to lpfc HBA data structure. * * This routine will abort all pending and outstanding iocbs to an HBA. **/ void lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_queue *qp = NULL; int i; if (phba->sli_rev != LPFC_SLI_REV4) { for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; lpfc_sli_abort_iocb_ring(phba, pring); } return; } list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring) continue; lpfc_sli_abort_iocb_ring(phba, pring); } } /** * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts * @iocbq: Pointer to iocb object. * @vport: Pointer to driver virtual port object. * * This function acts as an iocb filter for functions which abort FCP iocbs. 
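 * A passing filter is expected to be followed by lpfc_sli_validate_fcp_iocb() before an abort is actually issued.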
* * Return values * -ENODEV, if a null iocb or vport ptr is encountered * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as * driver already started the abort process, or is an abort iocb itself * 0, passes criteria for aborting the FCP I/O iocb **/ static int lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport) { u8 ulp_command; /* No null ptr vports */ if (!iocbq || iocbq->vport != vport) return -ENODEV; /* iocb must be for FCP IO, already exists on the TX cmpl queue, * can't be premarked as driver aborted, nor be an ABORT iocb itself */ ulp_command = get_job_cmnd(vport->phba, iocbq); if (!(iocbq->cmd_flag & LPFC_IO_FCP) || !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) || (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || (ulp_command == CMD_ABORT_XRI_CN || ulp_command == CMD_CLOSE_XRI_CN || ulp_command == CMD_ABORT_XRI_WQE)) return -EINVAL; return 0; } /** * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target * @iocbq: Pointer to driver iocb object. * @vport: Pointer to driver virtual port object. * @tgt_id: SCSI ID of the target. * @lun_id: LUN ID of the scsi device. * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST * * This function acts as an iocb filter for validating a lun/SCSI target/SCSI * host. * * It will return * 0 if the filtering criteria is met for the given iocb and will return * 1 if the filtering criteria is not met. * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the * given iocb is for the SCSI device specified by vport, tgt_id and * lun_id parameter. * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the * given iocb is for the SCSI target specified by vport and tgt_id * parameters. * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the * given iocb is for the SCSI host associated with the given vport. * This function is called with no locks held. **/ static int lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) { struct lpfc_io_buf *lpfc_cmd; int rc = 1; lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); if (lpfc_cmd->pCmd == NULL) return rc; switch (ctx_cmd) { case LPFC_CTX_LUN: if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) rc = 0; break; case LPFC_CTX_TGT: if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) rc = 0; break; case LPFC_CTX_HOST: rc = 0; break; default: printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", __func__, ctx_cmd); break; } return rc; } /** * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending * @vport: Pointer to virtual port. * @tgt_id: SCSI ID of the target. * @lun_id: LUN ID of the scsi device. * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. * * This function returns number of FCP commands pending for the vport. * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP * commands pending on the vport associated with SCSI device specified * by tgt_id and lun_id parameters. * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP * commands pending on the vport associated with SCSI target specified * by tgt_id parameter. * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP * commands pending on the vport. * This function returns the number of iocbs which satisfy the filter. 
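 * Outstanding abort requests (CMD_ABORT_XRI_CN, CMD_CLOSE_XRI_CN and CMD_ABORT_XRI_WQE) are included in the count.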
* This function is called without any lock held. **/ int lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocbq; int sum, i; unsigned long iflags; u8 ulp_command; spin_lock_irqsave(&phba->hbalock, iflags); for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; if (!iocbq || iocbq->vport != vport) continue; if (!(iocbq->cmd_flag & LPFC_IO_FCP) || !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) continue; /* Include counting outstanding aborts */ ulp_command = get_job_cmnd(phba, iocbq); if (ulp_command == CMD_ABORT_XRI_CN || ulp_command == CMD_CLOSE_XRI_CN || ulp_command == CMD_ABORT_XRI_WQE) { sum++; continue; } if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, ctx_cmd) == 0) sum++; } spin_unlock_irqrestore(&phba->hbalock, iflags); return sum; } /** * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs * @phba: Pointer to HBA context object * @cmdiocb: Pointer to command iocb object. * @rspiocb: Pointer to response iocb object. * * This function is called when an aborted FCP iocb completes. This * function is called by the ring event handler with no lock held. * This function frees the iocb. **/ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3096 ABORT_XRI_CX completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " "status 0x%x, reason 0x%x\n", (phba->sli_rev == LPFC_SLI_REV4) ? cmdiocb->sli4_xritag : cmdiocb->iocb.un.acxri.abortContextTag, get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb), get_job_word4(phba, rspiocb)); lpfc_sli_release_iocbq(phba, cmdiocb); return; } /** * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN * @vport: Pointer to virtual port. * @tgt_id: SCSI ID of the target. * @lun_id: LUN ID of the scsi device. * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. * * This function sends an abort command for every SCSI command * associated with the given virtual port pending on the ring * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then * lpfc_sli_validate_fcp_iocb function. The ordering for validation before * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort * followed by lpfc_sli_validate_fcp_iocb. * * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the * FCP iocbs associated with lun specified by tgt_id and lun_id * parameters * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the * FCP iocbs associated with SCSI target specified by tgt_id parameter. * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all * FCP iocbs associated with virtual port. * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4 * lpfc_sli4_calc_ring is used. * This function returns number of iocbs it failed to abort. * This function is called with no locks held. 
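 * The scan is skipped when HBA_IOQ_FLUSH is set, since all I/Os are already being flushed; phba->hbalock is taken around each individual abort submission.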
**/ int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, lpfc_ctx_cmd abort_cmd) { struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring = NULL; struct lpfc_iocbq *iocbq; int errcnt = 0, ret_val = 0; unsigned long iflags; int i; /* all I/Os are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) return errcnt; for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) continue; if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, abort_cmd) != 0) continue; spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli_rev == LPFC_SLI_REV3) { pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; } else if (phba->sli_rev == LPFC_SLI_REV4) { pring = lpfc_sli4_calc_ring(phba, iocbq); } ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq, lpfc_sli_abort_fcp_cmpl); spin_unlock_irqrestore(&phba->hbalock, iflags); if (ret_val != IOCB_SUCCESS) errcnt++; } return errcnt; } /** * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN * @vport: Pointer to virtual port. * @pring: Pointer to driver SLI ring object. * @tgt_id: SCSI ID of the target. * @lun_id: LUN ID of the scsi device. * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. * * This function sends an abort command for every SCSI command * associated with the given virtual port pending on the ring * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then * lpfc_sli_validate_fcp_iocb function. The ordering for validation before * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort * followed by lpfc_sli_validate_fcp_iocb. * * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the * FCP iocbs associated with lun specified by tgt_id and lun_id * parameters * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the * FCP iocbs associated with SCSI target specified by tgt_id parameter. * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all * FCP iocbs associated with virtual port. * This function returns number of iocbs it aborted . * This function is called with no locks held right after a taskmgmt * command is sent. 
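 * phba->hbalock is held across the lookup scan, with each command's buf_lock and, on SLI4, the destination ring_lock nested inside before the ABTS is issued.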
**/ int lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) { struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *lpfc_cmd; struct lpfc_iocbq *abtsiocbq; struct lpfc_nodelist *ndlp = NULL; struct lpfc_iocbq *iocbq; int sum, i, ret_val; unsigned long iflags; struct lpfc_sli_ring *pring_s4 = NULL; u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; bool ia; spin_lock_irqsave(&phba->hbalock, iflags); /* all I/Os are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, iflags); return 0; } sum = 0; for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) continue; if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, cmd) != 0) continue; /* Guard against IO completion being called at same time */ lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); spin_lock(&lpfc_cmd->buf_lock); if (!lpfc_cmd->pCmd) { spin_unlock(&lpfc_cmd->buf_lock); continue; } if (phba->sli_rev == LPFC_SLI_REV4) { pring_s4 = phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; if (!pring_s4) { spin_unlock(&lpfc_cmd->buf_lock); continue; } /* Note: both hbalock and ring_lock must be set here */ spin_lock(&pring_s4->ring_lock); } /* * If the iocbq is already being aborted, don't take a second * action, but do count it. */ if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring_s4->ring_lock); spin_unlock(&lpfc_cmd->buf_lock); continue; } /* issue ABTS for this IOCB based on iotag */ abtsiocbq = __lpfc_sli_get_iocbq(phba); if (!abtsiocbq) { if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring_s4->ring_lock); spin_unlock(&lpfc_cmd->buf_lock); continue; } if (phba->sli_rev == LPFC_SLI_REV4) { iotag = abtsiocbq->iotag; ulp_context = iocbq->sli4_xritag; cqid = lpfc_cmd->hdwq->io_cq_map; } else { iotag = iocbq->iocb.ulpIoTag; if (pring->ringno == LPFC_ELS_RING) { ndlp = iocbq->ndlp; ulp_context = ndlp->nlp_rpi; } else { ulp_context = iocbq->iocb.ulpContext; } } ndlp = lpfc_cmd->rdata->pnode; if (lpfc_is_link_up(phba) && (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) && !(phba->link_flag & LS_EXTERNAL_LOOPBACK)) ia = false; else ia = true; lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag, iocbq->iocb.ulpClass, cqid, ia, false); abtsiocbq->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocbq->hba_wqidx = iocbq->hba_wqidx; if (iocbq->cmd_flag & LPFC_IO_FCP) abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX; if (iocbq->cmd_flag & LPFC_IO_FOF) abtsiocbq->cmd_flag |= LPFC_IO_FOF; /* Setup callback routine and issue the command. */ abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl; /* * Indicate the IO is being aborted by the driver and set * the caller's flag into the aborted IO. */ iocbq->cmd_flag |= LPFC_DRIVER_ABORTED; if (phba->sli_rev == LPFC_SLI_REV4) { ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, abtsiocbq, 0); spin_unlock(&pring_s4->ring_lock); } else { ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbq, 0); } spin_unlock(&lpfc_cmd->buf_lock); if (ret_val == IOCB_ERROR) __lpfc_sli_release_iocbq(phba, abtsiocbq); else sum++; } spin_unlock_irqrestore(&phba->hbalock, iflags); return sum; } /** * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. 
* @rspiocbq: Pointer to response iocb. * * This function is the completion handler for iocbs issued using * lpfc_sli_issue_iocb_wait function. This function is called by the * ring event handler function without any lock held. This function * can be called from both worker thread context and interrupt * context. This function also can be called from other thread which * cleans up the SLI layer objects. * This function copy the contents of the response iocb to the * response iocb memory object provided by the caller of * lpfc_sli_issue_iocb_wait and then wakes up the thread which * sleeps for the iocb completion. **/ static void lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_iocbq *rspiocbq) { wait_queue_head_t *pdone_q; unsigned long iflags; struct lpfc_io_buf *lpfc_cmd; size_t offset = offsetof(struct lpfc_iocbq, wqe); spin_lock_irqsave(&phba->hbalock, iflags); if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) { /* * A time out has occurred for the iocb. If a time out * completion handler has been supplied, call it. Otherwise, * just free the iocbq. */ spin_unlock_irqrestore(&phba->hbalock, iflags); cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; cmdiocbq->wait_cmd_cmpl = NULL; if (cmdiocbq->cmd_cmpl) cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); else lpfc_sli_release_iocbq(phba, cmdiocbq); return; } /* Copy the contents of the local rspiocb into the caller's buffer. */ cmdiocbq->cmd_flag |= LPFC_IO_WAKE; if (cmdiocbq->rsp_iocb && rspiocbq) memcpy((char *)cmdiocbq->rsp_iocb + offset, (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset); /* Set the exchange busy flag for task management commands */ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, cur_iocbq); if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; else lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; } pdone_q = cmdiocbq->context_un.wait_queue; if (pdone_q) wake_up(pdone_q); spin_unlock_irqrestore(&phba->hbalock, iflags); return; } /** * lpfc_chk_iocb_flg - Test IOCB flag with lock held. * @phba: Pointer to HBA context object.. * @piocbq: Pointer to command iocb. * @flag: Flag to test. * * This routine grabs the hbalock and then test the cmd_flag to * see if the passed in flag is set. * Returns: * 1 if flag is set. * 0 if flag is not set. **/ static int lpfc_chk_iocb_flg(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, uint32_t flag) { unsigned long iflags; int ret; spin_lock_irqsave(&phba->hbalock, iflags); ret = piocbq->cmd_flag & flag; spin_unlock_irqrestore(&phba->hbalock, iflags); return ret; } /** * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands * @phba: Pointer to HBA context object.. * @ring_number: Ring number * @piocb: Pointer to command iocb. * @prspiocbq: Pointer to response iocb. * @timeout: Timeout in number of seconds. * * This function issues the iocb to firmware and waits for the * iocb to complete. The cmd_cmpl field of the shall be used * to handle iocbs which time out. If the field is NULL, the * function shall free the iocbq structure. If more clean up is * needed, the caller is expected to provide a completion function * that will provide the needed clean up. If the iocb command is * not completed within timeout seconds, the function will either * free the iocbq structure (if cmd_cmpl == NULL) or execute the * completion function set in the cmd_cmpl field and then return * a status of IOCB_TIMEDOUT. 
The caller should not free the iocb * resources if this function returns IOCB_TIMEDOUT. * The function waits for the iocb completion using an * non-interruptible wait. * This function will sleep while waiting for iocb completion. * So, this function should not be called from any context which * does not allow sleeping. Due to the same reason, this function * cannot be called with interrupt disabled. * This function assumes that the iocb completions occur while * this function sleep. So, this function cannot be called from * the thread which process iocb completion for this ring. * This function clears the cmd_flag of the iocb object before * issuing the iocb and the iocb completion handler sets this * flag and wakes this thread when the iocb completes. * The contents of the response iocb will be copied to prspiocbq * by the completion handler when the command completes. * This function returns IOCB_SUCCESS when success. * This function is called with no lock held. **/ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, struct lpfc_iocbq *prspiocbq, uint32_t timeout) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); long timeleft, timeout_req = 0; int retval = IOCB_SUCCESS; uint32_t creg_val; struct lpfc_iocbq *iocb; int txq_cnt = 0; int txcmplq_cnt = 0; struct lpfc_sli_ring *pring; unsigned long iflags; bool iocb_completed = true; if (phba->sli_rev >= LPFC_SLI_REV4) { lpfc_sli_prep_wqe(phba, piocb); pring = lpfc_sli4_calc_ring(phba, piocb); } else pring = &phba->sli.sli3_ring[ring_number]; /* * If the caller has provided a response iocbq buffer, then rsp_iocb * is NULL or its an error. */ if (prspiocbq) { if (piocb->rsp_iocb) return IOCB_ERROR; piocb->rsp_iocb = prspiocbq; } piocb->wait_cmd_cmpl = piocb->cmd_cmpl; piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait; piocb->context_un.wait_queue = &done_q; piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) return IOCB_ERROR; creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); spin_lock_irqsave(&phba->hbalock, iflags); if (!(piocb->cmd_flag & LPFC_IO_WAKE)) { /* * IOCB timed out. Inform the wake iocb wait * completion function and set local status */ iocb_completed = false; piocb->cmd_flag |= LPFC_IO_WAKE_TMO; } spin_unlock_irqrestore(&phba->hbalock, iflags); if (iocb_completed) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0331 IOCB wake signaled\n"); /* Note: we are not indicating if the IOCB has a success * status or not - that's for the caller to check. * IOCB_SUCCESS means just that the command was sent and * completed. Not that it completed successfully. 
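 * The caller checks the completion status in the response iocb that lpfc_sli_wake_iocb_wait copied into prspiocbq.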
* */ } else if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0338 IOCB wait timeout error - no " "wake response Data x%x\n", timeout); retval = IOCB_TIMEDOUT; } else { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0330 IOCB wake NOT set, " "Data x%x x%lx\n", timeout, (timeleft / jiffies)); retval = IOCB_TIMEDOUT; } } else if (retval == IOCB_BUSY) { if (phba->cfg_log_verbose & LOG_SLI) { list_for_each_entry(iocb, &pring->txq, list) { txq_cnt++; } list_for_each_entry(iocb, &pring->txcmplq, list) { txcmplq_cnt++; } lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", phba->iocb_cnt, txq_cnt, txcmplq_cnt); } return retval; } else { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0332 IOCB wait issue failed, Data x%x\n", retval); retval = IOCB_ERROR; } if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) return IOCB_ERROR; creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } if (prspiocbq) piocb->rsp_iocb = NULL; piocb->context_un.wait_queue = NULL; piocb->cmd_cmpl = NULL; return retval; } /** * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox * @phba: Pointer to HBA context object. * @pmboxq: Pointer to driver mailbox object. * @timeout: Timeout in number of seconds. * * This function issues the mailbox to firmware and waits for the * mailbox command to complete. If the mailbox command is not * completed within timeout seconds, it returns MBX_TIMEOUT. * The function waits for the mailbox completion using an * interruptible wait. If the thread is woken up due to a * signal, MBX_TIMEOUT error is returned to the caller. Caller * should not free the mailbox resources, if this function returns * MBX_TIMEOUT. * This function will sleep while waiting for mailbox completion. * So, this function should not be called from any context which * does not allow sleeping. Due to the same reason, this function * cannot be called with interrupt disabled. * This function assumes that the mailbox completion occurs while * this function sleep. So, this function cannot be called from * the worker thread which processes mailbox completion. * This function is called in the context of HBA management * applications. * This function returns MBX_SUCCESS when successful. * This function is called with no lock held. **/ int lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, uint32_t timeout) { struct completion mbox_done; int retval; unsigned long flag; pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; /* setup wake call as IOCB callback */ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; /* setup context3 field to pass wait_queue pointer to wake function */ init_completion(&mbox_done); pmboxq->context3 = &mbox_done; /* now issue the command */ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_for_completion_timeout(&mbox_done, msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); pmboxq->context3 = NULL; /* * if LPFC_MBX_WAKE flag is set the mailbox is completed * else do not free the resources. */ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { retval = MBX_SUCCESS; } else { retval = MBX_TIMEOUT; pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } spin_unlock_irqrestore(&phba->hbalock, flag); } return retval; } /** * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system * @phba: Pointer to HBA context. * @mbx_action: Mailbox shutdown options. 
* * This function is called to shutdown the driver's mailbox sub-system. * It first marks the mailbox sub-system is in a block state to prevent * the asynchronous mailbox command from issued off the pending mailbox * command queue. If the mailbox command sub-system shutdown is due to * HBA error conditions such as EEH or ERATT, this routine shall invoke * the mailbox sub-system flush routine to forcefully bring down the * mailbox sub-system. Otherwise, if it is due to normal condition (such * as with offline or HBA function reset), this routine will wait for the * outstanding mailbox command to complete before invoking the mailbox * sub-system flush routine to gracefully bring down mailbox sub-system. **/ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) { struct lpfc_sli *psli = &phba->sli; unsigned long timeout; if (mbx_action == LPFC_MBX_NO_WAIT) { /* delay 100ms for port state */ msleep(100); lpfc_sli_mbox_sys_flush(phba); return; } timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; /* Disable softirqs, including timers from obtaining phba->hbalock */ local_bh_disable(); spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. */ if (phba->sli.mbox_active) timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, phba->sli.mbox_active) * 1000) + jiffies; spin_unlock_irq(&phba->hbalock); /* Enable softirqs again, done with phba->hbalock */ local_bh_enable(); while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) /* Timeout, let the mailbox flush routine to * forcefully release active mailbox command */ break; } } else { spin_unlock_irq(&phba->hbalock); /* Enable softirqs again, done with phba->hbalock */ local_bh_enable(); } lpfc_sli_mbox_sys_flush(phba); } /** * lpfc_sli_eratt_read - read sli-3 error attention events * @phba: Pointer to HBA context. * * This function is called to read the SLI3 device error attention registers * for possible error attention events. The caller must hold the hostlock * with spin_lock_irq(). * * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ static int lpfc_sli_eratt_read(struct lpfc_hba *phba) { uint32_t ha_copy; /* Read chip Host Attention (HA) register */ if (lpfc_readl(phba->HAregaddr, &ha_copy)) goto unplug_err; if (ha_copy & HA_ERATT) { /* Read host status register to retrieve error event */ if (lpfc_sli_read_hs(phba)) goto unplug_err; /* Check if there is a deferred error condition is active */ if ((HS_FFER1 & phba->work_hs) && ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { phba->hba_flag |= DEFER_ERATT; /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); } /* Set the driver HA work bitmap */ phba->work_ha |= HA_ERATT; /* Indicate polling handles this ERATT */ phba->hba_flag |= HBA_ERATT_HANDLED; return 1; } return 0; unplug_err: /* Set the driver HS work bitmap */ phba->work_hs |= UNPLUG_ERR; /* Set the driver HA work bitmap */ phba->work_ha |= HA_ERATT; /* Indicate polling handles this ERATT */ phba->hba_flag |= HBA_ERATT_HANDLED; return 1; } /** * lpfc_sli4_eratt_read - read sli-4 error attention events * @phba: Pointer to HBA context. 
* * This function is called to read the SLI4 device error attention registers * for possible error attention events. The caller must hold the hostlock * with spin_lock_irq(). * * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ static int lpfc_sli4_eratt_read(struct lpfc_hba *phba) { uint32_t uerr_sta_hi, uerr_sta_lo; uint32_t if_type, portsmphr; struct lpfc_register portstat_reg; u32 logmask; /* * For now, use the SLI4 device internal unrecoverable error * registers for error attention. This can be changed later. */ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, &uerr_sta_lo) || lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, &uerr_sta_hi)) { phba->work_hs |= UNPLUG_ERR; phba->work_ha |= HA_ERATT; phba->hba_flag |= HBA_ERATT_HANDLED; return 1; } if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1423 HBA Unrecoverable error: " "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " "ue_mask_lo_reg=0x%x, " "ue_mask_hi_reg=0x%x\n", uerr_sta_lo, uerr_sta_hi, phba->sli4_hba.ue_mask_lo, phba->sli4_hba.ue_mask_hi); phba->work_status[0] = uerr_sta_lo; phba->work_status[1] = uerr_sta_hi; phba->work_ha |= HA_ERATT; phba->hba_flag |= HBA_ERATT_HANDLED; return 1; } break; case LPFC_SLI_INTF_IF_TYPE_2: case LPFC_SLI_INTF_IF_TYPE_6: if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0) || lpfc_readl(phba->sli4_hba.PSMPHRregaddr, &portsmphr)){ phba->work_hs |= UNPLUG_ERR; phba->work_ha |= HA_ERATT; phba->hba_flag |= HBA_ERATT_HANDLED; return 1; } if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { phba->work_status[0] = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); logmask = LOG_TRACE_EVENT; if (phba->work_status[0] == SLIPORT_ERR1_REG_ERR_CODE_2 && phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) logmask = LOG_SLI; lpfc_printf_log(phba, KERN_ERR, logmask, "2885 Port Status Event: " "port status reg 0x%x, " "port smphr reg 0x%x, " "error 1=0x%x, error 2=0x%x\n", portstat_reg.word0, portsmphr, phba->work_status[0], phba->work_status[1]); phba->work_ha |= HA_ERATT; phba->hba_flag |= HBA_ERATT_HANDLED; return 1; } break; case LPFC_SLI_INTF_IF_TYPE_1: default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2886 HBA Error Attention on unsupported " "if type %d.", if_type); return 1; } return 0; } /** * lpfc_sli_check_eratt - check error attention events * @phba: Pointer to HBA context. * * This function is called from timer soft interrupt context to check HBA's * error attention register bit for error attention events. * * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ int lpfc_sli_check_eratt(struct lpfc_hba *phba) { uint32_t ha_copy; /* If somebody is waiting to handle an eratt, don't process it * here. The brdkill function will do this. 
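 * That case is indicated by LS_IGNORE_ERATT in phba->link_flag, checked below.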
*/ if (phba->link_flag & LS_IGNORE_ERATT) return 0; /* Check if interrupt handler handles this ERATT */ spin_lock_irq(&phba->hbalock); if (phba->hba_flag & HBA_ERATT_HANDLED) { /* Interrupt handler has handled ERATT */ spin_unlock_irq(&phba->hbalock); return 0; } /* * If there is deferred error attention, do not check for error * attention */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { spin_unlock_irq(&phba->hbalock); return 0; } /* If PCI channel is offline, don't process it */ if (unlikely(pci_channel_offline(phba->pcidev))) { spin_unlock_irq(&phba->hbalock); return 0; } switch (phba->sli_rev) { case LPFC_SLI_REV2: case LPFC_SLI_REV3: /* Read chip Host Attention (HA) register */ ha_copy = lpfc_sli_eratt_read(phba); break; case LPFC_SLI_REV4: /* Read device Uncoverable Error (UERR) registers */ ha_copy = lpfc_sli4_eratt_read(phba); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0299 Invalid SLI revision (%d)\n", phba->sli_rev); ha_copy = 0; break; } spin_unlock_irq(&phba->hbalock); return ha_copy; } /** * lpfc_intr_state_check - Check device state for interrupt handling * @phba: Pointer to HBA context. * * This inline routine checks whether a device or its PCI slot is in a state * that the interrupt should be handled. * * This function returns 0 if the device or the PCI slot is in a state that * interrupt should be handled, otherwise -EIO. */ static inline int lpfc_intr_state_check(struct lpfc_hba *phba) { /* If the pci channel is offline, ignore all the interrupts */ if (unlikely(pci_channel_offline(phba->pcidev))) return -EIO; /* Update device level interrupt statistics */ phba->sli.slistat.sli_intr++; /* Ignore all interrupts during initialization. */ if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return -EIO; return 0; } /** * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt * service routine when device with SLI-3 interface spec is enabled with * MSI-X multi-message interrupt mode and there are slow-path events in * the HBA. However, when the device is enabled with either MSI or Pin-IRQ * interrupt mode, this function is called as part of the device-level * interrupt handler. When the PCI slot is in error recovery or the HBA * is undergoing initialization, the interrupt handler will not process * the interrupt. The link attention and ELS ring attention events are * handled by the worker thread. The interrupt handler signals the worker * thread and returns for these events. This function is called without * any lock held. It gets the hbalock to access and update SLI data * structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t lpfc_sli_sp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy, hc_copy; uint32_t work_ha_copy; unsigned long status; unsigned long iflag; uint32_t control; MAILBOX_t *mbox, *pmbox; struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *mp; LPFC_MBOXQ_t *pmb; int rc; /* * Get the driver's phba structure from the dev_id and * assume the HBA is not interrupting. 
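 * If phba is NULL the interrupt is treated as not ours and IRQ_NONE is returned.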
*/ phba = (struct lpfc_hba *)dev_id; if (unlikely(!phba)) return IRQ_NONE; /* * Stuff needs to be attented to when this function is invoked as an * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { /* Check device state for handling interrupt */ if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for slow-path events */ spin_lock_irqsave(&phba->hbalock, iflag); if (lpfc_readl(phba->HAregaddr, &ha_copy)) goto unplug_error; /* If somebody is waiting to handle an eratt don't process it * here. The brdkill function will do this. */ if (phba->link_flag & LS_IGNORE_ERATT) ha_copy &= ~HA_ERATT; /* Check the need for handling ERATT in interrupt handler */ if (ha_copy & HA_ERATT) { if (phba->hba_flag & HBA_ERATT_HANDLED) /* ERATT polling has handled ERATT */ ha_copy &= ~HA_ERATT; else /* Indicate interrupt handler handles ERATT */ phba->hba_flag |= HBA_ERATT_HANDLED; } /* * If there is deferred error attention, do not check for any * interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } /* Clear up only attention source related to slow-path */ if (lpfc_readl(phba->HCregaddr, &hc_copy)) goto unplug_error; writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr); writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), phba->HAregaddr); writel(hc_copy, phba->HCregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock_irqrestore(&phba->hbalock, iflag); } else ha_copy = phba->ha_copy; work_ha_copy = ha_copy & phba->work_ha_mask; if (work_ha_copy) { if (work_ha_copy & HA_LATT) { if (phba->sli.sli_flag & LPFC_PROCESS_LA) { /* * Turn off Link Attention interrupts * until CLEAR_LA done */ spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag &= ~LPFC_PROCESS_LA; if (lpfc_readl(phba->HCregaddr, &control)) goto unplug_error; control &= ~HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irqrestore(&phba->hbalock, iflag); } else work_ha_copy &= ~HA_LATT; } if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { /* * Turn off Slow Rings interrupts, LPFC_ELS_RING is * the only slow ring. 
*/ status = (work_ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); if (status & HA_RXMASK) { spin_lock_irqsave(&phba->hbalock, iflag); if (lpfc_readl(phba->HCregaddr, &control)) goto unplug_error; lpfc_debugfs_slow_ring_trc(phba, "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", control, status, (uint32_t)phba->sli.slistat.sli_intr); if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { lpfc_debugfs_slow_ring_trc(phba, "ISR Disable ring:" "pwork:x%x hawork:x%x wait:x%x", phba->work_ha, work_ha_copy, (uint32_t)((unsigned long) &phba->work_waitq)); control &= ~(HC_R0INT_ENA << LPFC_ELS_RING); writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } else { lpfc_debugfs_slow_ring_trc(phba, "ISR slow ring: pwork:" "x%x hawork:x%x wait:x%x", phba->work_ha, work_ha_copy, (uint32_t)((unsigned long) &phba->work_waitq)); } spin_unlock_irqrestore(&phba->hbalock, iflag); } } spin_lock_irqsave(&phba->hbalock, iflag); if (work_ha_copy & HA_ERATT) { if (lpfc_sli_read_hs(phba)) goto unplug_error; /* * Check if there is a deferred error condition * is active */ if ((HS_FFER1 & phba->work_hs) && ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { phba->hba_flag |= DEFER_ERATT; /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); } } if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { pmb = phba->sli.mbox_active; pmbox = &pmb->u.mb; mbox = phba->mbox; vport = pmb->vport; /* First check out the status word */ lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); if (pmbox->mbxOwner != OWN_HOST) { spin_unlock_irqrestore(&phba->hbalock, iflag); /* * Stray Mailbox Interrupt, mbxCommand <cmd> * mbxStatus <status> */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):0304 Stray Mailbox " "Interrupt mbxCommand x%x " "mbxStatus x%x\n", (vport ? vport->vpi : 0), pmbox->mbxCommand, pmbox->mbxStatus); /* clear mailbox attention bit */ work_ha_copy &= ~HA_MBATT; } else { phba->sli.mbox_active = NULL; spin_unlock_irqrestore(&phba->hbalock, iflag); phba->last_completion_time = jiffies; del_timer(&phba->sli.mbox_tmo); if (pmb->mbox_cmpl) { lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE); if (pmb->out_ext_byte_len && pmb->ctx_buf) lpfc_sli_pcimem_bcopy( phba->mbox_ext, pmb->ctx_buf, pmb->out_ext_byte_len); } if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX dflt rpi: : " "status:x%x rpi:x%x", (uint32_t)pmbox->mbxStatus, pmbox->un.varWords[0], 0); if (!pmbox->mbxStatus) { mp = (struct lpfc_dmabuf *) (pmb->ctx_buf); ndlp = (struct lpfc_nodelist *) pmb->ctx_ndlp; /* Reg_LOGIN of dflt RPI was * successful. new lets get * rid of the RPI using the * same mbox buffer. */ lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; pmb->ctx_buf = mp; pmb->ctx_ndlp = ndlp; pmb->vport = vport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_BUSY) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0350 rc should have" "been MBX_BUSY\n"); if (rc != MBX_NOT_FINISHED) goto send_current_mbox; } } spin_lock_irqsave( &phba->pport->work_port_lock, iflag); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; spin_unlock_irqrestore( &phba->pport->work_port_lock, iflag); /* Do NOT queue MBX_HEARTBEAT to the worker * thread for processing. 
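 * MBX_HEARTBEAT completions are run inline below; all other mailbox completions go through lpfc_mbox_cmpl_put() to the worker thread.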
*/ if (pmbox->mbxCommand == MBX_HEARTBEAT) { /* Process mbox now */ phba->sli.mbox_active = NULL; phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba, pmb); } else { /* Queue to worker thread to process */ lpfc_mbox_cmpl_put(phba, pmb); } } } else spin_unlock_irqrestore(&phba->hbalock, iflag); if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active == NULL)) { send_current_mbox: /* Process next mailbox command if there is one */ do { rc = lpfc_sli_issue_mbox(phba, NULL, MBX_NOWAIT); } while (rc == MBX_NOT_FINISHED); if (rc != MBX_SUCCESS) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0349 rc should be " "MBX_SUCCESS\n"); } spin_lock_irqsave(&phba->hbalock, iflag); phba->work_ha |= work_ha_copy; spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_worker_wake_up(phba); } return IRQ_HANDLED; unplug_error: spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_HANDLED; } /* lpfc_sli_sp_intr_handler */ /** * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt * service routine when device with SLI-3 interface spec is enabled with * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB * ring event in the HBA. However, when the device is enabled with either * MSI or Pin-IRQ interrupt mode, this function is called as part of the * device-level interrupt handler. When the PCI slot is in error recovery * or the HBA is undergoing initialization, the interrupt handler will not * process the interrupt. The SCSI FCP fast-path ring event are handled in * the intrrupt context. This function is called without any lock held. * It gets the hbalock to access and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t lpfc_sli_fp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; unsigned long status; unsigned long iflag; struct lpfc_sli_ring *pring; /* Get the driver's phba structure from the dev_id and * assume the HBA is not interrupting. */ phba = (struct lpfc_hba *) dev_id; if (unlikely(!phba)) return IRQ_NONE; /* * Stuff needs to be attented to when this function is invoked as an * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { /* Check device state for handling interrupt */ if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for FCP ring and other ring events */ if (lpfc_readl(phba->HAregaddr, &ha_copy)) return IRQ_HANDLED; /* Clear up only attention source related to fast-path */ spin_lock_irqsave(&phba->hbalock, iflag); /* * If there is deferred error attention, do not check for * any interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), phba->HAregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock_irqrestore(&phba->hbalock, iflag); } else ha_copy = phba->ha_copy; /* * Process all events on FCP ring. Take the optimized path for FCP IO. */ ha_copy &= ~(phba->work_ha_mask); status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); status >>= (4*LPFC_FCP_RING); pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; if (status & HA_RXMASK) lpfc_sli_handle_fast_ring_event(phba, pring, status); if (phba->cfg_multi_ring_support == 2) { /* * Process all events on extra ring. 
Take the optimized path * for extra ring IO. */ status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); status >>= (4*LPFC_EXTRA_RING); if (status & HA_RXMASK) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_EXTRA_RING], status); } } return IRQ_HANDLED; } /* lpfc_sli_fp_intr_handler */ /** * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is the HBA device-level interrupt handler to device with * SLI-3 interface spec, called from the PCI layer when either MSI or * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which * requires driver attention. This function invokes the slow-path interrupt * attention handling function and fast-path interrupt attention handling * function in turn to process the relevant HBA attention events. This * function is called without any lock held. It gets the hbalock to access * and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled, else it * returns IRQ_NONE. **/ irqreturn_t lpfc_sli_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; irqreturn_t sp_irq_rc, fp_irq_rc; unsigned long status1, status2; uint32_t hc_copy; /* * Get the driver's phba structure from the dev_id and * assume the HBA is not interrupting. */ phba = (struct lpfc_hba *) dev_id; if (unlikely(!phba)) return IRQ_NONE; /* Check device state for handling interrupt */ if (lpfc_intr_state_check(phba)) return IRQ_NONE; spin_lock(&phba->hbalock); if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { spin_unlock(&phba->hbalock); return IRQ_HANDLED; } if (unlikely(!phba->ha_copy)) { spin_unlock(&phba->hbalock); return IRQ_NONE; } else if (phba->ha_copy & HA_ERATT) { if (phba->hba_flag & HBA_ERATT_HANDLED) /* ERATT polling has handled ERATT */ phba->ha_copy &= ~HA_ERATT; else /* Indicate interrupt handler handles ERATT */ phba->hba_flag |= HBA_ERATT_HANDLED; } /* * If there is deferred error attention, do not check for any interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { spin_unlock(&phba->hbalock); return IRQ_NONE; } /* Clear attention sources except link and error attentions */ if (lpfc_readl(phba->HCregaddr, &hc_copy)) { spin_unlock(&phba->hbalock); return IRQ_HANDLED; } writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr); writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); writel(hc_copy, phba->HCregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock(&phba->hbalock); /* * Invokes slow-path host attention interrupt handling as appropriate. */ /* status of events with mailbox and link attention */ status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); /* status of events with ELS ring */ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status2 >>= (4*LPFC_ELS_RING); if (status1 || (status2 & HA_RXMASK)) sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); else sp_irq_rc = IRQ_NONE; /* * Invoke fast-path host attention interrupt handling as appropriate. 
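 * The FCP ring status is always checked; the extra ring is only considered when cfg_multi_ring_support == 2.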
*/ /* status of events with FCP ring */ status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); status1 >>= (4*LPFC_FCP_RING); /* status of events with extra ring */ if (phba->cfg_multi_ring_support == 2) { status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); status2 >>= (4*LPFC_EXTRA_RING); } else status2 = 0; if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); else fp_irq_rc = IRQ_NONE; /* Return device-level interrupt handling status */ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; } /* lpfc_sli_intr_handler */ /** * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event * @phba: pointer to lpfc hba data structure. * * This routine is invoked by the worker thread to process all the pending * SLI4 els abort xri events. **/ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; unsigned long iflags; /* First, declare the els xri abort event has been handled */ spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; spin_unlock_irqrestore(&phba->hbalock, iflags); /* Now, handle all the els xri abort events */ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { /* Get the first event from the head of the event queue */ list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, cq_event, struct lpfc_cq_event, list); spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); /* Notify aborted XRI for ELS work queue */ lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); /* Free the event processed back to the free pool */ lpfc_sli4_cq_event_release(phba, cq_event); spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); } spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); } /** * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe * @phba: Pointer to HBA context object. * @irspiocbq: Pointer to work-queue completion queue entry. * * This routine handles an ELS work-queue completion event and construct * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common * discovery engine to handle. * * Return: Pointer to the receive IOCBQ, NULL otherwise. 
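 * NULL is returned when the ELS ring is unavailable or when no command iocb matches the WCQE request tag; in the latter case the irspiocbq is released.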
**/ static struct lpfc_iocbq * lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, struct lpfc_iocbq *irspiocbq) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; struct lpfc_wcqe_complete *wcqe; unsigned long iflags; pring = lpfc_phba_elsring(phba); if (unlikely(!pring)) return NULL; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); if (unlikely(!cmdiocbq)) { spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0386 ELS complete with no corresponding " "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", wcqe->word0, wcqe->total_data_placed, wcqe->parameter, wcqe->word3); lpfc_sli_release_iocbq(phba, irspiocbq); return NULL; } memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128)); memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe)); /* Put the iocb back on the txcmplq */ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); spin_unlock_irqrestore(&pring->ring_lock, iflags); if (bf_get(lpfc_wcqe_c_xb, wcqe)) { spin_lock_irqsave(&phba->hbalock, iflags); irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore(&phba->hbalock, iflags); } return irspiocbq; } inline struct lpfc_cq_event * lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) { struct lpfc_cq_event *cq_event; /* Allocate a new internal CQ_EVENT entry */ cq_event = lpfc_sli4_cq_event_alloc(phba); if (!cq_event) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0602 Failed to alloc CQ_EVENT entry\n"); return NULL; } /* Move the CQE into the event */ memcpy(&cq_event->cqe, entry, size); return cq_event; } /** * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event * @phba: Pointer to HBA context object. * @mcqe: Pointer to mailbox completion queue entry. * * This routine process a mailbox completion queue entry with asynchronous * event. * * Return: true if work posted to worker thread, otherwise false. **/ static bool lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) { struct lpfc_cq_event *cq_event; unsigned long iflags; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0392 Async Event: word0:x%x, word1:x%x, " "word2:x%x, word3:x%x\n", mcqe->word0, mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); if (!cq_event) return false; spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); /* Set the async event flag */ spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag |= ASYNC_EVENT; spin_unlock_irqrestore(&phba->hbalock, iflags); return true; } /** * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event * @phba: Pointer to HBA context object. * @mcqe: Pointer to mailbox completion queue entry. * * This routine process a mailbox completion queue entry with mailbox * completion event. * * Return: true if work posted to worker thread, otherwise false. 
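 *
 * Note: when the MCQE reports an error but the MQE status still reads
 * MBX_SUCCESS, a modified status (LPFC_MBX_ERROR_RANGE | mcqe_status) is
 * written back into the MQE so the error is not lost.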
**/ static bool lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) { uint32_t mcqe_status; MAILBOX_t *mbox, *pmbox; struct lpfc_mqe *mqe; struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *mp; unsigned long iflags; LPFC_MBOXQ_t *pmb; bool workposted = false; int rc; /* If not a mailbox complete MCQE, out by checking mailbox consume */ if (!bf_get(lpfc_trailer_completed, mcqe)) goto out_no_mqe_complete; /* Get the reference to the active mbox command */ spin_lock_irqsave(&phba->hbalock, iflags); pmb = phba->sli.mbox_active; if (unlikely(!pmb)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1832 No pending MBOX command to handle\n"); spin_unlock_irqrestore(&phba->hbalock, iflags); goto out_no_mqe_complete; } spin_unlock_irqrestore(&phba->hbalock, iflags); mqe = &pmb->u.mqe; pmbox = (MAILBOX_t *)&pmb->u.mqe; mbox = phba->mbox; vport = pmb->vport; /* Reset heartbeat timer */ phba->last_completion_time = jiffies; del_timer(&phba->sli.mbox_tmo); /* Move mbox data to caller's mailbox region, do endian swapping */ if (pmb->mbox_cmpl && mbox) lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); /* * For mcqe errors, conditionally move a modified error code to * the mbox so that the error will not be missed. */ mcqe_status = bf_get(lpfc_mcqe_status, mcqe); if (mcqe_status != MB_CQE_STATUS_SUCCESS) { if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) bf_set(lpfc_mqe_status, mqe, (LPFC_MBX_ERROR_RANGE | mcqe_status)); } if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, "MBOX dflt rpi: status:x%x rpi:x%x", mcqe_status, pmbox->un.varWords[0], 0); if (mcqe_status == MB_CQE_STATUS_SUCCESS) { mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; /* Reg_LOGIN of dflt RPI was successful. Mark the * node as having an UNREG_LOGIN in progress to stop * an unsolicited PLOGI from the same NPortId from * starting another mailbox transaction. */ spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag |= NLP_UNREG_INP; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; pmb->ctx_buf = mp; /* No reference taken here. This is a default * RPI reg/immediate unreg cycle. The reference was * taken in the reg rpi path and is released when * this mailbox completes. */ pmb->ctx_ndlp = ndlp; pmb->vport = vport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_BUSY) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0385 rc should " "have been MBX_BUSY\n"); if (rc != MBX_NOT_FINISHED) goto send_current_mbox; } } spin_lock_irqsave(&phba->pport->work_port_lock, iflags); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. 
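         * Instead, release the mailbox posting token, kick off the next
         * pending mailbox command and invoke the completion handler
         * directly in this context.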
*/ if (pmbox->mbxCommand == MBX_HEARTBEAT) { spin_lock_irqsave(&phba->hbalock, iflags); /* Release the mailbox command posting token */ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = NULL; if (bf_get(lpfc_trailer_consumed, mcqe)) lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Post the next mbox command, if there is one */ lpfc_sli4_post_async_mbox(phba); /* Process cmpl now */ if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba, pmb); return false; } /* There is mailbox completion work to queue to the worker thread */ spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_mbox_cmpl_put(phba, pmb); phba->work_ha |= HA_MBATT; spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; send_current_mbox: spin_lock_irqsave(&phba->hbalock, iflags); /* Release the mailbox command posting token */ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; /* Setting active mailbox pointer need to be in sync to flag clear */ phba->sli.mbox_active = NULL; if (bf_get(lpfc_trailer_consumed, mcqe)) lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Wake up worker thread to post the next pending mailbox command */ lpfc_worker_wake_up(phba); return workposted; out_no_mqe_complete: spin_lock_irqsave(&phba->hbalock, iflags); if (bf_get(lpfc_trailer_consumed, mcqe)) lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); spin_unlock_irqrestore(&phba->hbalock, iflags); return false; } /** * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry * @phba: Pointer to HBA context object. * @cq: Pointer to associated CQ * @cqe: Pointer to mailbox completion queue entry. * * This routine process a mailbox completion queue entry, it invokes the * proper mailbox complete handling or asynchronous event handling routine * according to the MCQE's async bit. * * Return: true if work posted to worker thread, otherwise false. **/ static bool lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { struct lpfc_mcqe mcqe; bool workposted; cq->CQ_mbox++; /* Copy the mailbox MCQE and convert endian order as needed */ lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); /* Invoke the proper event handling routine */ if (!bf_get(lpfc_trailer_async, &mcqe)) workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); else workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); return workposted; } /** * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event * @phba: Pointer to HBA context object. * @cq: Pointer to associated CQ * @wcqe: Pointer to work-queue completion queue entry. * * This routine handles an ELS work-queue completion event. * * Return: true if work posted to worker thread, otherwise false. 
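 *
 * Note: the WCQE is stashed in an iocbq obtained from lpfc_sli_get_iocbq()
 * and queued on sp_queue_event with HBA_SP_QUEUE_EVT set; if no iocbq is
 * available the completion is dropped and false is returned.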
**/ static bool lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_complete *wcqe) { struct lpfc_iocbq *irspiocbq; unsigned long iflags; struct lpfc_sli_ring *pring = cq->pring; int txq_cnt = 0; int txcmplq_cnt = 0; /* Check for response status */ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { /* Log the error status */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0357 ELS CQE error: status=x%x: " "CQE: %08x %08x %08x %08x\n", bf_get(lpfc_wcqe_c_status, wcqe), wcqe->word0, wcqe->total_data_placed, wcqe->parameter, wcqe->word3); } /* Get an irspiocbq for later ELS response processing use */ irspiocbq = lpfc_sli_get_iocbq(phba); if (!irspiocbq) { if (!list_empty(&pring->txq)) txq_cnt++; if (!list_empty(&pring->txcmplq)) txcmplq_cnt++; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " "els_txcmplq_cnt=%d\n", txq_cnt, phba->iocb_cnt, txcmplq_cnt); return false; } /* Save off the slow-path queue event for work thread to process */ memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&irspiocbq->cq_event.list, &phba->sli4_hba.sp_queue_event); phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); return true; } /** * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event * @phba: Pointer to HBA context object. * @wcqe: Pointer to work-queue completion queue entry. * * This routine handles slow-path WQ entry consumed event by invoking the * proper WQ release routine to the slow-path WQ. **/ static void lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_wcqe_release *wcqe) { /* sanity check on queue memory */ if (unlikely(!phba->sli4_hba.els_wq)) return; /* Check for the slow-path ELS work queue */ if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) lpfc_sli4_wq_release(phba->sli4_hba.els_wq, bf_get(lpfc_wcqe_r_wqe_index, wcqe)); else lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2579 Slow-path wqe consume event carries " "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", bf_get(lpfc_wcqe_r_wqe_index, wcqe), phba->sli4_hba.els_wq->queue_id); } /** * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event * @phba: Pointer to HBA context object. * @cq: Pointer to a WQ completion queue. * @wcqe: Pointer to work-queue completion queue entry. * * This routine handles an XRI abort event. * * Return: true if work posted to worker thread, otherwise false. 
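 *
 * Note: IO (FCP/NVME) XRI aborts are completed directly in this context,
 * while ELS and NVME LS aborts are queued on sp_els_xri_aborted_work_queue
 * with ELS_XRI_ABORT_EVENT set for the worker thread to finish.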
**/ static bool lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct sli4_wcqe_xri_aborted *wcqe) { bool workposted = false; struct lpfc_cq_event *cq_event; unsigned long iflags; switch (cq->subtype) { case LPFC_IO: lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Notify aborted XRI for NVME work queue */ if (phba->nvmet_support) lpfc_sli4_nvmet_xri_aborted(phba, wcqe); } workposted = false; break; case LPFC_NVME_LS: /* NVME LS uses ELS resources */ case LPFC_ELS: cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe)); if (!cq_event) { workposted = false; break; } cq_event->hdwq = cq->hdwq; spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); list_add_tail(&cq_event->list, &phba->sli4_hba.sp_els_xri_aborted_work_queue); /* Set the els xri abort event flag */ phba->hba_flag |= ELS_XRI_ABORT_EVENT; spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); workposted = true; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0603 Invalid CQ subtype %d: " "%08x %08x %08x %08x\n", cq->subtype, wcqe->word0, wcqe->parameter, wcqe->word2, wcqe->word3); workposted = false; break; } return workposted; } #define FC_RCTL_MDS_DIAGS 0xF4 /** * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry * @phba: Pointer to HBA context object. * @rcqe: Pointer to receive-queue completion queue entry. * * This routine process a receive-queue completion queue entry. * * Return: true if work posted to worker thread, otherwise false. **/ static bool lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) { bool workposted = false; struct fc_frame_header *fc_hdr; struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; struct lpfc_nvmet_tgtport *tgtp; struct hbq_dmabuf *dma_buf; uint32_t status, rq_id; unsigned long iflags; /* sanity check on queue memory */ if (unlikely(!hrq) || unlikely(!drq)) return workposted; if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); else rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); if (rq_id != hrq->queue_id) goto out; status = bf_get(lpfc_rcqe_status, rcqe); switch (status) { case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2537 Receive Frame Truncated!!\n"); fallthrough; case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); if (!dma_buf) { hrq->RQ_no_buf_found++; spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } hrq->RQ_rcv_buf++; hrq->RQ_buf_posted--; memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { spin_unlock_irqrestore(&phba->hbalock, iflags); /* Handle MDS Loopback frames */ if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); else lpfc_in_buf_free(phba, &dma_buf->dbuf); break; } /* save off the frame for the work thread to process */ list_add_tail(&dma_buf->cq_event.list, &phba->sli4_hba.sp_queue_event); /* Frame received */ phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; case FC_STATUS_INSUFF_BUF_FRM_DISC: if (phba->nvmet_support) { tgtp = phba->targetport->private; lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, "6402 RQE Error x%x, posted %d err_cnt " "%d: %x %x %x\n", status, hrq->RQ_buf_posted, hrq->RQ_no_posted_buf, atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } fallthrough; case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; case FC_STATUS_RQ_DMA_FAILURE: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2564 RQE DMA Error x%x, x%08x x%08x x%08x " "x%08x\n", status, rcqe->word0, rcqe->word1, rcqe->word2, rcqe->word3); /* If IV set, no further recovery */ if (bf_get(lpfc_rcqe_iv, rcqe)) break; /* recycle consumed resource */ spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); if (!dma_buf) { hrq->RQ_no_buf_found++; spin_unlock_irqrestore(&phba->hbalock, iflags); break; } hrq->RQ_rcv_buf++; hrq->RQ_buf_posted--; spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_in_buf_free(phba, &dma_buf->dbuf); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2565 Unexpected RQE Status x%x, w0-3 x%08x " "x%08x x%08x x%08x\n", status, rcqe->word0, rcqe->word1, rcqe->word2, rcqe->word3); break; } out: return workposted; } /** * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry * @phba: Pointer to HBA context object. * @cq: Pointer to the completion queue. * @cqe: Pointer to a completion queue entry. * * This routine process a slow-path work-queue or receive queue completion queue * entry. * * Return: true if work posted to worker thread, otherwise false. **/ static bool lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { struct lpfc_cqe cqevt; bool workposted = false; /* Copy the work queue CQE and convert endian order if needed */ lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); /* Check and process for different type of WCQE and dispatch */ switch (bf_get(lpfc_cqe_code, &cqevt)) { case CQE_CODE_COMPL_WQE: /* Process the WQ/RQ complete event */ phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&cqevt); break; case CQE_CODE_RELEASE_WQE: /* Process the WQ release event */ lpfc_sli4_sp_handle_rel_wcqe(phba, (struct lpfc_wcqe_release *)&cqevt); break; case CQE_CODE_XRI_ABORTED: /* Process the WQ XRI abort event */ phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, (struct sli4_wcqe_xri_aborted *)&cqevt); break; case CQE_CODE_RECEIVE: case CQE_CODE_RECEIVE_V1: /* Process the RQ event */ phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_rcqe(phba, (struct lpfc_rcqe *)&cqevt); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0388 Not a valid WCQE code: x%x\n", bf_get(lpfc_cqe_code, &cqevt)); break; } return workposted; } /** * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry * @phba: Pointer to HBA context object. * @eqe: Pointer to fast-path event queue entry. * @speq: Pointer to slow-path event queue. * * This routine process a event queue entry from the slow-path event queue. * It will check the MajorCode and MinorCode to determine this is for a * completion event on a completion queue, if not, an error shall be logged * and just return. 
Otherwise, it will get to the corresponding completion * queue and process all the entries on that completion queue, rearm the * completion queue, and then return. * **/ static void lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, struct lpfc_queue *speq) { struct lpfc_queue *cq = NULL, *childq; uint16_t cqid; int ret = 0; /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); list_for_each_entry(childq, &speq->child_list, list) { if (childq->queue_id == cqid) { cq = childq; break; } } if (unlikely(!cq)) { if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0365 Slow-path CQ identifier " "(%d) does not exist\n", cqid); return; } /* Save EQ associated with this CQ */ cq->assoc_qp = speq; if (is_kdump_kernel()) ret = queue_work(phba->wq, &cq->spwork); else ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0390 Cannot schedule queue work " "for CQ eqcqid=%d, cqid=%d on CPU %d\n", cqid, cq->queue_id, raw_smp_processor_id()); } /** * __lpfc_sli4_process_cq - Process elements of a CQ * @phba: Pointer to HBA context object. * @cq: Pointer to CQ to be processed * @handler: Routine to process each cqe * @delay: Pointer to usdelay to set in case of rescheduling of the handler * * This routine processes completion queue entries in a CQ. While a valid * queue element is found, the handler is called. During processing checks * are made for periodic doorbell writes to let the hardware know of * element consumption. * * If the max limit on cqes to process is hit, or there are no more valid * entries, the loop stops. If we processed a sufficient number of elements, * meaning there is sufficient load, rather than rearming and generating * another interrupt, a cq rescheduling delay will be set. A delay of 0 * indicates no rescheduling. * * Returns True if work scheduled, False otherwise. **/ static bool __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *), unsigned long *delay) { struct lpfc_cqe *cqe; bool workposted = false; int count = 0, consumed = 0; bool arm = true; /* default - no reschedule */ *delay = 0; if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) goto rearm_and_exit; /* Process all the entries to the CQ */ cq->q_flag = 0; cqe = lpfc_sli4_cq_get(cq); while (cqe) { workposted |= handler(phba, cq, cqe); __lpfc_sli4_consume_cqe(phba, cq, cqe); consumed++; if (!(++count % cq->max_proc_limit)) break; if (!(count % cq->notify_interval)) { phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_NOARM); consumed = 0; cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; } if (count == LPFC_NVMET_CQ_NOTIFY) cq->q_flag |= HBA_NVMET_CQ_NOTIFY; cqe = lpfc_sli4_cq_get(cq); } if (count >= phba->cfg_cq_poll_threshold) { *delay = 1; arm = false; } /* Track the max number of CQEs processed in 1 EQ */ if (count > cq->CQ_max_cqe) cq->CQ_max_cqe = count; cq->assoc_qp->EQ_cqe_cnt += count; /* Catch the no cq entry condition */ if (unlikely(count == 0)) lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0369 No entry from completion queue " "qid=%d\n", cq->queue_id); xchg(&cq->queue_claimed, 0); rearm_and_exit: phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, arm ? 
LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); return workposted; } /** * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry * @cq: pointer to CQ to process * * This routine calls the cq processing routine with a handler specific * to the type of queue bound to it. * * The CQ routine returns two values: the first is the calling status, * which indicates whether work was queued to the background discovery * thread. If true, the routine should wakeup the discovery thread; * the second is the delay parameter. If non-zero, rather than rearming * the CQ and yet another interrupt, the CQ handler should be queued so * that it is processed in a subsequent polling action. The value of * the delay indicates when to reschedule it. **/ static void __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) { struct lpfc_hba *phba = cq->phba; unsigned long delay; bool workposted = false; int ret = 0; /* Process and rearm the CQ */ switch (cq->type) { case LPFC_MCQ: workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_mcqe, &delay); break; case LPFC_WCQ: if (cq->subtype == LPFC_IO) workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, &delay); else workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_cqe, &delay); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0370 Invalid completion queue type (%d)\n", cq->type); return; } if (delay) { if (is_kdump_kernel()) ret = queue_delayed_work(phba->wq, &cq->sched_spwork, delay); else ret = queue_delayed_work_on(cq->chann, phba->wq, &cq->sched_spwork, delay); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0394 Cannot schedule queue work " "for cqid=%d on CPU %d\n", cq->queue_id, cq->chann); } /* wake up worker thread if there are works to be done */ if (workposted) lpfc_worker_wake_up(phba); } /** * lpfc_sli4_sp_process_cq - slow-path work handler when started by * interrupt * @work: pointer to work element * * translates from the work handler and calls the slow-path handler. **/ static void lpfc_sli4_sp_process_cq(struct work_struct *work) { struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); __lpfc_sli4_sp_process_cq(cq); } /** * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer * @work: pointer to work element * * translates from the work handler and calls the slow-path handler. **/ static void lpfc_sli4_dly_sp_process_cq(struct work_struct *work) { struct lpfc_queue *cq = container_of(to_delayed_work(work), struct lpfc_queue, sched_spwork); __lpfc_sli4_sp_process_cq(cq); } /** * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry * @phba: Pointer to HBA context object. * @cq: Pointer to associated CQ * @wcqe: Pointer to work-queue completion queue entry. * * This routine process a fast-path work queue completion entry from fast-path * event queue for FCP command response completion. **/ static void lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_complete *wcqe) { struct lpfc_sli_ring *pring = cq->pring; struct lpfc_iocbq *cmdiocbq; unsigned long iflags; /* Check for response status */ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { /* If resource errors reported from HBA, reduce queue * depth of the SCSI device. 
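         * An IOSTAT_LOCAL_REJECT completion with IOERR_NO_RESOURCES as the
         * parameter triggers the rampdown handler below.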
*/ if (((bf_get(lpfc_wcqe_c_status, wcqe) == IOSTAT_LOCAL_REJECT)) && ((wcqe->parameter & IOERR_PARAM_MASK) == IOERR_NO_RESOURCES)) phba->lpfc_rampdown_queue_depth(phba); /* Log the cmpl status */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0373 FCP CQE cmpl: status=x%x: " "CQE: %08x %08x %08x %08x\n", bf_get(lpfc_wcqe_c_status, wcqe), wcqe->word0, wcqe->total_data_placed, wcqe->parameter, wcqe->word3); } /* Look up the FCP command IOCB and create pseudo response IOCB */ spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); spin_unlock_irqrestore(&pring->ring_lock, iflags); if (unlikely(!cmdiocbq)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0374 FCP complete with no corresponding " "cmdiocb: iotag (%d)\n", bf_get(lpfc_wcqe_c_request_tag, wcqe)); return; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS cmdiocbq->isr_timestamp = cq->isr_timestamp; #endif if (bf_get(lpfc_wcqe_c_xb, wcqe)) { spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore(&phba->hbalock, iflags); } if (cmdiocbq->cmd_cmpl) { /* For FCP the flag is cleared in cmd_cmpl */ if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) && cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) { spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; spin_unlock_irqrestore(&phba->hbalock, iflags); } /* Pass the cmd_iocb and the wcqe to the upper layer */ memcpy(&cmdiocbq->wcqe_cmpl, wcqe, sizeof(struct lpfc_wcqe_complete)); cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " "iotag: (%d)\n", bf_get(lpfc_wcqe_c_request_tag, wcqe)); } } /** * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event * @phba: Pointer to HBA context object. * @cq: Pointer to completion queue. * @wcqe: Pointer to work-queue completion queue entry. * * This routine handles an fast-path WQ entry consumed event by invoking the * proper WQ release routine to the slow-path WQ. **/ static void lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_release *wcqe) { struct lpfc_queue *childwq; bool wqid_matched = false; uint16_t hba_wqid; /* Check for fast-path FCP work queue release */ hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); list_for_each_entry(childwq, &cq->child_list, list) { if (childwq->queue_id == hba_wqid) { lpfc_sli4_wq_release(childwq, bf_get(lpfc_wcqe_r_wqe_index, wcqe)); if (childwq->q_flag & HBA_NVMET_WQFULL) lpfc_nvmet_wqfull_process(phba, childwq); wqid_matched = true; break; } } /* Report warning log message if no match found */ if (wqid_matched != true) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2580 Fast-path wqe consume event carries " "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); } /** * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry * @phba: Pointer to HBA context object. * @cq: Pointer to completion queue. * @rcqe: Pointer to receive-queue completion queue entry. * * This routine process a receive-queue completion queue entry. * * Return: true if work posted to worker thread, otherwise false. 
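 *
 * Note: FCP command frames that pass the basic F_CTL and SEQ_CNT checks are
 * handed to lpfc_nvmet_unsol_fcp_event(); any other frame is dropped and its
 * buffer is returned to the RQ buffer pool.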
**/ static bool lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_rcqe *rcqe) { bool workposted = false; struct lpfc_queue *hrq; struct lpfc_queue *drq; struct rqb_dmabuf *dma_buf; struct fc_frame_header *fc_hdr; struct lpfc_nvmet_tgtport *tgtp; uint32_t status, rq_id; unsigned long iflags; uint32_t fctl, idx; if ((phba->nvmet_support == 0) || (phba->sli4_hba.nvmet_cqset == NULL)) return workposted; idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; drq = phba->sli4_hba.nvmet_mrq_data[idx]; /* sanity check on queue memory */ if (unlikely(!hrq) || unlikely(!drq)) return workposted; if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); else rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); if ((phba->nvmet_support == 0) || (rq_id != hrq->queue_id)) return workposted; status = bf_get(lpfc_rcqe_status, rcqe); switch (status) { case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6126 Receive Frame Truncated!!\n"); fallthrough; case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); dma_buf = lpfc_sli_rqbuf_get(phba, hrq); if (!dma_buf) { hrq->RQ_no_buf_found++; spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } spin_unlock_irqrestore(&phba->hbalock, iflags); hrq->RQ_rcv_buf++; hrq->RQ_buf_posted--; fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; /* Just some basic sanity checks on FCP Command frame */ fctl = (fc_hdr->fh_f_ctl[0] << 16 | fc_hdr->fh_f_ctl[1] << 8 | fc_hdr->fh_f_ctl[2]); if (((fctl & (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ goto drop; if (fc_hdr->fh_type == FC_TYPE_FCP) { dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); lpfc_nvmet_unsol_fcp_event( phba, idx, dma_buf, cq->isr_timestamp, cq->q_flag & HBA_NVMET_CQ_NOTIFY); return false; } drop: lpfc_rq_buf_free(phba, &dma_buf->hbuf); break; case FC_STATUS_INSUFF_BUF_FRM_DISC: if (phba->nvmet_support) { tgtp = phba->targetport->private; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6401 RQE Error x%x, posted %d err_cnt " "%d: %x %x %x\n", status, hrq->RQ_buf_posted, hrq->RQ_no_posted_buf, atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } fallthrough; case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ break; case FC_STATUS_RQ_DMA_FAILURE: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2575 RQE DMA Error x%x, x%08x x%08x x%08x " "x%08x\n", status, rcqe->word0, rcqe->word1, rcqe->word2, rcqe->word3); /* If IV set, no further recovery */ if (bf_get(lpfc_rcqe_iv, rcqe)) break; /* recycle consumed resource */ spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); dma_buf = lpfc_sli_rqbuf_get(phba, hrq); if (!dma_buf) { hrq->RQ_no_buf_found++; spin_unlock_irqrestore(&phba->hbalock, iflags); break; } hrq->RQ_rcv_buf++; hrq->RQ_buf_posted--; spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_rq_buf_free(phba, &dma_buf->hbuf); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2576 Unexpected RQE Status x%x, w0-3 x%08x " "x%08x x%08x x%08x\n", status, rcqe->word0, rcqe->word1, rcqe->word2, rcqe->word3); break; } out: return workposted; } /** * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry * @phba: adapter with cq * @cq: 
Pointer to the completion queue. * @cqe: Pointer to fast-path completion queue entry. * * This routine process a fast-path work queue completion entry from fast-path * event queue for FCP command response completion. * * Return: true if work posted to worker thread, otherwise false. **/ static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { struct lpfc_wcqe_release wcqe; bool workposted = false; /* Copy the work queue CQE and convert endian order if needed */ lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); /* Check and process for different type of WCQE and dispatch */ switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { case CQE_CODE_COMPL_WQE: case CQE_CODE_NVME_ERSP: cq->CQ_wq++; /* Process the WQ complete event */ phba->last_completion_time = jiffies; if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&wcqe); break; case CQE_CODE_RELEASE_WQE: cq->CQ_release_wqe++; /* Process the WQ release event */ lpfc_sli4_fp_handle_rel_wcqe(phba, cq, (struct lpfc_wcqe_release *)&wcqe); break; case CQE_CODE_XRI_ABORTED: cq->CQ_xri_aborted++; /* Process the WQ XRI abort event */ phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, (struct sli4_wcqe_xri_aborted *)&wcqe); break; case CQE_CODE_RECEIVE_V1: case CQE_CODE_RECEIVE: phba->last_completion_time = jiffies; if (cq->subtype == LPFC_NVMET) { workposted = lpfc_sli4_nvmet_handle_rcqe( phba, cq, (struct lpfc_rcqe *)&wcqe); } break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0144 Not a valid CQE code: x%x\n", bf_get(lpfc_wcqe_c_code, &wcqe)); break; } return workposted; } /** * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry * @cq: Pointer to CQ to be processed * * This routine calls the cq processing routine with the handler for * fast path CQEs. * * The CQ routine returns two values: the first is the calling status, * which indicates whether work was queued to the background discovery * thread. If true, the routine should wakeup the discovery thread; * the second is the delay parameter. If non-zero, rather than rearming * the CQ and yet another interrupt, the CQ handler should be queued so * that it is processed in a subsequent polling action. The value of * the delay indicates when to reschedule it. **/ static void __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) { struct lpfc_hba *phba = cq->phba; unsigned long delay; bool workposted = false; int ret; /* process and rearm the CQ */ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, &delay); if (delay) { if (is_kdump_kernel()) ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, delay); else ret = queue_delayed_work_on(cq->chann, phba->wq, &cq->sched_irqwork, delay); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0367 Cannot schedule queue work " "for cqid=%d on CPU %d\n", cq->queue_id, cq->chann); } /* wake up worker thread if there are works to be done */ if (workposted) lpfc_worker_wake_up(phba); } /** * lpfc_sli4_hba_process_cq - fast-path work handler when started by * interrupt * @work: pointer to work element * * translates from the work handler and calls the fast-path handler. 
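 *
 * Runs in process context on phba->wq; the work is normally queued on the
 * CPU selected in cq->chann, or on any CPU when running in a kdump kernel.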
**/ static void lpfc_sli4_hba_process_cq(struct work_struct *work) { struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); __lpfc_sli4_hba_process_cq(cq); } /** * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry * @phba: Pointer to HBA context object. * @eq: Pointer to the queue structure. * @eqe: Pointer to fast-path event queue entry. * @poll_mode: poll_mode to execute processing the cq. * * This routine process a event queue entry from the fast-path event queue. * It will check the MajorCode and MinorCode to determine this is for a * completion event on a completion queue, if not, an error shall be logged * and just return. Otherwise, it will get to the corresponding completion * queue and process all the entries on the completion queue, rearm the * completion queue, and then return. **/ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) { struct lpfc_queue *cq = NULL; uint32_t qidx = eq->hdwq; uint16_t cqid, id; int ret; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0366 Not a valid completion " "event: majorcode=x%x, minorcode=x%x\n", bf_get_le32(lpfc_eqe_major_code, eqe), bf_get_le32(lpfc_eqe_minor_code, eqe)); return; } /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); /* Use the fast lookup method first */ if (cqid <= phba->sli4_hba.cq_max) { cq = phba->sli4_hba.cq_lookup[cqid]; if (cq) goto work_cq; } /* Next check for NVMET completion */ if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { id = phba->sli4_hba.nvmet_cqset[0]->queue_id; if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { /* Process NVMET unsol rcv */ cq = phba->sli4_hba.nvmet_cqset[cqid - id]; goto process_cq; } } if (phba->sli4_hba.nvmels_cq && (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { /* Process NVME unsol rcv */ cq = phba->sli4_hba.nvmels_cq; } /* Otherwise this is a Slow path event */ if (cq == NULL) { lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hdwq[qidx].hba_eq); return; } process_cq: if (unlikely(cqid != cq->queue_id)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0368 Miss-matched fast-path completion " "queue identifier: eqcqid=%d, fcpcqid=%d\n", cqid, cq->queue_id); return; } work_cq: #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) if (phba->ktime_on) cq->isr_timestamp = ktime_get_ns(); else cq->isr_timestamp = 0; #endif switch (poll_mode) { case LPFC_THREADED_IRQ: __lpfc_sli4_hba_process_cq(cq); break; case LPFC_QUEUE_WORK: default: if (is_kdump_kernel()) ret = queue_work(phba->wq, &cq->irqwork); else ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0383 Cannot schedule queue work " "for CQ eqcqid=%d, cqid=%d on CPU %d\n", cqid, cq->queue_id, raw_smp_processor_id()); break; } } /** * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer * @work: pointer to work element * * translates from the work handler and calls the fast-path handler. **/ static void lpfc_sli4_dly_hba_process_cq(struct work_struct *work) { struct lpfc_queue *cq = container_of(to_delayed_work(work), struct lpfc_queue, sched_irqwork); __lpfc_sli4_hba_process_cq(cq); } /** * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device * @irq: Interrupt number. * @dev_id: The device context pointer. 
* * This function is directly called from the PCI layer as an interrupt * service routine when device with SLI-4 interface spec is enabled with * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB * ring event in the HBA. However, when the device is enabled with either * MSI or Pin-IRQ interrupt mode, this function is called as part of the * device-level interrupt handler. When the PCI slot is in error recovery * or the HBA is undergoing initialization, the interrupt handler will not * process the interrupt. The SCSI FCP fast-path ring event are handled in * the intrrupt context. This function is called without any lock held. * It gets the hbalock to access and update SLI data structures. Note that, * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is * equal to that of FCP CQ index. * * The link attention and ELS ring attention events are handled * by the worker thread. The interrupt handler signals the worker thread * and returns for these events. This function is called without any lock * held. It gets the hbalock to access and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD * when interrupt is scheduled to be handled from a threaded irq context, or * else returns IRQ_NONE. **/ irqreturn_t lpfc_sli4_hba_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; unsigned long iflag; int hba_eqidx; int ecount = 0; struct lpfc_eq_intr_info *eqi; /* Get the driver's phba structure from the dev_id */ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; phba = hba_eq_hdl->phba; hba_eqidx = hba_eq_hdl->idx; if (unlikely(!phba)) return IRQ_NONE; if (unlikely(!phba->sli4_hba.hdwq)) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; if (unlikely(!fpeq)) return IRQ_NONE; /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { /* Check again for link_state with lock held */ spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) /* Flush, clear interrupt, and rearm the EQ */ lpfc_sli4_eqcq_flush(phba, fpeq); spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } switch (fpeq->poll_mode) { case LPFC_THREADED_IRQ: /* CGN mgmt is mutually exclusive from irq processing */ if (phba->cmf_active_mode == LPFC_CFG_OFF) return IRQ_WAKE_THREAD; fallthrough; case LPFC_QUEUE_WORK: default: eqi = this_cpu_ptr(phba->sli4_hba.eq_info); eqi->icnt++; fpeq->last_cpu = raw_smp_processor_id(); if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && fpeq->q_flag & HBA_EQ_DELAY_CHK && phba->cfg_auto_imax && fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && phba->sli.sli_flag & LPFC_SLI_USE_EQDR) lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); /* process and rearm the EQ */ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, LPFC_QUEUE_WORK); if (unlikely(ecount == 0)) { fpeq->EQ_no_entry++; if (phba->intr_type == MSIX) /* MSI-X treated interrupt served as no EQ share INT */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0358 MSI-X interrupt with no EQE\n"); else /* Non MSI-X treated on interrupt as EQ share INT */ return IRQ_NONE; } } return IRQ_HANDLED; } /* lpfc_sli4_hba_intr_handler */ /** * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device * @irq: Interrupt number. * @dev_id: The device context pointer. 
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
        struct lpfc_hba *phba;
        irqreturn_t hba_irq_rc;
        bool hba_handled = false;
        int qidx;

        /* Get the driver's phba structure from the dev_id */
        phba = (struct lpfc_hba *)dev_id;

        if (unlikely(!phba))
                return IRQ_NONE;

        /*
         * Invoke fast-path host attention interrupt handling as appropriate.
         */
        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
                hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
                                        &phba->sli4_hba.hba_eq_hdl[qidx]);
                if (hba_irq_rc == IRQ_HANDLED)
                        hba_handled |= true;
        }

        return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
        struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
        struct lpfc_queue *eq;

        rcu_read_lock();

        list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
                lpfc_sli4_poll_eq(eq);
        if (!list_empty(&phba->poll_list))
                mod_timer(&phba->cpuhp_poll_timer,
                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));

        rcu_read_unlock();
}

static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
        struct lpfc_hba *phba = eq->phba;

        /* kickstart slowpath processing if needed */
        if (list_empty(&phba->poll_list))
                mod_timer(&phba->cpuhp_poll_timer,
                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));

        list_add_rcu(&eq->_poll_list, &phba->poll_list);
        synchronize_rcu();
}

static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
        struct lpfc_hba *phba = eq->phba;

        /* Disable slowpath processing for this eq.  Kick start the eq
         * by RE-ARMING the eq's ASAP
         */
        list_del_rcu(&eq->_poll_list);
        synchronize_rcu();

        if (list_empty(&phba->poll_list))
                del_timer_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
        struct lpfc_queue *eq, *next;

        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
                list_del(&eq->_poll_list);

        INIT_LIST_HEAD(&phba->poll_list);
        synchronize_rcu();
}

static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
        if (mode == eq->mode)
                return;
        /*
         * Currently this function is only called during a hotplug
         * event and the cpu on which this function is executing
         * is going offline.  By now the hotplug has instructed
         * the scheduler to remove this cpu from the cpu active mask.
         * So we don't need to worry about being put aside by the
         * scheduler for a high priority process.  Yes, interrupts
         * could still come in, but they are known to retire ASAP.
         */

        /* Disable polling in the fastpath */
        WRITE_ONCE(eq->mode, mode);
        /* flush out the store buffer */
        smp_wmb();

        /*
         * Add this eq to the polling list and start polling.  For
         * a grace period both the interrupt handler and the poller
         * will try to process the eq _but_ that's fine.  We have a
         * synchronization mechanism in place (queue_claimed) to
         * deal with it.  This is just a draining phase for the
         * interrupt handler (not the eq's), as we have guaranteed
         * through the barrier that all the CPUs have seen the new
         * CQ_POLLED state, which will effectively disable the
         * REARMING of the EQ.  The whole idea is that eq's die off
         * eventually as we are not rearming EQ's anymore.
         */
        mode ? lpfc_sli4_add_to_poll_list(eq) :
               lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
        struct lpfc_hba *phba = eq->phba;

        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

        /* Kick start for the pending io's in h/w.
         * Once we switch back to interrupt processing on an eq
         * the io path completion will only arm eq's when it
         * receives a completion.  But since eq's are in disarmed
         * state it doesn't receive a completion.  This creates a
         * deadlock scenario.
         */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
        struct lpfc_dmabuf *dmabuf;

        if (!queue)
                return;

        if (!list_empty(&queue->wq_list))
                list_del(&queue->wq_list);

        while (!list_empty(&queue->page_list)) {
                list_remove_head(&queue->page_list, dmabuf,
                                 struct lpfc_dmabuf, list);
                dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
                                  dmabuf->virt, dmabuf->phys);
                kfree(dmabuf);
        }
        if (queue->rqbp) {
                lpfc_free_rq_buffer(queue->phba, queue);
                kfree(queue->rqbp);
        }

        if (!list_empty(&queue->cpu_list))
                list_del(&queue->cpu_list);

        kfree(queue);
        return;
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
                      uint32_t entry_size, uint32_t entry_count, int cpu)
{
        struct lpfc_queue *queue;
        struct lpfc_dmabuf *dmabuf;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
        uint16_t x, pgcnt;

        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = page_size;

        pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

        /* If needed, adjust page count to match the max the adapter supports */
        if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
                pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

        queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
                             GFP_KERNEL, cpu_to_node(cpu));
        if (!queue)
                return NULL;

        INIT_LIST_HEAD(&queue->list);
        INIT_LIST_HEAD(&queue->_poll_list);
        INIT_LIST_HEAD(&queue->wq_list);
        INIT_LIST_HEAD(&queue->wqfull_list);
        INIT_LIST_HEAD(&queue->page_list);
        INIT_LIST_HEAD(&queue->child_list);
        INIT_LIST_HEAD(&queue->cpu_list);

        /* Set queue parameters now.  If the system cannot provide memory
         * resources, the free routine needs to know what was allocated.
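         * The page pointer array (q_pgs) lives in the same allocation,
         * immediately after the queue structure itself.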
*/ queue->page_count = pgcnt; queue->q_pgs = (void **)&queue[1]; queue->entry_cnt_per_pg = hw_page_size / entry_size; queue->entry_size = entry_size; queue->entry_count = entry_count; queue->page_size = hw_page_size; queue->phba = phba; for (x = 0; x < queue->page_count; x++) { dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL, dev_to_node(&phba->pcidev->dev)); if (!dmabuf) goto out_fail; dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, hw_page_size, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); goto out_fail; } dmabuf->buffer_tag = x; list_add_tail(&dmabuf->list, &queue->page_list); /* use lpfc_sli4_qe to index a paritcular entry in this page */ queue->q_pgs[x] = dmabuf->virt; } INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); /* notify_interval will be set during q creation */ return queue; out_fail: lpfc_sli4_queue_free(queue); return NULL; } /** * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory * @phba: HBA structure that indicates port to create a queue on. * @pci_barset: PCI BAR set flag. * * This function shall perform iomap of the specified PCI BAR address to host * memory address if not already done so and return it. The returned host * memory address can be NULL. */ static void __iomem * lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) { if (!phba->pcidev) return NULL; switch (pci_barset) { case WQ_PCI_BAR_0_AND_1: return phba->pci_bar0_memmap_p; case WQ_PCI_BAR_2_AND_3: return phba->pci_bar2_memmap_p; case WQ_PCI_BAR_4_AND_5: return phba->pci_bar4_memmap_p; default: break; } return NULL; } /** * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs * @phba: HBA structure that EQs are on. * @startq: The starting EQ index to modify * @numq: The number of EQs (consecutive indexes) to modify * @usdelay: amount of delay * * This function revises the EQ delay on 1 or more EQs. The EQ delay * is set either by writing to a register (if supported by the SLI Port) * or by mailbox command. The mailbox command allows several EQs to be * updated at once. * * The @phba struct is used to send a mailbox command to HBA. The @startq * is used to get the starting EQ index to change. The @numq value is * used to specify how many consecutive EQ indexes, starting at EQ index, * are to be changed. This function is asynchronous and will wait for any * mailbox commands to finish before returning. * * On success this function will return a zero. If unable to allocate * enough memory this function will return -ENOMEM. If a mailbox command * fails this function will return -ENXIO. Note: on ENXIO, some EQs may * have had their delay multipler changed. **/ void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, uint32_t numq, uint32_t usdelay) { struct lpfc_mbx_modify_eq_delay *eq_delay; LPFC_MBOXQ_t *mbox; struct lpfc_queue *eq; int cnt = 0, rc, length; uint32_t shdr_status, shdr_add_status; uint32_t dmult; int qidx; union lpfc_sli4_cfg_shdr *shdr; if (startq >= phba->cfg_irq_chann) return; if (usdelay > 0xFFFF) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, "6429 usdelay %d too large. 
Scaled down to " "0xFFFF.\n", usdelay); usdelay = 0xFFFF; } /* set values by EQ_DELAY register if supported */ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; if (!eq) continue; lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); if (++cnt >= numq) break; } return; } /* Otherwise, set values by mailbox cmd */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6428 Failed allocating mailbox cmd buffer." " EQ delay was not set.\n"); return; } length = (sizeof(struct lpfc_mbx_modify_eq_delay) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, length, LPFC_SLI4_MBX_EMBED); eq_delay = &mbox->u.mqe.un.eq_delay; /* Calculate delay multiper from maximum interrupt per second */ dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; if (dmult) dmult--; if (dmult > LPFC_DMULT_MAX) dmult = LPFC_DMULT_MAX; for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; if (!eq) continue; eq->q_mode = usdelay; eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; eq_delay->u.request.eq[cnt].phase = 0; eq_delay->u.request.eq[cnt].delay_multi = dmult; if (++cnt >= numq) break; } eq_delay->u.request.num_eq = cnt; mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->ctx_ndlp = NULL; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2512 MODIFY_EQ_DELAY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); } mempool_free(mbox, phba->mbox_mem_pool); return; } /** * lpfc_eq_create - Create an Event Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @eq: The queue structure to use to create the event queue. * @imax: The maximum interrupt per second limit. * * This function creates an event queue, as detailed in @eq, on a port, * described by @phba by sending an EQ_CREATE mailbox command to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @eq struct * is used to get the entry count and entry size that are necessary to * determine the number of pages to allocate and use for this queue. This * function will send the EQ_CREATE mailbox command to the HBA to setup the * event queue. This function is asynchronous and will wait for the mailbox * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. 
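 *
 * Note: only entry counts of 256, 512, 1024, 2048 and 4096 are programmed
 * directly; other values of at least 256 fall back to 256, and smaller
 * values fail with -EINVAL.
 *
 * Hypothetical usage sketch (illustration only; the eq_esize/eq_ecnt fields
 * and the imax/cpu values are assumptions, the calls are the ones defined in
 * this file):
 *
 *	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecnt, cpu);
 *	if (eq && lpfc_eq_create(phba, eq, imax))
 *		lpfc_sli4_queue_free(eq);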
**/ int lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) { struct lpfc_mbx_eq_create *eq_create; LPFC_MBOXQ_t *mbox; int rc, length, status = 0; struct lpfc_dmabuf *dmabuf; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint16_t dmult; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; /* sanity check on queue memory */ if (!eq) return -ENODEV; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_eq_create) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_EQ_CREATE, length, LPFC_SLI4_MBX_EMBED); eq_create = &mbox->u.mqe.un.eq_create; shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, eq->page_count); bf_set(lpfc_eq_context_size, &eq_create->u.request.context, LPFC_EQE_SIZE); bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); /* Use version 2 of CREATE_EQ if eqav is set */ if (phba->sli4_hba.pc_sli4_params.eqav) { bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, phba->sli4_hba.pc_sli4_params.eqav); } /* don't setup delay multiplier using EQ_CREATE */ dmult = 0; bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, dmult); switch (eq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0360 Unsupported EQ count. (%d)\n", eq->entry_count); if (eq->entry_count < 256) { status = -EINVAL; goto out; } fallthrough; /* otherwise default to smallest count */ case 256: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_256); break; case 512: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_512); break; case 1024: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_1024); break; case 2048: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_2048); break; case 4096: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_4096); break; } list_for_each_entry(dmabuf, &eq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->ctx_buf = NULL; mbox->ctx_ndlp = NULL; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2500 EQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; } eq->type = LPFC_EQ; eq->subtype = LPFC_NONE; eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); if (eq->queue_id == 0xFFFF) status = -ENXIO; eq->host_index = 0; eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; out: mempool_free(mbox, phba->mbox_mem_pool); return status; } /** * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler * @irq: Interrupt number. * @dev_id: The device context pointer. 
* * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within * threaded irq context. * * Returns * IRQ_HANDLED - interrupt is handled * IRQ_NONE - otherwise **/ irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id) { struct lpfc_hba *phba; struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; int ecount = 0; int hba_eqidx; struct lpfc_eq_intr_info *eqi; /* Get the driver's phba structure from the dev_id */ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; phba = hba_eq_hdl->phba; hba_eqidx = hba_eq_hdl->idx; if (unlikely(!phba)) return IRQ_NONE; if (unlikely(!phba->sli4_hba.hdwq)) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; if (unlikely(!fpeq)) return IRQ_NONE; eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id()); eqi->icnt++; fpeq->last_cpu = raw_smp_processor_id(); if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && fpeq->q_flag & HBA_EQ_DELAY_CHK && phba->cfg_auto_imax && fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && phba->sli.sli_flag & LPFC_SLI_USE_EQDR) lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); /* process and rearm the EQ */ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, LPFC_THREADED_IRQ); if (unlikely(ecount == 0)) { fpeq->EQ_no_entry++; if (phba->intr_type == MSIX) /* MSI-X treated interrupt served as no EQ share INT */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3358 MSI-X interrupt with no EQE\n"); else /* Non MSI-X treated on interrupt as EQ share INT */ return IRQ_NONE; } return IRQ_HANDLED; } /** * lpfc_cq_create - Create a Completion Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @cq: The queue structure to use to create the completion queue. * @eq: The event queue to bind this completion queue to. * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). * * This function creates a completion queue, as detailed in @wq, on a port, * described by @phba by sending a CQ_CREATE mailbox command to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @cq struct * is used to get the entry count and entry size that are necessary to * determine the number of pages to allocate and use for this queue. The @eq * is used to indicate which event queue to bind this completion queue to. This * function will send the CQ_CREATE mailbox command to the HBA to setup the * completion queue. This function is asynchronous and will wait for the mailbox * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. 
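 *
 * Illustrative call sketch (hypothetical caller; LPFC_WCQ and LPFC_IO are
 * assumed here to be the type/subtype constants this driver uses elsewhere
 * for I/O completion queues):
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
 *	if (rc)
 *		return rc;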
**/ int lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_queue *eq, uint32_t type, uint32_t subtype) { struct lpfc_mbx_cq_create *cq_create; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* sanity check on queue memory */ if (!cq || !eq) return -ENODEV; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_cq_create) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_CQ_CREATE, length, LPFC_SLI4_MBX_EMBED); cq_create = &mbox->u.mqe.un.cq_create; shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, cq->page_count); bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.cqv); if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, (cq->page_size / SLI4_PAGE_SIZE)); bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, eq->queue_id); bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, phba->sli4_hba.pc_sli4_params.cqav); } else { bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); } switch (cq->entry_count) { case 2048: case 4096: if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { cq_create->u.request.context.lpfc_cq_context_count = cq->entry_count; bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_WORD7); break; } fallthrough; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0361 Unsupported CQ count: " "entry cnt %d sz %d pg cnt %d\n", cq->entry_count, cq->entry_size, cq->page_count); if (cq->entry_count < 256) { status = -EINVAL; goto out; } fallthrough; /* otherwise default to smallest count */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_256); break; case 512: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_512); break; case 1024: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_1024); break; } list_for_each_entry(dmabuf, &cq->page_list, list) { memset(dmabuf->virt, 0, cq->page_size); cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. 
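	 * Both the polled mailbox return code and the status/add_status words
	 * read from that subheader below must indicate success before the new
	 * CQ id is trusted.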
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2501 CQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; goto out; } cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); if (cq->queue_id == 0xFFFF) { status = -ENXIO; goto out; } /* link the cq onto the parent eq child list */ list_add_tail(&cq->list, &eq->child_list); /* Set up completion queue's type and subtype */ cq->type = type; cq->subtype = subtype; cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); cq->assoc_qid = eq->queue_id; cq->assoc_qp = eq; cq->host_index = 0; cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); if (cq->queue_id > phba->sli4_hba.cq_max) phba->sli4_hba.cq_max = cq->queue_id; out: mempool_free(mbox, phba->mbox_mem_pool); return status; } /** * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ * @phba: HBA structure that indicates port to create a queue on. * @cqp: The queue structure array to use to create the completion queues. * @hdwq: The hardware queue array with the EQ to bind completion queues to. * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). * * This function creates a set of completion queue, s to support MRQ * as detailed in @cqp, on a port, * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @cq struct * is used to get the entry count and entry size that are necessary to * determine the number of pages to allocate and use for this queue. The @eq * is used to indicate which event queue to bind this completion queue to. This * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the * completion queue. This function is asynchronous and will wait for the mailbox * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. 
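 *
 * Note: the number of completion queues in the set comes from
 * phba->cfg_nvmet_mrq, and each CQ is bound to the EQ of the matching
 * entry in @hdwq.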
**/ int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, uint32_t subtype) { struct lpfc_queue *cq; struct lpfc_queue *eq; struct lpfc_mbx_cq_create_set *cq_set; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, alloclen, status = 0; int cnt, idx, numcq, page_idx = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; /* sanity check on queue memory */ numcq = phba->cfg_nvmet_mrq; if (!cqp || !hdwq || !numcq) return -ENODEV; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = sizeof(struct lpfc_mbx_cq_create_set); length += ((numcq * cqp[0]->page_count) * sizeof(struct dma_address)); alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, LPFC_SLI4_MBX_NEMBED); if (alloclen < length) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3098 Allocated DMA memory size (%d) is " "less than the requested DMA memory size " "(%d)\n", alloclen, length); status = -ENOMEM; goto out; } cq_set = mbox->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); for (idx = 0; idx < numcq; idx++) { cq = cqp[idx]; eq = hdwq[idx].hba_eq; if (!cq || !eq) { status = -ENOMEM; goto out; } if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = cq->page_size; switch (idx) { case 0: bf_set(lpfc_mbx_cq_create_set_page_size, &cq_set->u.request, (hw_page_size / SLI4_PAGE_SIZE)); bf_set(lpfc_mbx_cq_create_set_num_pages, &cq_set->u.request, cq->page_count); bf_set(lpfc_mbx_cq_create_set_evt, &cq_set->u.request, 1); bf_set(lpfc_mbx_cq_create_set_valid, &cq_set->u.request, 1); bf_set(lpfc_mbx_cq_create_set_cqe_size, &cq_set->u.request, 0); bf_set(lpfc_mbx_cq_create_set_num_cq, &cq_set->u.request, numcq); bf_set(lpfc_mbx_cq_create_set_autovalid, &cq_set->u.request, phba->sli4_hba.pc_sli4_params.cqav); switch (cq->entry_count) { case 2048: case 4096: if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, cq->entry_count); bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_WORD7); break; } fallthrough; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3118 Bad CQ count. 
(%d)\n", cq->entry_count); if (cq->entry_count < 256) { status = -EINVAL; goto out; } fallthrough; /* otherwise default to smallest */ case 256: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_256); break; case 512: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_512); break; case 1024: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_1024); break; } bf_set(lpfc_mbx_cq_create_set_eq_id0, &cq_set->u.request, eq->queue_id); break; case 1: bf_set(lpfc_mbx_cq_create_set_eq_id1, &cq_set->u.request, eq->queue_id); break; case 2: bf_set(lpfc_mbx_cq_create_set_eq_id2, &cq_set->u.request, eq->queue_id); break; case 3: bf_set(lpfc_mbx_cq_create_set_eq_id3, &cq_set->u.request, eq->queue_id); break; case 4: bf_set(lpfc_mbx_cq_create_set_eq_id4, &cq_set->u.request, eq->queue_id); break; case 5: bf_set(lpfc_mbx_cq_create_set_eq_id5, &cq_set->u.request, eq->queue_id); break; case 6: bf_set(lpfc_mbx_cq_create_set_eq_id6, &cq_set->u.request, eq->queue_id); break; case 7: bf_set(lpfc_mbx_cq_create_set_eq_id7, &cq_set->u.request, eq->queue_id); break; case 8: bf_set(lpfc_mbx_cq_create_set_eq_id8, &cq_set->u.request, eq->queue_id); break; case 9: bf_set(lpfc_mbx_cq_create_set_eq_id9, &cq_set->u.request, eq->queue_id); break; case 10: bf_set(lpfc_mbx_cq_create_set_eq_id10, &cq_set->u.request, eq->queue_id); break; case 11: bf_set(lpfc_mbx_cq_create_set_eq_id11, &cq_set->u.request, eq->queue_id); break; case 12: bf_set(lpfc_mbx_cq_create_set_eq_id12, &cq_set->u.request, eq->queue_id); break; case 13: bf_set(lpfc_mbx_cq_create_set_eq_id13, &cq_set->u.request, eq->queue_id); break; case 14: bf_set(lpfc_mbx_cq_create_set_eq_id14, &cq_set->u.request, eq->queue_id); break; case 15: bf_set(lpfc_mbx_cq_create_set_eq_id15, &cq_set->u.request, eq->queue_id); break; } /* link the cq onto the parent eq child list */ list_add_tail(&cq->list, &eq->child_list); /* Set up completion queue's type and subtype */ cq->type = type; cq->subtype = subtype; cq->assoc_qid = eq->queue_id; cq->assoc_qp = eq; cq->host_index = 0; cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); cq->chann = idx; rc = 0; list_for_each_entry(dmabuf, &cq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); cnt = page_idx + dmabuf->buffer_tag; cq_set->u.request.page[cnt].addr_lo = putPaddrLow(dmabuf->phys); cq_set->u.request.page[cnt].addr_hi = putPaddrHigh(dmabuf->phys); rc++; } page_idx += rc; } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3119 CQ_CREATE_SET mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; goto out; } rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); if (rc == 0xFFFF) { status = -ENXIO; goto out; } for (idx = 0; idx < numcq; idx++) { cq = cqp[idx]; cq->queue_id = rc + idx; if (cq->queue_id > phba->sli4_hba.cq_max) phba->sli4_hba.cq_max = cq->queue_id; } out: lpfc_sli4_mbox_cmd_free(phba, mbox); return status; } /** * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration * @phba: HBA structure that indicates port to create a queue on. * @mq: The queue structure to use to create the mailbox queue. 
* @mbox: An allocated pointer to type LPFC_MBOXQ_t * @cq: The completion queue to associate with this cq. * * This function provides failback (fb) functionality when the * mq_create_ext fails on older FW generations. It's purpose is identical * to mq_create_ext otherwise. * * This routine cannot fail as all attributes were previously accessed and * initialized in mq_create_ext. **/ static void lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) { struct lpfc_mbx_mq_create *mq_create; struct lpfc_dmabuf *dmabuf; int length; length = (sizeof(struct lpfc_mbx_mq_create) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_MQ_CREATE, length, LPFC_SLI4_MBX_EMBED); mq_create = &mbox->u.mqe.un.mq_create; bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, mq->page_count); bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, cq->queue_id); bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); switch (mq->entry_count) { case 16: bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, LPFC_MQ_RING_SIZE_16); break; case 32: bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, LPFC_MQ_RING_SIZE_32); break; case 64: bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, LPFC_MQ_RING_SIZE_64); break; case 128: bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, LPFC_MQ_RING_SIZE_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } } /** * lpfc_mq_create - Create a mailbox Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @mq: The queue structure to use to create the mailbox queue. * @cq: The completion queue to associate with this cq. * @subtype: The queue's subtype. * * This function creates a mailbox queue, as detailed in @mq, on a port, * described by @phba by sending a MQ_CREATE mailbox command to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @cq struct * is used to get the entry count and entry size that are necessary to * determine the number of pages to allocate and use for this queue. This * function will send the MQ_CREATE mailbox command to the HBA to setup the * mailbox queue. This function is asynchronous and will wait for the mailbox * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. 
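 *
 * Note: MQ_CREATE_EXT is attempted first so asynchronous link, FIP, group 5,
 * FC and SLI events are registered at create time; if that command fails,
 * the code falls back to plain MQ_CREATE via lpfc_mq_create_fb_init().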
**/ int32_t lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, struct lpfc_queue *cq, uint32_t subtype) { struct lpfc_mbx_mq_create *mq_create; struct lpfc_mbx_mq_create_ext *mq_create_ext; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; /* sanity check on queue memory */ if (!mq || !cq) return -ENODEV; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_mq_create_ext) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_MQ_CREATE_EXT, length, LPFC_SLI4_MBX_EMBED); mq_create_ext = &mbox->u.mqe.un.mq_create_ext; shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, mq->page_count); bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request, 1); bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, &mq_create_ext->u.request, 1); bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, &mq_create_ext->u.request, 1); bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, &mq_create_ext->u.request, 1); bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, &mq_create_ext->u.request, 1); bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.mqv); if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, cq->queue_id); else bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, cq->queue_id); switch (mq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0362 Unsupported MQ count. (%d)\n", mq->entry_count); if (mq->entry_count < 16) { status = -EINVAL; goto out; } fallthrough; /* otherwise default to smallest count */ case 16: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, LPFC_MQ_RING_SIZE_16); break; case 32: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, LPFC_MQ_RING_SIZE_32); break; case 64: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, LPFC_MQ_RING_SIZE_64); break; case 128: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, LPFC_MQ_RING_SIZE_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create_ext->u.response); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2795 MQ_CREATE_EXT failed with " "status x%x. Failback to MQ_CREATE.\n", rc); lpfc_mq_create_fb_init(phba, mq, mbox, cq); mq_create = &mbox->u.mqe.un.mq_create; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response); } /* The IOCTL status is embedded in the mailbox subheader. 
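	 * If the MQ_CREATE failback path ran above, shdr was re-pointed at the
	 * MQ_CREATE header, so the checks below apply to whichever command was
	 * actually issued.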
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2502 MQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; goto out; } if (mq->queue_id == 0xFFFF) { status = -ENXIO; goto out; } mq->type = LPFC_MQ; mq->assoc_qid = cq->queue_id; mq->subtype = subtype; mq->host_index = 0; mq->hba_index = 0; /* link the mq onto the parent cq child list */ list_add_tail(&mq->list, &cq->child_list); out: mempool_free(mbox, phba->mbox_mem_pool); return status; } /** * lpfc_wq_create - Create a Work Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @wq: The queue structure to use to create the work queue. * @cq: The completion queue to bind this work queue to. * @subtype: The subtype of the work queue indicating its functionality. * * This function creates a work queue, as detailed in @wq, on a port, described * by @phba by sending a WQ_CREATE mailbox command to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @wq struct * is used to get the entry count and entry size that are necessary to * determine the number of pages to allocate and use for this queue. The @cq * is used to indicate which completion queue to bind this work queue to. This * function will send the WQ_CREATE mailbox command to the HBA to setup the * work queue. This function is asynchronous and will wait for the mailbox * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. 
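 *
 * Note: version 1 of WQ_CREATE is selected when the firmware reports 128-byte
 * WQE support or the WQ page size exceeds SLI4_PAGE_SIZE; that path also
 * requests Direct Packet Push (DPP) from the port.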
**/ int lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, struct lpfc_queue *cq, uint32_t subtype) { struct lpfc_mbx_wq_create *wq_create; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; struct dma_address *page; void __iomem *bar_memmap_p; uint32_t db_offset; uint16_t pci_barset; uint8_t dpp_barset; uint32_t dpp_offset; uint8_t wq_create_version; #ifdef CONFIG_X86 unsigned long pg_addr; #endif /* sanity check on queue memory */ if (!wq || !cq) return -ENODEV; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = wq->page_size; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_wq_create) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, length, LPFC_SLI4_MBX_EMBED); wq_create = &mbox->u.mqe.un.wq_create; shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, wq->page_count); bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); /* wqv is the earliest version supported, NOT the latest */ bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.wqv); if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || (wq->page_size > SLI4_PAGE_SIZE)) wq_create_version = LPFC_Q_CREATE_VERSION_1; else wq_create_version = LPFC_Q_CREATE_VERSION_0; switch (wq_create_version) { case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, wq->entry_count); bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_1); switch (wq->entry_size) { default: case 64: bf_set(lpfc_mbx_wq_create_wqe_size, &wq_create->u.request_1, LPFC_WQ_WQE_SIZE_64); break; case 128: bf_set(lpfc_mbx_wq_create_wqe_size, &wq_create->u.request_1, LPFC_WQ_WQE_SIZE_128); break; } /* Request DPP by default */ bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, (wq->page_size / SLI4_PAGE_SIZE)); page = wq_create->u.request_1.page; break; default: page = wq_create->u.request.page; break; } list_for_each_entry(dmabuf, &wq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. 
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2503 WQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; goto out; } if (wq_create_version == LPFC_Q_CREATE_VERSION_0) wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); else wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, &wq_create->u.response_1); if (wq->queue_id == 0xFFFF) { status = -ENXIO; goto out; } wq->db_format = LPFC_DB_LIST_FORMAT; if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, &wq_create->u.response); if ((wq->db_format != LPFC_DB_LIST_FORMAT) && (wq->db_format != LPFC_DB_RING_FORMAT)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3265 WQ[%d] doorbell format " "not supported: x%x\n", wq->queue_id, wq->db_format); status = -EINVAL; goto out; } pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, &wq_create->u.response); bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); if (!bar_memmap_p) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3263 WQ[%d] failed to memmap " "pci barset:x%x\n", wq->queue_id, pci_barset); status = -ENOMEM; goto out; } db_offset = wq_create->u.response.doorbell_offset; if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && (db_offset != LPFC_ULP1_WQ_DOORBELL)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3252 WQ[%d] doorbell offset " "not supported: x%x\n", wq->queue_id, db_offset); status = -EINVAL; goto out; } wq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3264 WQ[%d]: barset:x%x, offset:x%x, " "format:x%x\n", wq->queue_id, pci_barset, db_offset, wq->db_format); } else wq->db_regaddr = phba->sli4_hba.WQDBregaddr; } else { /* Check if DPP was honored by the firmware */ wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, &wq_create->u.response_1); if (wq->dpp_enable) { pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, &wq_create->u.response_1); bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); if (!bar_memmap_p) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3267 WQ[%d] failed to memmap " "pci barset:x%x\n", wq->queue_id, pci_barset); status = -ENOMEM; goto out; } db_offset = wq_create->u.response_1.doorbell_offset; wq->db_regaddr = bar_memmap_p + db_offset; wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, &wq_create->u.response_1); dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, &wq_create->u.response_1); bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, dpp_barset); if (!bar_memmap_p) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3268 WQ[%d] failed to memmap " "pci barset:x%x\n", wq->queue_id, dpp_barset); status = -ENOMEM; goto out; } dpp_offset = wq_create->u.response_1.dpp_offset; wq->dpp_regaddr = bar_memmap_p + dpp_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3271 WQ[%d]: barset:x%x, offset:x%x, " "dpp_id:x%x dpp_barset:x%x " "dpp_offset:x%x\n", wq->queue_id, pci_barset, db_offset, wq->dpp_id, dpp_barset, dpp_offset); #ifdef CONFIG_X86 /* Enable combined writes for DPP aperture */ pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; rc = set_memory_wc(pg_addr, 1); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3272 Cannot setup Combined " "Write on WQ[%d] - disable DPP\n", wq->queue_id); phba->cfg_enable_dpp = 0; } #else 
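	/*
	 * Write-combined mapping of the DPP aperture is only set up on
	 * CONFIG_X86 builds (see above); on other builds DPP is disabled
	 * and the regular doorbell path is used instead.
	 */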
phba->cfg_enable_dpp = 0; #endif } else wq->db_regaddr = phba->sli4_hba.WQDBregaddr; } wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (wq->pring == NULL) { status = -ENOMEM; goto out; } wq->type = LPFC_WQ; wq->assoc_qid = cq->queue_id; wq->subtype = subtype; wq->host_index = 0; wq->hba_index = 0; wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; /* link the wq onto the parent cq child list */ list_add_tail(&wq->list, &cq->child_list); out: mempool_free(mbox, phba->mbox_mem_pool); return status; } /** * lpfc_rq_create - Create a Receive Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @hrq: The queue structure to use to create the header receive queue. * @drq: The queue structure to use to create the data receive queue. * @cq: The completion queue to bind this work queue to. * @subtype: The subtype of the work queue indicating its functionality. * * This function creates a receive buffer queue pair , as detailed in @hrq and * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command * to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq * struct is used to get the entry count that is necessary to determine the * number of pages to use for this queue. The @cq is used to indicate which * completion queue to bind received buffers that are posted to these queues to. * This function will send the RQ_CREATE mailbox command to the HBA to setup the * receive queue pair. This function is asynchronous and will wait for the * mailbox command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ int lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) { struct lpfc_mbx_rq_create *rq_create; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; void __iomem *bar_memmap_p; uint32_t db_offset; uint16_t pci_barset; /* sanity check on queue memory */ if (!hrq || !drq || !cq) return -ENODEV; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; if (hrq->entry_count != drq->entry_count) return -EINVAL; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_rq_create) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_EMBED); rq_create = &mbox->u.mqe.un.rq_create; shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.rqv); if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { bf_set(lpfc_rq_context_rqe_count_1, &rq_create->u.request.context, hrq->entry_count); rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, LPFC_RQE_SIZE_8); bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, LPFC_RQ_PAGE_SIZE_4096); } else { switch (hrq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2535 Unsupported RQ count. 
(%d)\n", hrq->entry_count); if (hrq->entry_count < 512) { status = -EINVAL; goto out; } fallthrough; /* otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_512); break; case 1024: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_1024); break; case 2048: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_2048); break; case 4096: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_4096); break; } bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, LPFC_HDR_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, hrq->page_count); list_for_each_entry(dmabuf, &hrq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2504 RQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; goto out; } hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); if (hrq->queue_id == 0xFFFF) { status = -ENXIO; goto out; } if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, &rq_create->u.response); if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && (hrq->db_format != LPFC_DB_RING_FORMAT)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3262 RQ [%d] doorbell format not " "supported: x%x\n", hrq->queue_id, hrq->db_format); status = -EINVAL; goto out; } pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, &rq_create->u.response); bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); if (!bar_memmap_p) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3269 RQ[%d] failed to memmap pci " "barset:x%x\n", hrq->queue_id, pci_barset); status = -ENOMEM; goto out; } db_offset = rq_create->u.response.doorbell_offset; if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && (db_offset != LPFC_ULP1_RQ_DOORBELL)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3270 RQ[%d] doorbell offset not " "supported: x%x\n", hrq->queue_id, db_offset); status = -EINVAL; goto out; } hrq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " "format:x%x\n", hrq->queue_id, pci_barset, db_offset, hrq->db_format); } else { hrq->db_format = LPFC_DB_RING_FORMAT; hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; } hrq->type = LPFC_HRQ; hrq->assoc_qid = cq->queue_id; hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; /* now create the data queue */ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.rqv); if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) 
{ bf_set(lpfc_rq_context_rqe_count_1, &rq_create->u.request.context, hrq->entry_count); if (subtype == LPFC_NVMET) rq_create->u.request.context.buffer_size = LPFC_NVMET_DATA_BUF_SIZE; else rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, LPFC_RQE_SIZE_8); bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, (PAGE_SIZE/SLI4_PAGE_SIZE)); } else { switch (drq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2536 Unsupported RQ count. (%d)\n", drq->entry_count); if (drq->entry_count < 512) { status = -EINVAL; goto out; } fallthrough; /* otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_512); break; case 1024: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_1024); break; case 2048: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_2048); break; case 4096: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, LPFC_RQ_RING_SIZE_4096); break; } if (subtype == LPFC_NVMET) bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, LPFC_NVMET_DATA_BUF_SIZE); else bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, LPFC_DATA_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, drq->page_count); list_for_each_entry(dmabuf, &drq->page_list, list) { rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { status = -ENXIO; goto out; } drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); if (drq->queue_id == 0xFFFF) { status = -ENXIO; goto out; } drq->type = LPFC_DRQ; drq->assoc_qid = cq->queue_id; drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; /* link the header and data RQs onto the parent cq child list */ list_add_tail(&hrq->list, &cq->child_list); list_add_tail(&drq->list, &cq->child_list); out: mempool_free(mbox, phba->mbox_mem_pool); return status; } /** * lpfc_mrq_create - Create MRQ Receive Queues on the HBA * @phba: HBA structure that indicates port to create a queue on. * @hrqp: The queue structure array to use to create the header receive queues. * @drqp: The queue structure array to use to create the data receive queues. * @cqp: The completion queue array to bind these receive queues to. * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). * * This function creates a receive buffer queue pair , as detailed in @hrq and * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command * to the HBA. * * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq * struct is used to get the entry count that is necessary to determine the * number of pages to use for this queue. 
The @cq is used to indicate which * completion queue to bind received buffers that are posted to these queues to. * This function will send the RQ_CREATE mailbox command to the HBA to setup the * receive queue pair. This function is asynchronous and will wait for the * mailbox command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough * memory this function will return -ENOMEM. If the queue create mailbox command * fails this function will return -ENXIO. **/ int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, struct lpfc_queue **drqp, struct lpfc_queue **cqp, uint32_t subtype) { struct lpfc_queue *hrq, *drq, *cq; struct lpfc_mbx_rq_create_v2 *rq_create; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, alloclen, status = 0; int cnt, idx, numrq, page_idx = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; numrq = phba->cfg_nvmet_mrq; /* sanity check on array memory */ if (!hrqp || !drqp || !cqp || !numrq) return -ENODEV; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = sizeof(struct lpfc_mbx_rq_create_v2); length += ((2 * numrq * hrqp[0]->page_count) * sizeof(struct dma_address)); alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_NEMBED); if (alloclen < length) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3099 Allocated DMA memory size (%d) is " "less than the requested DMA memory size " "(%d)\n", alloclen, length); status = -ENOMEM; goto out; } rq_create = mbox->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); cnt = 0; for (idx = 0; idx < numrq; idx++) { hrq = hrqp[idx]; drq = drqp[idx]; cq = cqp[idx]; /* sanity check on queue memory */ if (!hrq || !drq || !cq) { status = -ENODEV; goto out; } if (hrq->entry_count != drq->entry_count) { status = -EINVAL; goto out; } if (idx == 0) { bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, hrq->page_count); bf_set(lpfc_mbx_rq_create_rq_cnt, &rq_create->u.request, (numrq * 2)); bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 1); bf_set(lpfc_rq_context_base_cq, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_rq_context_data_size, &rq_create->u.request.context, LPFC_NVMET_DATA_BUF_SIZE); bf_set(lpfc_rq_context_hdr_size, &rq_create->u.request.context, LPFC_HDR_BUF_SIZE); bf_set(lpfc_rq_context_rqe_count_1, &rq_create->u.request.context, hrq->entry_count); bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, LPFC_RQE_SIZE_8); bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, (PAGE_SIZE/SLI4_PAGE_SIZE)); } rc = 0; list_for_each_entry(dmabuf, &hrq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); cnt = page_idx + dmabuf->buffer_tag; rq_create->u.request.page[cnt].addr_lo = putPaddrLow(dmabuf->phys); rq_create->u.request.page[cnt].addr_hi = putPaddrHigh(dmabuf->phys); rc++; } page_idx += rc; rc = 0; list_for_each_entry(dmabuf, &drq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); cnt = page_idx + dmabuf->buffer_tag; rq_create->u.request.page[cnt].addr_lo = putPaddrLow(dmabuf->phys); rq_create->u.request.page[cnt].addr_hi = putPaddrHigh(dmabuf->phys); rc++; } page_idx += rc; hrq->db_format = LPFC_DB_RING_FORMAT; 
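		/*
		 * MRQ header and data queues always use the ring doorbell
		 * format and share the common RQ doorbell register.
		 */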
hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; hrq->type = LPFC_HRQ; hrq->assoc_qid = cq->queue_id; hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; drq->db_format = LPFC_DB_RING_FORMAT; drq->db_regaddr = phba->sli4_hba.RQDBregaddr; drq->type = LPFC_DRQ; drq->assoc_qid = cq->queue_id; drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; list_add_tail(&hrq->list, &cq->child_list); list_add_tail(&drq->list, &cq->child_list); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3120 RQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; goto out; } rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); if (rc == 0xFFFF) { status = -ENXIO; goto out; } /* Initialize all RQs with associated queue id */ for (idx = 0; idx < numrq; idx++) { hrq = hrqp[idx]; hrq->queue_id = rc + (2 * idx); drq = drqp[idx]; drq->queue_id = rc + (2 * idx) + 1; } out: lpfc_sli4_mbox_cmd_free(phba, mbox); return status; } /** * lpfc_eq_destroy - Destroy an event Queue on the HBA * @phba: HBA structure that indicates port to destroy a queue on. * @eq: The queue structure associated with the queue to destroy. * * This function destroys a queue, as detailed in @eq by sending an mailbox * command, specific to the type of queue, to the HBA. * * The @eq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ int lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) { LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* sanity check on queue memory */ if (!eq) return -ENODEV; mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_eq_destroy) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_EQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, eq->queue_id); mbox->vport = eq->phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr = (union lpfc_sli4_cfg_shdr *) &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2505 EQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; } /* Remove eq from any list */ list_del_init(&eq->list); mempool_free(mbox, eq->phba->mbox_mem_pool); return status; } /** * lpfc_cq_destroy - Destroy a Completion Queue on the HBA * @phba: HBA structure that indicates port to destroy a queue on. * @cq: The queue structure associated with the queue to destroy. 
* * This function destroys a queue, as detailed in @cq by sending an mailbox * command, specific to the type of queue, to the HBA. * * The @cq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ int lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) { LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* sanity check on queue memory */ if (!cq) return -ENODEV; mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_cq_destroy) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_CQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, cq->queue_id); mbox->vport = cq->phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr = (union lpfc_sli4_cfg_shdr *) &mbox->u.mqe.un.wq_create.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2506 CQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; } /* Remove cq from any list */ list_del_init(&cq->list); mempool_free(mbox, cq->phba->mbox_mem_pool); return status; } /** * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA * @phba: HBA structure that indicates port to destroy a queue on. * @mq: The queue structure associated with the queue to destroy. * * This function destroys a queue, as detailed in @mq by sending an mailbox * command, specific to the type of queue, to the HBA. * * The @mq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ int lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) { LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* sanity check on queue memory */ if (!mq) return -ENODEV; mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_mq_destroy) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_MQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, mq->queue_id); mbox->vport = mq->phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. 
*/ shdr = (union lpfc_sli4_cfg_shdr *) &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2507 MQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; } /* Remove mq from any list */ list_del_init(&mq->list); mempool_free(mbox, mq->phba->mbox_mem_pool); return status; } /** * lpfc_wq_destroy - Destroy a Work Queue on the HBA * @phba: HBA structure that indicates port to destroy a queue on. * @wq: The queue structure associated with the queue to destroy. * * This function destroys a queue, as detailed in @wq by sending an mailbox * command, specific to the type of queue, to the HBA. * * The @wq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. **/ int lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) { LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* sanity check on queue memory */ if (!wq) return -ENODEV; mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_wq_destroy) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, wq->queue_id); mbox->vport = wq->phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2508 WQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; } /* Remove wq from any list */ list_del_init(&wq->list); kfree(wq->pring); wq->pring = NULL; mempool_free(mbox, wq->phba->mbox_mem_pool); return status; } /** * lpfc_rq_destroy - Destroy a Receive Queue on the HBA * @phba: HBA structure that indicates port to destroy a queue on. * @hrq: The queue structure associated with the queue to destroy. * @drq: The queue structure associated with the queue to destroy. * * This function destroys a queue, as detailed in @rq by sending an mailbox * command, specific to the type of queue, to the HBA. * * The @rq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox * command fails this function will return -ENXIO. 
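 *
 * Note: the header and data RQs are torn down with two successive RQ_DESTROY
 * commands that reuse the same mailbox; a failure on the header RQ frees the
 * mailbox and returns immediately.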
**/ int lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq) { LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* sanity check on queue memory */ if (!hrq || !drq) return -ENODEV; mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_rq_destroy) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, hrq->queue_id); mbox->vport = hrq->phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ shdr = (union lpfc_sli4_cfg_shdr *) &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2509 RQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); mempool_free(mbox, hrq->phba->mbox_mem_pool); return -ENXIO; } bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, drq->queue_id); rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2510 RQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); status = -ENXIO; } list_del_init(&hrq->list); list_del_init(&drq->list); mempool_free(mbox, hrq->phba->mbox_mem_pool); return status; } /** * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA * @phba: The virtual port for which this call being executed. * @pdma_phys_addr0: Physical address of the 1st SGL page. * @pdma_phys_addr1: Physical address of the 2nd SGL page. * @xritag: the xritag that ties this io to the SGL pages. * * This routine will post the sgl pages for the IO that has the xritag * that is in the iocbq structure. The xritag is assigned during iocbq * creation and persists for as long as the driver is loaded. * if the caller has fewer than 256 scatter gather segments to map then * pdma_phys_addr1 should be 0. * If the caller needs to map more than 256 scatter gather segment then * pdma_phys_addr1 should be a valid physical address. * physical address for SGLs must be 64 byte aligned. * If you are going to map 2 SGL's then the first one must have 256 entries * the second sgl can have between 1 and 256 entries. 
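 *
 * Illustrative call sketch (hypothetical caller posting a single-page ELS
 * sglq, so the second page address is 0; the sglq naming is an assumption
 * for the example only):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);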
* * Return codes: * 0 - Success * -ENXIO, -ENOMEM - Failure **/ int lpfc_sli4_post_sgl(struct lpfc_hba *phba, dma_addr_t pdma_phys_addr0, dma_addr_t pdma_phys_addr1, uint16_t xritag) { struct lpfc_mbx_post_sgl_pages *post_sgl_pages; LPFC_MBOXQ_t *mbox; int rc; uint32_t shdr_status, shdr_add_status; uint32_t mbox_tmo; union lpfc_sli4_cfg_shdr *shdr; if (xritag == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0364 Invalid param:\n"); return -EINVAL; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, sizeof(struct lpfc_mbx_post_sgl_pages) - sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) &mbox->u.mqe.un.post_sgl_pages; bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_addr0)); post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_addr1)); post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } /* The IOCTL status is embedded in the mailbox subheader. */ shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (!phba->sli4_hba.intr_enable) mempool_free(mbox, phba->mbox_mem_pool); else if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2511 POST_SGL mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); } return 0; } /** * lpfc_sli4_alloc_xri - Get an available rpi in the device's range * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the * HBA consistent with the SLI-4 interface spec. This routine * posts a SLI4_PAGE_SIZE memory region to the port to hold up to * SLI4_PAGE_SIZE modulo 64 rpi context headers. * * Returns * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful * LPFC_RPI_ALLOC_ERROR if no rpis are available. **/ static uint16_t lpfc_sli4_alloc_xri(struct lpfc_hba *phba) { unsigned long xri; /* * Fetch the next logical xri. Because this index is logical, * the driver starts at 0 each time. */ spin_lock_irq(&phba->hbalock); xri = find_first_zero_bit(phba->sli4_hba.xri_bmask, phba->sli4_hba.max_cfg_param.max_xri); if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { spin_unlock_irq(&phba->hbalock); return NO_XRI; } else { set_bit(xri, phba->sli4_hba.xri_bmask); phba->sli4_hba.max_cfg_param.xri_used++; } spin_unlock_irq(&phba->hbalock); return xri; } /** * __lpfc_sli4_free_xri - Release an xri for reuse. * @phba: pointer to lpfc hba data structure. * @xri: xri to release. * * This routine is invoked to release an xri to the pool of * available rpis maintained by the driver. 
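 * The caller is expected to hold phba->hbalock; lpfc_sli4_free_xri() below
 * is the locking wrapper around this helper.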
**/ static void __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) { if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { phba->sli4_hba.max_cfg_param.xri_used--; } } /** * lpfc_sli4_free_xri - Release an xri for reuse. * @phba: pointer to lpfc hba data structure. * @xri: xri to release. * * This routine is invoked to release an xri to the pool of * available rpis maintained by the driver. **/ void lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) { spin_lock_irq(&phba->hbalock); __lpfc_sli4_free_xri(phba, xri); spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli4_next_xritag - Get an xritag for the io * @phba: Pointer to HBA context object. * * This function gets an xritag for the iocb. If there is no unused xritag * it will return 0xffff. * The function returns the allocated xritag if successful, else returns zero. * Zero is not a valid xritag. * The caller is not required to hold any lock. **/ uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *phba) { uint16_t xri_index; xri_index = lpfc_sli4_alloc_xri(phba); if (xri_index == NO_XRI) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2004 Failed to allocate XRI.last XRITAG is %d" " Max XRI is %d, Used XRI is %d\n", xri_index, phba->sli4_hba.max_cfg_param.max_xri, phba->sli4_hba.max_cfg_param.xri_used); return xri_index; } /** * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. * @phba: pointer to lpfc hba data structure. * @post_sgl_list: pointer to els sgl entry list. * @post_cnt: number of els sgl entries on the list. * * This routine is invoked to post a block of driver's sgl pages to the * HBA using non-embedded mailbox command. No Lock is held. This routine * is only called when the driver is loading and after all IO has been * stopped. **/ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, struct list_head *post_sgl_list, int post_cnt) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; struct lpfc_mbx_post_uembed_sgl_page1 *sgl; struct sgl_page_pairs *sgl_pg_pairs; void *viraddr; LPFC_MBOXQ_t *mbox; uint32_t reqlen, alloclen, pg_pairs; uint32_t mbox_tmo; uint16_t xritag_start = 0; int rc = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; reqlen = post_cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2559 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); return -ENOMEM; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; /* Allocate DMA memory and set up the non-embedded mailbox command */ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0285 Allocated DMA memory size (%d) is " "less than the requested DMA memory " "size (%d)\n", alloclen, reqlen); lpfc_sli4_mbox_cmd_free(phba, mbox); return -ENOMEM; } /* Set up the SGL pages in the non-embedded DMA pages */ viraddr = mbox->sge_array->addr[0]; sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; sgl_pg_pairs = &sgl->sgl_pg_pairs; pg_pairs = 0; list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { /* Set up the sge entry */ sgl_pg_pairs->sgl_pg0_addr_lo = cpu_to_le32(putPaddrLow(sglq_entry->phys)); sgl_pg_pairs->sgl_pg0_addr_hi = cpu_to_le32(putPaddrHigh(sglq_entry->phys)); sgl_pg_pairs->sgl_pg1_addr_lo = cpu_to_le32(putPaddrLow(0)); sgl_pg_pairs->sgl_pg1_addr_hi = 
cpu_to_le32(putPaddrHigh(0)); /* Keep the first xritag on the list */ if (pg_pairs == 0) xritag_start = sglq_entry->sli4_xritag; sgl_pg_pairs++; pg_pairs++; } /* Complete initialization and perform endian conversion. */ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); sgl->word0 = cpu_to_le32(sgl->word0); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (!phba->sli4_hba.intr_enable) lpfc_sli4_mbox_cmd_free(phba, mbox); else if (rc != MBX_TIMEOUT) lpfc_sli4_mbox_cmd_free(phba, mbox); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2513 POST_SGL_BLOCK mailbox command failed " "status x%x add_status x%x mbx status x%x\n", shdr_status, shdr_add_status, rc); rc = -ENXIO; } return rc; } /** * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware * @phba: pointer to lpfc hba data structure. * @nblist: pointer to nvme buffer list. * @count: number of scsi buffers on the list. * * This routine is invoked to post a block of @count scsi sgl pages from a * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. * No Lock is held. * **/ static int lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, int count) { struct lpfc_io_buf *lpfc_ncmd; struct lpfc_mbx_post_uembed_sgl_page1 *sgl; struct sgl_page_pairs *sgl_pg_pairs; void *viraddr; LPFC_MBOXQ_t *mbox; uint32_t reqlen, alloclen, pg_pairs; uint32_t mbox_tmo; uint16_t xritag_start = 0; int rc = 0; uint32_t shdr_status, shdr_add_status; dma_addr_t pdma_phys_bpl1; union lpfc_sli4_cfg_shdr *shdr; /* Calculate the requested length of the dma memory */ reqlen = count * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "6118 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); return -ENOMEM; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6119 Failed to allocate mbox cmd memory\n"); return -ENOMEM; } /* Allocate DMA memory and set up the non-embedded mailbox command */ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6120 Allocated DMA memory size (%d) is " "less than the requested DMA memory " "size (%d)\n", alloclen, reqlen); lpfc_sli4_mbox_cmd_free(phba, mbox); return -ENOMEM; } /* Get the first SGE entry from the non-embedded DMA memory */ viraddr = mbox->sge_array->addr[0]; /* Set up the SGL pages in the non-embedded DMA pages */ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; sgl_pg_pairs = &sgl->sgl_pg_pairs; pg_pairs = 0; list_for_each_entry(lpfc_ncmd, nblist, list) { /* Set up the sge entry */ sgl_pg_pairs->sgl_pg0_addr_lo = cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); sgl_pg_pairs->sgl_pg0_addr_hi = cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + SGL_PAGE_SIZE; else pdma_phys_bpl1 = 0; 
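		/* The second page address is only meaningful when the SGL for
		 * this buffer spills past one SGL_PAGE_SIZE page; otherwise a
		 * zero page-1 address is posted.
		 */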
sgl_pg_pairs->sgl_pg1_addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); sgl_pg_pairs->sgl_pg1_addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); /* Keep the first xritag on the list */ if (pg_pairs == 0) xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; sgl_pg_pairs++; pg_pairs++; } bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); /* Perform endian conversion if necessary */ sgl->word0 = cpu_to_le32(sgl->word0); if (!phba->sli4_hba.intr_enable) { rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); } else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (!phba->sli4_hba.intr_enable) lpfc_sli4_mbox_cmd_free(phba, mbox); else if (rc != MBX_TIMEOUT) lpfc_sli4_mbox_cmd_free(phba, mbox); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6125 POST_SGL_BLOCK mailbox command failed " "status x%x add_status x%x mbx status x%x\n", shdr_status, shdr_add_status, rc); rc = -ENXIO; } return rc; } /** * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list * @phba: pointer to lpfc hba data structure. * @post_nblist: pointer to the nvme buffer list. * @sb_count: number of nvme buffers. * * This routine walks a list of nvme buffers that was passed in. It attempts * to construct blocks of nvme buffer sgls which contains contiguous xris and * uses the non-embedded SGL block post mailbox commands to post to the port. * For single NVME buffer sgl with non-contiguous xri, if any, it shall use * embedded SGL post mailbox command for posting. The @post_nblist passed in * must be local list, thus no lock is needed when manipulate the list. * * Returns: 0 = failure, non-zero number of successfully posted buffers. 
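 * Runs of contiguous xris are accumulated into blocks of at most
 * LPFC_NEMBED_MBOX_SGL_CNT entries before being posted with
 * lpfc_sli4_post_io_sgl_block().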
**/ int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, struct list_head *post_nblist, int sb_count) { struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; int status, sgl_size; int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; dma_addr_t pdma_phys_sgl1; int last_xritag = NO_XRI; int cur_xritag; LIST_HEAD(prep_nblist); LIST_HEAD(blck_nblist); LIST_HEAD(nvme_nblist); /* sanity check */ if (sb_count <= 0) return -EINVAL; sgl_size = phba->cfg_sg_dma_buf_size; list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { list_del_init(&lpfc_ncmd->list); block_cnt++; if ((last_xritag != NO_XRI) && (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { /* a hole in xri block, form a sgl posting block */ list_splice_init(&prep_nblist, &blck_nblist); post_cnt = block_cnt - 1; /* prepare list for next posting block */ list_add_tail(&lpfc_ncmd->list, &prep_nblist); block_cnt = 1; } else { /* prepare list for next posting block */ list_add_tail(&lpfc_ncmd->list, &prep_nblist); /* enough sgls for non-embed sgl mbox command */ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { list_splice_init(&prep_nblist, &blck_nblist); post_cnt = block_cnt; block_cnt = 0; } } num_posting++; last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; /* end of repost sgl list condition for NVME buffers */ if (num_posting == sb_count) { if (post_cnt == 0) { /* last sgl posting block */ list_splice_init(&prep_nblist, &blck_nblist); post_cnt = block_cnt; } else if (block_cnt == 1) { /* last single sgl with non-contiguous xri */ if (sgl_size > SGL_PAGE_SIZE) pdma_phys_sgl1 = lpfc_ncmd->dma_phys_sgl + SGL_PAGE_SIZE; else pdma_phys_sgl1 = 0; cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; status = lpfc_sli4_post_sgl( phba, lpfc_ncmd->dma_phys_sgl, pdma_phys_sgl1, cur_xritag); if (status) { /* Post error. Buffer unavailable. */ lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; } else { /* Post success. Bffer available. */ lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; lpfc_ncmd->status = IOSTAT_SUCCESS; num_posted++; } /* success, put on NVME buffer sgl list */ list_add_tail(&lpfc_ncmd->list, &nvme_nblist); } } /* continue until a nembed page worth of sgls */ if (post_cnt == 0) continue; /* post block of NVME buffer list sgls */ status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, post_cnt); /* don't reset xirtag due to hole in xri block */ if (block_cnt == 0) last_xritag = NO_XRI; /* reset NVME buffer post count for next round of posting */ post_cnt = 0; /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ while (!list_empty(&blck_nblist)) { list_remove_head(&blck_nblist, lpfc_ncmd, struct lpfc_io_buf, list); if (status) { /* Post error. Mark buffer unavailable. */ lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; } else { /* Post success, Mark buffer available. */ lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; lpfc_ncmd->status = IOSTAT_SUCCESS; num_posted++; } list_add_tail(&lpfc_ncmd->list, &nvme_nblist); } } /* Push NVME buffers with sgl posted to the available list */ lpfc_io_buf_replenish(phba, &nvme_nblist); return num_posted; } /** * lpfc_fc_frame_check - Check that this frame is a valid frame to handle * @phba: pointer to lpfc_hba struct that the frame was received on * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) * * This function checks the fields in the @fc_hdr to see if the FC frame is a * valid type of frame that the LPFC driver will handle. This function will * return a zero if the frame is a valid frame or a non zero value when the * frame does not pass the check. 
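 * A frame carrying a Virtual Fabric Tagging header is re-checked against
 * the encapsulated FC header that follows the VFT header.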
**/ static int lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { /* make rctl_names static to save stack space */ struct fc_vft_header *fc_vft_hdr; uint32_t *header = (uint32_t *) fc_hdr; #define FC_RCTL_MDS_DIAGS 0xF4 switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ case FC_RCTL_DD_SOL_DATA: /* solicited data */ case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ case FC_RCTL_DD_DATA_DESC: /* data descriptor */ case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ case FC_RCTL_DD_CMD_STATUS: /* command status */ case FC_RCTL_ELS_REQ: /* extended link services request */ case FC_RCTL_ELS_REP: /* extended link services reply */ case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ case FC_RCTL_BA_ABTS: /* basic link service abort */ case FC_RCTL_BA_RMC: /* remove connection */ case FC_RCTL_BA_ACC: /* basic accept */ case FC_RCTL_BA_RJT: /* basic reject */ case FC_RCTL_BA_PRMT: case FC_RCTL_ACK_1: /* acknowledge_1 */ case FC_RCTL_ACK_0: /* acknowledge_0 */ case FC_RCTL_P_RJT: /* port reject */ case FC_RCTL_F_RJT: /* fabric reject */ case FC_RCTL_P_BSY: /* port busy */ case FC_RCTL_F_BSY: /* fabric busy to data frame */ case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ case FC_RCTL_LCR: /* link credit reset */ case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ case FC_RCTL_END: /* end */ break; case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ fc_vft_hdr = (struct fc_vft_header *)fc_hdr; fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; return lpfc_fc_frame_check(phba, fc_hdr); case FC_RCTL_BA_NOP: /* basic link service NOP */ default: goto drop; } switch (fc_hdr->fh_type) { case FC_TYPE_BLS: case FC_TYPE_ELS: case FC_TYPE_FCP: case FC_TYPE_CT: case FC_TYPE_NVME: break; case FC_TYPE_IP: case FC_TYPE_ILS: default: goto drop; } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2538 Received frame rctl:x%x, type:x%x, " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", fc_hdr->fh_r_ctl, fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), be32_to_cpu(header[4]), be32_to_cpu(header[5]), be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "2539 Dropped frame rctl:x%x type:x%x\n", fc_hdr->fh_r_ctl, fc_hdr->fh_type); return 1; } /** * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) * * This function processes the FC header to retrieve the VFI from the VF * header, if one exists. This function will return the VFI if one exists * or 0 if no VSAN Header exists. **/ static uint32_t lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) { struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) return 0; return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); } /** * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to * @phba: Pointer to the HBA structure to search for the vport on * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) * @fcfi: The FC Fabric ID that the frame came from * @did: Destination ID to match against * * This function searches the @phba for a vport that matches the content of the * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the * VFI, if the Virtual Fabric Tagging Header exists, and the DID. 
This function * returns the matching vport pointer or NULL if unable to match frame to a * vport. **/ static struct lpfc_vport * lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, uint16_t fcfi, uint32_t did) { struct lpfc_vport **vports; struct lpfc_vport *vport = NULL; int i; if (did == Fabric_DID) return phba->pport; if ((phba->pport->fc_flag & FC_PT2PT) && !(phba->link_state == LPFC_HBA_READY)) return phba->pport; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { if (phba->fcf.fcfi == fcfi && vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && vports[i]->fc_myDID == did) { vport = vports[i]; break; } } } lpfc_destroy_vport_work_array(phba, vports); return vport; } /** * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp * @vport: The vport to work on. * * This function updates the receive sequence time stamp for this vport. The * receive sequence time stamp indicates the time that the last frame of the * the sequence that has been idle for the longest amount of time was received. * the driver uses this time stamp to indicate if any received sequences have * timed out. **/ static void lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) { struct lpfc_dmabuf *h_buf; struct hbq_dmabuf *dmabuf = NULL; /* get the oldest sequence on the rcv list */ h_buf = list_get_first(&vport->rcv_buffer_list, struct lpfc_dmabuf, list); if (!h_buf) return; dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); vport->rcv_buffer_time_stamp = dmabuf->time_stamp; } /** * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. * @vport: The vport that the received sequences were sent to. * * This function cleans up all outstanding received sequences. This is called * by the driver when a link event or user action invalidates all the received * sequences. **/ void lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) { struct lpfc_dmabuf *h_buf, *hnext; struct lpfc_dmabuf *d_buf, *dnext; struct hbq_dmabuf *dmabuf = NULL; /* start with the oldest sequence on the rcv list */ list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); list_del_init(&dmabuf->hbuf.list); list_for_each_entry_safe(d_buf, dnext, &dmabuf->dbuf.list, list) { list_del_init(&d_buf->list); lpfc_in_buf_free(vport->phba, d_buf); } lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); } } /** * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. * @vport: The vport that the received sequences were sent to. * * This function determines whether any received sequences have timed out by * first checking the vport's rcv_buffer_time_stamp. If this time_stamp * indicates that there is at least one timed out sequence this routine will * go through the received sequences one at a time from most inactive to most * active to determine which ones need to be cleaned up. Once it has determined * that a sequence needs to be cleaned up it will simply free up the resources * without sending an abort. 
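 * A sequence is considered timed out once it has been idle for longer than
 * the fabric E_D_TOV (vport->phba->fc_edtov, in milliseconds).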
**/ void lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) { struct lpfc_dmabuf *h_buf, *hnext; struct lpfc_dmabuf *d_buf, *dnext; struct hbq_dmabuf *dmabuf = NULL; unsigned long timeout; int abort_count = 0; timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + vport->rcv_buffer_time_stamp); if (list_empty(&vport->rcv_buffer_list) || time_before(jiffies, timeout)) return; /* start with the oldest sequence on the rcv list */ list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + dmabuf->time_stamp); if (time_before(jiffies, timeout)) break; abort_count++; list_del_init(&dmabuf->hbuf.list); list_for_each_entry_safe(d_buf, dnext, &dmabuf->dbuf.list, list) { list_del_init(&d_buf->list); lpfc_in_buf_free(vport->phba, d_buf); } lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); } if (abort_count) lpfc_update_rcv_time_stamp(vport); } /** * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences * @vport: pointer to a vitural port * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame * * This function searches through the existing incomplete sequences that have * been sent to this @vport. If the frame matches one of the incomplete * sequences then the dbuf in the @dmabuf is added to the list of frames that * make up that sequence. If no sequence is found that matches this frame then * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list * This function returns a pointer to the first dmabuf in the sequence list that * the frame was linked to. **/ static struct hbq_dmabuf * lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) { struct fc_frame_header *new_hdr; struct fc_frame_header *temp_hdr; struct lpfc_dmabuf *d_buf; struct lpfc_dmabuf *h_buf; struct hbq_dmabuf *seq_dmabuf = NULL; struct hbq_dmabuf *temp_dmabuf = NULL; uint8_t found = 0; INIT_LIST_HEAD(&dmabuf->dbuf.list); dmabuf->time_stamp = jiffies; new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; /* Use the hdr_buf to find the sequence that this frame belongs to */ list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { temp_hdr = (struct fc_frame_header *)h_buf->virt; if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) continue; /* found a pending sequence that matches this frame */ seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); break; } if (!seq_dmabuf) { /* * This indicates first frame received for this sequence. * Queue the buffer on the vport's rcv_buffer_list. 
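		 * Later frames of the same sequence are matched against this
		 * header on SEQ_ID, OX_ID and S_ID (see the search loop above).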
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks to see whether
 * such a partially assembled sequence is held by the driver. If so, it shall
 * free up all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames were freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the assembled sequence from the upper level
 * protocol, described by the information from the basic abort @dmabuf. It
 * checks to see whether such a pending context exists at the upper level
 * protocol. If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence cleaned
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq) {
		lpfc_nlp_put(cmd_iocbq->ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
			get_job_ulpstatus(phba, rsp_iocbq),
			get_job_word4(phba, rsp_iocbq));
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
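 * Returns the index of the matching entry in phba->sli4_hba.xri_ids[], or
 * NO_XRI when the xri is not owned by this driver instance.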
**/ uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *phba, uint16_t xri) { uint16_t i; for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { if (xri == phba->sli4_hba.xri_ids[i]) return i; } return NO_XRI; } /** * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort * @vport: pointer to a virtual port. * @fc_hdr: pointer to a FC frame header. * @aborted: was the partially assembled receive sequence successfully aborted * * This function sends a basic response to a previous unsol sequence abort * event after aborting the sequence handling. **/ void lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr, bool aborted) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *ctiocb = NULL; struct lpfc_nodelist *ndlp; uint16_t oxid, rxid, xri, lxri; uint32_t sid, fctl; union lpfc_wqe128 *icmd; int rc; if (!lpfc_is_link_up(phba)) return; sid = sli4_sid_from_fc_hdr(fc_hdr); oxid = be16_to_cpu(fc_hdr->fh_ox_id); rxid = be16_to_cpu(fc_hdr->fh_rx_id); ndlp = lpfc_findnode_did(vport, sid); if (!ndlp) { ndlp = lpfc_nlp_init(vport, sid); if (!ndlp) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, "1268 Failed to allocate ndlp for " "oxid:x%x SID:x%x\n", oxid, sid); return; } /* Put ndlp onto pport node list */ lpfc_enqueue_node(vport, ndlp); } /* Allocate buffer for rsp iocb */ ctiocb = lpfc_sli_get_iocbq(phba); if (!ctiocb) return; icmd = &ctiocb->wqe; /* Extract the F_CTL field from FC_HDR */ fctl = sli4_fctl_from_fc_hdr(fc_hdr); ctiocb->ndlp = lpfc_nlp_get(ndlp); if (!ctiocb->ndlp) { lpfc_sli_release_iocbq(phba, ctiocb); return; } ctiocb->vport = phba->pport; ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; ctiocb->sli4_lxritag = NO_XRI; ctiocb->sli4_xritag = NO_XRI; ctiocb->abort_rctl = FC_RCTL_BA_ACC; if (fctl & FC_FC_EX_CTX) /* Exchange responder sent the abort so we * own the oxid. */ xri = oxid; else xri = rxid; lxri = lpfc_sli4_xri_inrange(phba, xri); if (lxri != NO_XRI) lpfc_set_rrq_active(phba, ndlp, lxri, (xri == oxid) ? rxid : oxid, 0); /* For BA_ABTS from exchange responder, if the logical xri with * the oxid maps to the FCP XRI range, the port no longer has * that exchange context, send a BLS_RJT. Override the IOCB for * a BA_RJT. */ if ((fctl & FC_FC_EX_CTX) && (lxri > lpfc_sli4_get_iocb_cnt(phba))) { ctiocb->abort_rctl = FC_RCTL_BA_RJT; bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, FC_BA_RJT_INV_XID); bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, FC_BA_RJT_UNABLE); } /* If BA_ABTS failed to abort a partially assembled receive sequence, * the driver no longer has that exchange, send a BLS_RJT. Override * the IOCB for a BA_RJT. */ if (aborted == false) { ctiocb->abort_rctl = FC_RCTL_BA_RJT; bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, FC_BA_RJT_INV_XID); bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, FC_BA_RJT_UNABLE); } if (fctl & FC_FC_EX_CTX) { /* ABTS sent by responder to CT exchange, construction * of BA_ACC will use OX_ID from ABTS for the XRI_TAG * field and RX_ID from ABTS for RX_ID field. */ ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP; bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid); } else { /* ABTS sent by initiator to CT exchange, construction * of BA_ACC will need to allocate a new XRI as for the * XRI_TAG field. 
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
	}

	/* OX_ID is invariable to who sent ABTS to CT exchange */
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid);

	/* Use CT=VPI */
	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest, ndlp->nlp_DID);
	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 ctiocb->abort_rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 ctiocb->abort_rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->ndlp = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
 * indicate that the unsolicited sequence has been aborted. After that, it
 * will issue a basic accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}

	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three things: 1) that the first frame has
 * a sequence count of zero, 2) that there is a frame with the last-frame-of-
 * sequence bit set, and 3) that there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
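 * The end of the sequence is detected by the FC_FC_END_SEQ bit in the F_CTL
 * field of one of the linked frames.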
**/ static int lpfc_seq_complete(struct hbq_dmabuf *dmabuf) { struct fc_frame_header *hdr; struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *seq_dmabuf; uint32_t fctl; int seq_count = 0; hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; /* make sure first fame of sequence has a sequence count of zero */ if (hdr->fh_seq_cnt != seq_count) return 0; fctl = (hdr->fh_f_ctl[0] << 16 | hdr->fh_f_ctl[1] << 8 | hdr->fh_f_ctl[2]); /* If last frame of sequence we can return success. */ if (fctl & FC_FC_END_SEQ) return 1; list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* If there is a hole in the sequence count then fail. */ if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) return 0; fctl = (hdr->fh_f_ctl[0] << 16 | hdr->fh_f_ctl[1] << 8 | hdr->fh_f_ctl[2]); /* If last frame of sequence we can return success. */ if (fctl & FC_FC_END_SEQ) return 1; } return 0; } /** * lpfc_prep_seq - Prep sequence for ULP processing * @vport: Pointer to the vport on which this sequence was received * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence * * This function takes a sequence, described by a list of frames, and creates * a list of iocbq structures to describe the sequence. This iocbq list will be * used to issue to the generic unsolicited sequence handler. This routine * returns a pointer to the first iocbq in the list. If the function is unable * to allocate an iocbq then it throw out the received frames that were not * able to be described and return a pointer to the first iocbq. If unable to * allocate any iocbqs (including the first) this function will return NULL. **/ static struct lpfc_iocbq * lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) { struct hbq_dmabuf *hbq_buf; struct lpfc_dmabuf *d_buf, *n_buf; struct lpfc_iocbq *first_iocbq, *iocbq; struct fc_frame_header *fc_hdr; uint32_t sid; uint32_t len, tot_len; fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* remove from receive buffer list */ list_del_init(&seq_dmabuf->hbuf.list); lpfc_update_rcv_time_stamp(vport); /* get the Remote Port's SID */ sid = sli4_sid_from_fc_hdr(fc_hdr); tot_len = 0; /* Get an iocbq struct to fill in. */ first_iocbq = lpfc_sli_get_iocbq(vport->phba); if (first_iocbq) { /* Initialize the first IOCB. 
*/ first_iocbq->wcqe_cmpl.total_data_placed = 0; bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl, IOSTAT_SUCCESS); first_iocbq->vport = vport; /* Check FC Header to see what TYPE of frame we are rcv'ing */ if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp, sli4_did_from_fc_hdr(fc_hdr)); } bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com, NO_XRI); bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com, be16_to_cpu(fc_hdr->fh_ox_id)); /* put the first buffer into the first iocb */ tot_len = bf_get(lpfc_rcqe_length, &seq_dmabuf->cq_event.cqe.rcqe_cmpl); first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf; first_iocbq->bpl_dmabuf = NULL; /* Keep track of the BDE count */ first_iocbq->wcqe_cmpl.word3 = 1; if (tot_len > LPFC_DATA_BUF_SIZE) first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = LPFC_DATA_BUF_SIZE; else first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len; first_iocbq->wcqe_cmpl.total_data_placed = tot_len; bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest, sid); } iocbq = first_iocbq; /* * Each IOCBq can have two Buffers assigned, so go through the list * of buffers for this sequence and save two buffers in each IOCBq */ list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { if (!iocbq) { lpfc_in_buf_free(vport->phba, d_buf); continue; } if (!iocbq->bpl_dmabuf) { iocbq->bpl_dmabuf = d_buf; iocbq->wcqe_cmpl.word3++; /* We need to get the size out of the right CQE */ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); len = bf_get(lpfc_rcqe_length, &hbq_buf->cq_event.cqe.rcqe_cmpl); iocbq->unsol_rcv_len = len; iocbq->wcqe_cmpl.total_data_placed += len; tot_len += len; } else { iocbq = lpfc_sli_get_iocbq(vport->phba); if (!iocbq) { if (first_iocbq) { bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl, IOSTAT_SUCCESS); first_iocbq->wcqe_cmpl.parameter = IOERR_NO_RESOURCES; } lpfc_in_buf_free(vport->phba, d_buf); continue; } /* We need to get the size out of the right CQE */ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); len = bf_get(lpfc_rcqe_length, &hbq_buf->cq_event.cqe.rcqe_cmpl); iocbq->cmd_dmabuf = d_buf; iocbq->bpl_dmabuf = NULL; iocbq->wcqe_cmpl.word3 = 1; if (len > LPFC_DATA_BUF_SIZE) iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = LPFC_DATA_BUF_SIZE; else iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = len; tot_len += len; iocbq->wcqe_cmpl.total_data_placed = tot_len; bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest, sid); list_add_tail(&iocbq->list, &first_iocbq->list); } } /* Free the sequence's header buffer */ if (!first_iocbq) lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); return first_iocbq; } static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) { struct fc_frame_header *fc_hdr; struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; struct lpfc_hba *phba = vport->phba; fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; iocbq = lpfc_prep_seq(vport, seq_dmabuf); if (!iocbq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2707 Ring %d handler: Failed to allocate " "iocb Rctl x%x Type x%x received\n", LPFC_ELS_RING, fc_hdr->fh_r_ctl, fc_hdr->fh_type); return; } if (!lpfc_complete_unsol_iocb(phba, phba->sli4_hba.els_wq->pring, iocbq, fc_hdr->fh_r_ctl, fc_hdr->fh_type)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2540 Ring %d handler: unexpected Rctl " "x%x Type x%x received\n", LPFC_ELS_RING, fc_hdr->fh_r_ctl, fc_hdr->fh_type); lpfc_in_buf_free(phba, &seq_dmabuf->dbuf); } /* Free iocb created in lpfc_prep_seq */ 
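	/* Both the chained iocbqs and the head iocbq itself are released. */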
list_for_each_entry_safe(curr_iocb, next_iocb, &iocbq->list, list) { list_del_init(&curr_iocb->list); lpfc_sli_release_iocbq(phba, curr_iocb); } lpfc_sli_release_iocbq(phba, iocbq); } static void lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; if (pcmd && pcmd->virt) dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); lpfc_sli_release_iocbq(phba, cmdiocb); lpfc_drain_txq(phba); } static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) { struct fc_frame_header *fc_hdr; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocbq = NULL; union lpfc_wqe128 *pwqe; struct lpfc_dmabuf *pcmd = NULL; uint32_t frame_len; int rc; unsigned long iflags; fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); /* Send the received frame back */ iocbq = lpfc_sli_get_iocbq(phba); if (!iocbq) { /* Queue cq event and wakeup worker thread to process it */ spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&dmabuf->cq_event.list, &phba->sli4_hba.sp_queue_event); phba->hba_flag |= HBA_SP_QUEUE_EVT; spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_worker_wake_up(phba); return; } /* Allocate buffer for command payload */ pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (pcmd) pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, &pcmd->phys); if (!pcmd || !pcmd->virt) goto exit; INIT_LIST_HEAD(&pcmd->list); /* copyin the payload */ memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); iocbq->cmd_dmabuf = pcmd; iocbq->vport = vport; iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; iocbq->cmd_flag |= LPFC_USE_FCPWQIDX; iocbq->num_bdes = 0; pwqe = &iocbq->wqe; /* fill in BDE's for command */ pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys); pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys); pwqe->gen_req.bde.tus.f.bdeSize = frame_len; pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; pwqe->send_frame.frame_len = frame_len; pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr)); pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1)); pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2)); pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3)); pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4)); pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5)); pwqe->generic.wqe_com.word7 = 0; pwqe->generic.wqe_com.word10 = 0; bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME); bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */ bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */ bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1); bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1); bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1); bf_set(wqe_xc, &pwqe->generic.wqe_com, 1); bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA); bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag); bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag); bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3); pwqe->generic.wqe_com.abort_tag = iocbq->iotag; iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); if (rc == IOCB_ERROR) goto exit; lpfc_in_buf_free(phba, &dmabuf->dbuf); return; exit: lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2023 Unable to process MDS loopback frame\n"); if 
(pcmd && pcmd->virt) dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); if (iocbq) lpfc_sli_release_iocbq(phba, iocbq); lpfc_in_buf_free(phba, &dmabuf->dbuf); } /** * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware * @phba: Pointer to HBA context object. * @dmabuf: Pointer to a dmabuf that describes the FC sequence. * * This function is called with no lock held. This function processes all * the received buffers and gives it to upper layers when a received buffer * indicates that it is the final frame in the sequence. The interrupt * service routine processes received buffers at interrupt contexts. * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the * appropriate receive function when the final frame in a sequence is received. **/ void lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) { struct hbq_dmabuf *seq_dmabuf; struct fc_frame_header *fc_hdr; struct lpfc_vport *vport; uint32_t fcfi; uint32_t did; /* Process each received buffer */ fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { vport = phba->pport; /* Handle MDS Loopback frames */ if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_sli4_handle_mds_loopback(vport, dmabuf); else lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } /* check to see if this a valid type of frame */ if (lpfc_fc_frame_check(phba, fc_hdr)) { lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } if ((bf_get(lpfc_cqe_code, &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) fcfi = bf_get(lpfc_rcqe_fcf_id_v1, &dmabuf->cq_event.cqe.rcqe_cmpl); else fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { vport = phba->pport; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2023 MDS Loopback %d bytes\n", bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl)); /* Handle MDS Loopback frames */ lpfc_sli4_handle_mds_loopback(vport, dmabuf); return; } /* d_id this frame is directed to */ did = sli4_did_from_fc_hdr(fc_hdr); vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); if (!vport) { /* throw out the frame */ lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && (did != Fabric_DID)) { /* * Throw out the frame if we are not pt2pt. * The pt2pt protocol allows for discovery frames * to be received without a registered VPI. */ if (!(vport->fc_flag & FC_PT2PT) || (phba->link_state == LPFC_HBA_READY)) { lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } } /* Handle the basic abort sequence (BA_ABTS) event */ if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { lpfc_sli4_handle_unsol_abort(vport, dmabuf); return; } /* Link this frame */ seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); if (!seq_dmabuf) { /* unable to add frame to vport - throw it out */ lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } /* If not last frame in sequence continue processing frames. */ if (!lpfc_seq_complete(seq_dmabuf)) return; /* Send the complete sequence to the upper layer protocol */ lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); } /** * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the * HBA consistent with the SLI-4 interface spec. 
This routine * posts a SLI4_PAGE_SIZE memory region to the port to hold up to * SLI4_PAGE_SIZE modulo 64 rpi context headers. * * This routine does not require any locks. It's usage is expected * to be driver load or reset recovery when the driver is * sequential. * * Return codes * 0 - successful * -EIO - The mailbox failed to complete successfully. * When this error occurs, the driver is not guaranteed * to have any rpi regions posted to the device and * must either attempt to repost the regions or take a * fatal error. **/ int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) { struct lpfc_rpi_hdr *rpi_page; uint32_t rc = 0; uint16_t lrpi = 0; /* SLI4 ports that support extents do not require RPI headers. */ if (!phba->sli4_hba.rpi_hdrs_in_use) goto exit; if (phba->sli4_hba.extents_in_use) return -EIO; list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { /* * Assign the rpi headers a physical rpi only if the driver * has not initialized those resources. A port reset only * needs the headers posted. */ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != LPFC_RPI_RSRC_RDY) rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2008 Error %d posting all rpi " "headers\n", rc); rc = -EIO; break; } } exit: bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, LPFC_RPI_RSRC_RDY); return rc; } /** * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port * @phba: pointer to lpfc hba data structure. * @rpi_page: pointer to the rpi memory region. * * This routine is invoked to post a single rpi header to the * HBA consistent with the SLI-4 interface spec. This memory region * maps up to 64 rpi context regions. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) { LPFC_MBOXQ_t *mboxq; struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; uint32_t rc = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; /* SLI4 ports that support extents do not require RPI headers. */ if (!phba->sli4_hba.rpi_hdrs_in_use) return rc; if (phba->sli4_hba.extents_in_use) return -EIO; /* The port is notified of the header region via a mailbox command. */ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2001 Unable to allocate memory for issuing " "SLI_CONFIG_SPECIAL mailbox command\n"); return -ENOMEM; } /* Post all rpi memory regions to the port. */ hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, sizeof(struct lpfc_mbx_post_hdr_tmpl) - sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); /* Post the physical rpi to the port for this rpi header. 
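	 * The template covers rpi_page->page_count pages beginning at
	 * rpi_page->start_rpi.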
*/ bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, rpi_page->start_rpi); bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, hdr_tmpl, rpi_page->page_count); hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); mempool_free(mboxq, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2514 POST_RPI_HDR mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); rc = -ENXIO; } else { /* * The next_rpi stores the next logical module-64 rpi value used * to post physical rpis in subsequent rpi postings. */ spin_lock_irq(&phba->hbalock); phba->sli4_hba.next_rpi = rpi_page->next_rpi; spin_unlock_irq(&phba->hbalock); } return rc; } /** * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the * HBA consistent with the SLI-4 interface spec. This routine * posts a SLI4_PAGE_SIZE memory region to the port to hold up to * SLI4_PAGE_SIZE modulo 64 rpi context headers. * * Returns * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful * LPFC_RPI_ALLOC_ERROR if no rpis are available. **/ int lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) { unsigned long rpi; uint16_t max_rpi, rpi_limit; uint16_t rpi_remaining, lrpi = 0; struct lpfc_rpi_hdr *rpi_hdr; unsigned long iflag; /* * Fetch the next logical rpi. Because this index is logical, * the driver starts at 0 each time. */ spin_lock_irqsave(&phba->hbalock, iflag); max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; rpi_limit = phba->sli4_hba.next_rpi; rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit); if (rpi >= rpi_limit) rpi = LPFC_RPI_ALLOC_ERROR; else { set_bit(rpi, phba->sli4_hba.rpi_bmask); phba->sli4_hba.max_cfg_param.rpi_used++; phba->sli4_hba.rpi_count++; } lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0001 Allocated rpi:x%x max:x%x lim:x%x\n", (int) rpi, max_rpi, rpi_limit); /* * Don't try to allocate more rpi header regions if the device limit * has been exhausted. */ if ((rpi == LPFC_RPI_ALLOC_ERROR) && (phba->sli4_hba.rpi_count >= max_rpi)) { spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } /* * RPI header postings are not required for SLI4 ports capable of * extents. */ if (!phba->sli4_hba.rpi_hdrs_in_use) { spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } /* * If the driver is running low on rpi resources, allocate another * page now. Note that the next_rpi value is used because * it represents how many are actually in use whereas max_rpi notes * how many are supported max by the device. */ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; spin_unlock_irqrestore(&phba->hbalock, iflag); if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2002 Error Could not grow rpi " "count\n"); } else { lrpi = rpi_hdr->start_rpi; rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); } } return rpi; } /** * __lpfc_sli4_free_rpi - Release an rpi for reuse. * @phba: pointer to lpfc hba data structure. 
* @rpi: rpi to free * * This routine is invoked to release an rpi to the pool of * available rpis maintained by the driver. **/ static void __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) { /* * if the rpi value indicates a prior unreg has already * been done, skip the unreg. */ if (rpi == LPFC_RPI_ALLOC_ERROR) return; if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { phba->sli4_hba.rpi_count--; phba->sli4_hba.max_cfg_param.rpi_used--; } else { lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "2016 rpi %x not inuse\n", rpi); } } /** * lpfc_sli4_free_rpi - Release an rpi for reuse. * @phba: pointer to lpfc hba data structure. * @rpi: rpi to free * * This routine is invoked to release an rpi to the pool of * available rpis maintained by the driver. **/ void lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) { spin_lock_irq(&phba->hbalock); __lpfc_sli4_free_rpi(phba, rpi); spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli4_remove_rpis - Remove the rpi bitmask region * @phba: pointer to lpfc hba data structure. * * This routine is invoked to remove the memory region that * provided rpi via a bitmask. **/ void lpfc_sli4_remove_rpis(struct lpfc_hba *phba) { kfree(phba->sli4_hba.rpi_bmask); kfree(phba->sli4_hba.rpi_ids); bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); } /** * lpfc_sli4_resume_rpi - Remove the rpi bitmask region * @ndlp: pointer to lpfc nodelist data structure. * @cmpl: completion call-back. * @arg: data to load as MBox 'caller buffer information' * * This routine is invoked to remove the memory region that * provided rpi via a bitmask. **/ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) { LPFC_MBOXQ_t *mboxq; struct lpfc_hba *phba = ndlp->phba; int rc; /* The port is notified of the header region via a mailbox command. */ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; /* If cmpl assigned, then this nlp_get pairs with * lpfc_mbx_cmpl_resume_rpi. * * Else cmpl is NULL, then this nlp_get pairs with * lpfc_sli_def_mbox_cmpl. */ if (!lpfc_nlp_get(ndlp)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2122 %s: Failed to get nlp ref\n", __func__); mempool_free(mboxq, phba->mbox_mem_pool); return -EIO; } /* Post all rpi memory regions to the port. */ lpfc_resume_rpi(mboxq, ndlp); if (cmpl) { mboxq->mbox_cmpl = cmpl; mboxq->ctx_buf = arg; } else mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mboxq->ctx_ndlp = ndlp; mboxq->vport = ndlp->vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2010 Resume RPI Mailbox failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mboxq->u.mqe)); lpfc_nlp_put(ndlp); mempool_free(mboxq, phba->mbox_mem_pool); return -EIO; } return 0; } /** * lpfc_sli4_init_vpi - Initialize a vpi with the port * @vport: Pointer to the vport for which the vpi is being initialized * * This routine is invoked to activate a vpi with the port. 
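 * The INIT_VPI mailbox command is issued synchronously; the mailbox memory
 * is freed unless the command timed out.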
* * Returns: * 0 success * -Evalue otherwise **/ int lpfc_sli4_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; int rc = 0; int retval = MBX_SUCCESS; uint32_t mbox_tmo; struct lpfc_hba *phba = vport->phba; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; lpfc_init_vpi(phba, mboxq, vport->vpi); mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); if (rc != MBX_SUCCESS) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2022 INIT VPI Mailbox failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mboxq->u.mqe)); retval = -EIO; } if (rc != MBX_TIMEOUT) mempool_free(mboxq, vport->phba->mbox_mem_pool); return retval; } /** * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. * @phba: pointer to lpfc hba data structure. * @mboxq: Pointer to mailbox object. * * This routine is invoked to manually add a single FCF record. The caller * must pass a completely initialized FCF_Record. This routine takes * care of the nonembedded mailbox operations. **/ static void lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { void *virt_addr; union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; virt_addr = mboxq->sge_array->addr[0]; /* The IOCTL status is embedded in the mailbox subheader. */ shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if ((shdr_status || shdr_add_status) && (shdr_status != STATUS_FCF_IN_USE)) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2558 ADD_FCF_RECORD mailbox failed with " "status x%x add_status x%x\n", shdr_status, shdr_add_status); lpfc_sli4_mbox_cmd_free(phba, mboxq); } /** * lpfc_sli4_add_fcf_record - Manually add an FCF Record. * @phba: pointer to lpfc hba data structure. * @fcf_record: pointer to the initialized fcf record to add. * * This routine is invoked to manually add a single FCF record. The caller * must pass a completely initialized FCF_Record. This routine takes * care of the nonembedded mailbox operations. **/ int lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) { int rc = 0; LPFC_MBOXQ_t *mboxq; uint8_t *bytep; void *virt_addr; struct lpfc_mbx_sge sge; uint32_t alloc_len, req_len; uint32_t fcfindex; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2009 Failed to allocate mbox for ADD_FCF cmd\n"); return -ENOMEM; } req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); /* Allocate DMA memory and set up the non-embedded mailbox command */ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_ADD_FCF, req_len, LPFC_SLI4_MBX_NEMBED); if (alloc_len < req_len) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2523 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); lpfc_sli4_mbox_cmd_free(phba, mboxq); return -ENOMEM; } /* * Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); virt_addr = mboxq->sge_array->addr[0]; /* * Configure the FCF record for FCFI 0. This is the driver's * hardcoded default and gets used in nonFIP mode. 
*/ fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); /* * Copy the fcf_index and the FCF Record Data. The data starts after * the FCoE header plus word10. The data copy needs to be endian * correct. */ bytep += sizeof(uint32_t); lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2515 ADD_FCF_RECORD mailbox failed with " "status 0x%x\n", rc); lpfc_sli4_mbox_cmd_free(phba, mboxq); rc = -EIO; } else rc = 0; return rc; } /** * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. * @phba: pointer to lpfc hba data structure. * @fcf_record: pointer to the fcf record to write the default data. * @fcf_index: FCF table entry index. * * This routine is invoked to build the driver's default FCF record. The * values used are hardcoded. This routine handles memory initialization. * **/ void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record, uint16_t fcf_index) { memset(fcf_record, 0, sizeof(struct fcf_record)); fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, LPFC_FCF_FPMA | LPFC_FCF_SPMA); /* Set the VLAN bit map */ if (phba->valid_vlan) { fcf_record->vlan_bitmap[phba->vlan_id / 8] = 1 << (phba->vlan_id % 8); } } /** * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * * This routine is invoked to scan the entire FCF table by reading FCF * record and processing it one at a time starting from the @fcf_index * for initial FCF discovery or fast FCF failover rediscovery. * * Return 0 if the mailbox command is submitted successfully, none 0 * otherwise. 
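 *
 * Hedged usage sketch (LPFC_FCOE_FCF_GET_FIRST is the index this routine
 * itself treats as the start of a fresh scan):
 *
 *	if (lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST))
 *		;	/* scan not started; FCF_TS_INPROG has been cleared */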
**/ int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) { int rc = 0, error; LPFC_MBOXQ_t *mboxq; phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2000 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; goto fail_fcf_scan; } /* Construct the read FCF record mailbox command */ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); if (rc) { error = -EINVAL; goto fail_fcf_scan; } /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; spin_lock_irq(&phba->hbalock); phba->hba_flag |= FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; else { /* Reset eligible FCF count for new scan */ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) phba->fcf.eligible_fcf_cnt = 0; error = 0; } fail_fcf_scan: if (error) { if (mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); /* FCF scan failed, clear FCF_TS_INPROG flag */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); } return error; } /** * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * * This routine is invoked to read an FCF record indicated by @fcf_index * and to use it for FLOGI roundrobin FCF failover. * * Return 0 if the mailbox command is submitted successfully, none 0 * otherwise. **/ int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) { int rc = 0, error; LPFC_MBOXQ_t *mboxq; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, "2763 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; goto fail_fcf_read; } /* Construct the read FCF record mailbox command */ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); if (rc) { error = -EINVAL; goto fail_fcf_read; } /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; else error = 0; fail_fcf_read: if (error && mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); return error; } /** * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * * This routine is invoked to read an FCF record indicated by @fcf_index to * determine whether it's eligible for FLOGI roundrobin failover list. * * Return 0 if the mailbox command is submitted successfully, none 0 * otherwise. 
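 *
 * Illustrative call site (the record processing itself is done
 * asynchronously by lpfc_mbx_cmpl_read_fcf_rec, so a caller only checks
 * that submission succeeded):
 *
 *	if (lpfc_sli4_read_fcf_rec(phba, fcf_index))
 *		;	/* read not submitted; eligibility state unchanged */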
**/ int lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) { int rc = 0, error; LPFC_MBOXQ_t *mboxq; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, "2758 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; goto fail_fcf_read; } /* Construct the read FCF record mailbox command */ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); if (rc) { error = -EINVAL; goto fail_fcf_read; } /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; else error = 0; fail_fcf_read: if (error && mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); return error; } /** * lpfc_check_next_fcf_pri_level * @phba: pointer to the lpfc_hba struct for this port. * This routine is called from the lpfc_sli4_fcf_rr_next_index_get * routine when the rr_bmask is empty. The FCF indecies are put into the * rr_bmask based on their priority level. Starting from the highest priority * to the lowest. The most likely FCF candidate will be in the highest * priority group. When this routine is called it searches the fcf_pri list for * next lowest priority group and repopulates the rr_bmask with only those * fcf_indexes. * returns: * 1=success 0=failure **/ static int lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) { uint16_t next_fcf_pri; uint16_t last_index; struct lpfc_fcf_pri *fcf_pri; int rc; int ret = 0; last_index = find_first_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3060 Last IDX %d\n", last_index); /* Verify the priority list has 2 or more entries */ spin_lock_irq(&phba->hbalock); if (list_empty(&phba->fcf.fcf_pri_list) || list_is_singular(&phba->fcf.fcf_pri_list)) { spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "3061 Last IDX %d\n", last_index); return 0; /* Empty rr list */ } spin_unlock_irq(&phba->hbalock); next_fcf_pri = 0; /* * Clear the rr_bmask and set all of the bits that are at this * priority. */ memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); spin_lock_irq(&phba->hbalock); list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) continue; /* * the 1st priority that has not FLOGI failed * will be the highest. */ if (!next_fcf_pri) next_fcf_pri = fcf_pri->fcf_rec.priority; spin_unlock_irq(&phba->hbalock); if (fcf_pri->fcf_rec.priority == next_fcf_pri) { rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_pri->fcf_rec.fcf_index); if (rc) return 0; } spin_lock_irq(&phba->hbalock); } /* * if next_fcf_pri was not set above and the list is not empty then * we have failed flogis on all of them. So reset flogi failed * and start at the beginning. */ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; /* * the 1st priority that has not FLOGI failed * will be the highest. 
*/ if (!next_fcf_pri) next_fcf_pri = fcf_pri->fcf_rec.priority; spin_unlock_irq(&phba->hbalock); if (fcf_pri->fcf_rec.priority == next_fcf_pri) { rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_pri->fcf_rec.fcf_index); if (rc) return 0; } spin_lock_irq(&phba->hbalock); } } else ret = 1; spin_unlock_irq(&phba->hbalock); return ret; } /** * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index * @phba: pointer to lpfc hba data structure. * * This routine is to get the next eligible FCF record index in a round * robin fashion. If the next eligible FCF record index equals to the * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) * shall be returned, otherwise, the next eligible FCF record's index * shall be returned. **/ uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) { uint16_t next_fcf_index; initial_priority: /* Search start from next bit of currently registered FCF index */ next_fcf_index = phba->fcf.current_rec.fcf_indx; next_priority: /* Determine the next fcf index to check */ next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX, next_fcf_index); /* Wrap around condition on phba->fcf.fcf_rr_bmask */ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { /* * If we have wrapped then we need to clear the bits that * have been tested so that we can detect when we should * change the priority level. */ next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX); } /* Check roundrobin failover list empty condition */ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || next_fcf_index == phba->fcf.current_rec.fcf_indx) { /* * If next fcf index is not found check if there are lower * Priority level fcf's in the fcf_priority list. * Set up the rr_bmask with all of the avaiable fcf bits * at that level and continue the selection process. */ if (lpfc_check_next_fcf_pri_level(phba)) goto initial_priority; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2844 No roundrobin failover FCF available\n"); return LPFC_FCOE_FCF_NEXT_NONE; } if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) { if (list_is_singular(&phba->fcf.fcf_pri_list)) return LPFC_FCOE_FCF_NEXT_NONE; goto next_priority; } lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2845 Get next roundrobin failover FCF (x%x)\n", next_fcf_index); return next_fcf_index; } /** * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index * @phba: pointer to lpfc hba data structure. * @fcf_index: index into the FCF table to 'set' * * This routine sets the FCF record index in to the eligible bmask for * roundrobin failover search. It checks to make sure that the index * does not go beyond the range of the driver allocated bmask dimension * before setting the bit. * * Returns 0 if the index bit successfully set, otherwise, it returns * -EINVAL. 
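 *
 * Hypothetical caller sketch; a caller would normally just give up on an
 * out-of-range index, as lpfc_check_next_fcf_pri_level() above does:
 *
 *	if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
 *		return;	/* fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX */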
**/ int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) { if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2610 FCF (x%x) reached driver's book " "keeping dimension:x%x\n", fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); return -EINVAL; } /* Set the eligible FCF record index bmask */ set_bit(fcf_index, phba->fcf.fcf_rr_bmask); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2790 Set FCF (x%x) to roundrobin FCF failover " "bmask\n", fcf_index); return 0; } /** * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index * @phba: pointer to lpfc hba data structure. * @fcf_index: index into the FCF table to 'clear' * * This routine clears the FCF record index from the eligible bmask for * roundrobin failover search. It checks to make sure that the index * does not go beyond the range of the driver allocated bmask dimension * before clearing the bit. **/ void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) { struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2762 FCF (x%x) reached driver's book " "keeping dimension:x%x\n", fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); return; } /* Clear the eligible FCF record index bmask */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, list) { if (fcf_pri->fcf_rec.fcf_index == fcf_index) { list_del_init(&fcf_pri->list); break; } } spin_unlock_irq(&phba->hbalock); clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2791 Clear FCF (x%x) from roundrobin failover " "bmask\n", fcf_index); } /** * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table * @phba: pointer to lpfc hba data structure. * @mbox: An allocated pointer to type LPFC_MBOXQ_t * * This routine is the completion routine for the rediscover FCF table mailbox * command. If the mailbox command returned failure, it will try to stop the * FCF rediscover wait timer. **/ static void lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; uint32_t shdr_status, shdr_add_status; redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; shdr_status = bf_get(lpfc_mbox_hdr_status, &redisc_fcf->header.cfg_shdr.response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &redisc_fcf->header.cfg_shdr.response); if (shdr_status || shdr_add_status) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2746 Requesting for FCF rediscovery failed " "status x%x add_status x%x\n", shdr_status, shdr_add_status); if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; spin_unlock_irq(&phba->hbalock); /* * CVL event triggered FCF rediscover request failed, * last resort to re-try current registered FCF entry. */ lpfc_retry_pport_discovery(phba); } else { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); /* * DEAD FCF event triggered FCF rediscover request * failed, last resort to fail over as a link down * to FCF registration. */ lpfc_sli4_fcf_dead_failthrough(phba); } } else { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2775 Start FCF rediscover quiescent timer\n"); /* * Start FCF rediscovery wait timer for pending FCF * before rescan FCF record table. 
*/ lpfc_fcf_redisc_wait_start_timer(phba); } mempool_free(mbox, phba->mbox_mem_pool); } /** * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to request for rediscovery of the entire FCF table * by the port. **/ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mbox; struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; int rc, length; /* Cancel retry delay timers to all vports before FCF rediscover */ lpfc_cancel_all_vport_retry_delay_timer(phba); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2745 Failed to allocate mbox for " "requesting FCF rediscover.\n"); return -ENOMEM; } length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, length, LPFC_SLI4_MBX_EMBED); redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; /* Set count to 0 for invalidating the entire FCF database */ bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); /* Issue the mailbox command asynchronously */ mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); return -EIO; } return 0; } /** * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event * @phba: pointer to lpfc hba data structure. * * This function is the failover routine as a last resort to the FCF DEAD * event when driver failed to perform fast FCF failover. **/ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) { uint32_t link_state; /* * Last resort as FCF DEAD event failover will treat this as * a link down, but save the link state because we don't want * it to be changed to Link Down unless it is already down. */ link_state = phba->link_state; lpfc_linkdown(phba); phba->link_state = link_state; /* Unregister FCF if no devices connected to it */ lpfc_unregister_unused_fcf(phba); } /** * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. * @phba: pointer to lpfc hba data structure. * @rgn23_data: pointer to configure region 23 data. * * This function gets SLI3 port configure region 23 data through memory dump * mailbox command. When it successfully retrieves data, the size of the data * will be returned, otherwise, 0 will be returned. **/ static uint32_t lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) { LPFC_MBOXQ_t *pmb = NULL; MAILBOX_t *mb; uint32_t offset = 0; int rc; if (!rgn23_data) return 0; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2600 failed to allocate mailbox memory\n"); return 0; } mb = &pmb->u.mb; do { lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2601 failed to read config " "region 23, rc 0x%x Status 0x%x\n", rc, mb->mbxStatus); mb->un.varDmp.word_cnt = 0; } /* * dump mem may return a zero when finished or we got a * mailbox error, either way we are done. 
*/ if (mb->un.varDmp.word_cnt == 0) break; if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, rgn23_data + offset, mb->un.varDmp.word_cnt); offset += mb->un.varDmp.word_cnt; } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); mempool_free(pmb, phba->mbox_mem_pool); return offset; } /** * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. * @phba: pointer to lpfc hba data structure. * @rgn23_data: pointer to configure region 23 data. * * This function gets SLI4 port configure region 23 data through memory dump * mailbox command. When it successfully retrieves data, the size of the data * will be returned, otherwise, 0 will be returned. **/ static uint32_t lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) { LPFC_MBOXQ_t *mboxq = NULL; struct lpfc_dmabuf *mp = NULL; struct lpfc_mqe *mqe; uint32_t data_length = 0; int rc; if (!rgn23_data) return 0; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3105 failed to allocate mailbox memory\n"); return 0; } if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) goto out; mqe = &mboxq->u.mqe; mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc) goto out; data_length = mqe->un.mb_words[5]; if (data_length == 0) goto out; if (data_length > DMP_RGN23_SIZE) { data_length = 0; goto out; } lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); out: lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); return data_length; } /** * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. * @phba: pointer to lpfc hba data structure. * * This function read region 23 and parse TLV for port status to * decide if the user disaled the port. If the TLV indicates the * port is disabled, the hba_flag is set accordingly. **/ void lpfc_sli_read_link_ste(struct lpfc_hba *phba) { uint8_t *rgn23_data = NULL; uint32_t if_type, data_size, sub_tlv_len, tlv_offset; uint32_t offset = 0; /* Get adapter Region 23 data */ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); if (!rgn23_data) goto out; if (phba->sli_rev < LPFC_SLI_REV4) data_size = lpfc_sli_get_config_region23(phba, rgn23_data); else { if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type == LPFC_SLI_INTF_IF_TYPE_0) goto out; data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); } if (!data_size) goto out; /* Check the region signature first */ if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2619 Config region 23 has bad signature\n"); goto out; } offset += 4; /* Check the data structure version */ if (rgn23_data[offset] != LPFC_REGION23_VERSION) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2620 Config region 23 has bad version\n"); goto out; } offset += 4; /* Parse TLV entries in the region */ while (offset < data_size) { if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) break; /* * If the TLV is not driver specific TLV or driver id is * not linux driver id, skip the record. */ if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || (rgn23_data[offset + 3] != 0)) { offset += rgn23_data[offset + 1] * 4 + 4; continue; } /* Driver found a driver specific TLV in the config region */ sub_tlv_len = rgn23_data[offset + 1] * 4; offset += 4; tlv_offset = 0; /* * Search for configured port state sub-TLV. 
*/ while ((offset < data_size) && (tlv_offset < sub_tlv_len)) { if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { offset += 4; tlv_offset += 4; break; } if (rgn23_data[offset] != PORT_STE_TYPE) { offset += rgn23_data[offset + 1] * 4 + 4; tlv_offset += rgn23_data[offset + 1] * 4 + 4; continue; } /* This HBA contains PORT_STE configured */ if (!rgn23_data[offset + 2]) phba->hba_flag |= LINK_DISABLED; goto out; } } out: kfree(rgn23_data); return; } /** * lpfc_log_fw_write_cmpl - logs firmware write completion status * @phba: pointer to lpfc hba data structure * @shdr_status: wr_object rsp's status field * @shdr_add_status: wr_object rsp's add_status field * @shdr_add_status_2: wr_object rsp's add_status_2 field * @shdr_change_status: wr_object rsp's change_status field * @shdr_csf: wr_object rsp's csf bit * * This routine is intended to be called after a firmware write completes. * It will log next action items to be performed by the user to instantiate * the newly downloaded firmware or reason for incompatibility. **/ static void lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, u32 shdr_add_status, u32 shdr_add_status_2, u32 shdr_change_status, u32 shdr_csf) { lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "4198 %s: flash_id x%02x, asic_rev x%02x, " "status x%02x, add_status x%02x, add_status_2 x%02x, " "change_status x%02x, csf %01x\n", __func__, phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, shdr_status, shdr_add_status, shdr_add_status_2, shdr_change_status, shdr_csf); if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { switch (shdr_add_status_2) { case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "4199 Firmware write failed: " "image incompatible with flash x%02x\n", phba->sli4_hba.flash_id); break; case LPFC_ADD_STATUS_2_INCORRECT_ASIC: lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "4200 Firmware write failed: " "image incompatible with ASIC " "architecture x%02x\n", phba->sli4_hba.asic_rev); break; default: lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, "4210 Firmware write failed: " "add_status_2 x%02x\n", shdr_add_status_2); break; } } else if (!shdr_status && !shdr_add_status) { if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { if (shdr_csf) shdr_change_status = LPFC_CHANGE_STATUS_PCI_RESET; } switch (shdr_change_status) { case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, "3198 Firmware write complete: System " "reboot required to instantiate\n"); break; case (LPFC_CHANGE_STATUS_FW_RESET): lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, "3199 Firmware write complete: " "Firmware reset required to " "instantiate\n"); break; case (LPFC_CHANGE_STATUS_PORT_MIGRATION): lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, "3200 Firmware write complete: Port " "Migration or PCI Reset required to " "instantiate\n"); break; case (LPFC_CHANGE_STATUS_PCI_RESET): lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, "3201 Firmware write complete: PCI " "Reset required to instantiate\n"); break; default: break; } } } /** * lpfc_wr_object - write an object to the firmware * @phba: HBA structure that indicates port to create a queue on. * @dmabuf_list: list of dmabufs to write to the port. * @size: the total byte value of the objects to write to the port. * @offset: the current offset to be used to start the transfer. * * This routine will create a wr_object mailbox command to send to the port. 
* the mailbox command will be constructed using the dma buffers described in * @dmabuf_list to create a list of BDEs. This routine will fill in as many * BDEs that the imbedded mailbox can support. The @offset variable will be * used to indicate the starting offset of the transfer and will also return * the offset after the write object mailbox has completed. @size is used to * determine the end of the object and whether the eof bit should be set. * * Return 0 is successful and offset will contain the new offset to use * for the next write. * Return negative value for error cases. **/ int lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, uint32_t size, uint32_t *offset) { struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; int mbox_status = 0; uint32_t shdr_status, shdr_add_status, shdr_add_status_2; uint32_t shdr_change_status = 0, shdr_csf = 0; uint32_t mbox_tmo; struct lpfc_dmabuf *dmabuf; uint32_t written = 0; bool check_change_status = false; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_WRITE_OBJECT, sizeof(struct lpfc_mbx_wr_object) - sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; wr_object->u.request.write_offset = *offset; sprintf((uint8_t *)wr_object->u.request.object_name, "/"); wr_object->u.request.object_name[0] = cpu_to_le32(wr_object->u.request.object_name[0]); bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); list_for_each_entry(dmabuf, dmabuf_list, list) { if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) break; wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); wr_object->u.request.bde[i].addrHigh = putPaddrHigh(dmabuf->phys); if (written + SLI4_PAGE_SIZE >= size) { wr_object->u.request.bde[i].tus.f.bdeSize = (size - written); written += (size - written); bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); check_change_status = true; } else { wr_object->u.request.bde[i].tus.f.bdeSize = SLI4_PAGE_SIZE; written += SLI4_PAGE_SIZE; } i++; } wr_object->u.request.bde_count = i; bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); if (!phba->sli4_hba.intr_enable) mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */ rc = mbox_status; /* The IOCTL status is embedded in the mailbox subheader. 
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &wr_object->header.cfg_shdr.response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &wr_object->header.cfg_shdr.response); shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, &wr_object->header.cfg_shdr.response); if (check_change_status) { shdr_change_status = bf_get(lpfc_wr_object_change_status, &wr_object->u.response); shdr_csf = bf_get(lpfc_wr_object_csf, &wr_object->u.response); } if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3025 Write Object mailbox failed with " "status x%x add_status x%x, add_status_2 x%x, " "mbx status x%x\n", shdr_status, shdr_add_status, shdr_add_status_2, rc); rc = -ENXIO; *offset = shdr_add_status; } else { *offset += wr_object->u.response.actual_write_length; } if (rc || check_change_status) lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, shdr_add_status_2, shdr_change_status, shdr_csf); if (!phba->sli4_hba.intr_enable) mempool_free(mbox, phba->mbox_mem_pool); else if (mbox_status != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); return rc; } /** * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. * @vport: pointer to vport data structure. * * This function iterate through the mailboxq and clean up all REG_LOGIN * and REG_VPI mailbox commands associated with the vport. This function * is called when driver want to restart discovery of the vport due to * a Clear Virtual Link event. **/ void lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mb, *nextmb; struct lpfc_nodelist *ndlp; struct lpfc_nodelist *act_mbx_ndlp = NULL; LIST_HEAD(mbox_cmd_list); uint8_t restart_loop; /* Clean up internally queued mailbox commands with the vport */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if (mb->vport != vport) continue; if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && (mb->u.mb.mbxCommand != MBX_REG_VPI)) continue; list_move_tail(&mb->list, &mbox_cmd_list); } /* Clean up active mailbox command with the vport */ mb = phba->sli.mbox_active; if (mb && (mb->vport == vport)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || (mb->u.mb.mbxCommand == MBX_REG_VPI)) mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; /* This reference is local to this routine. The * reference is removed at routine exit. */ act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; } } /* Cleanup any mailbox completions which are not yet processed */ do { restart_loop = 0; list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { /* * If this mailox is already processed or it is * for another vport ignore it. 
*/ if ((mb->vport != vport) || (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) continue; if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && (mb->u.mb.mbxCommand != MBX_REG_VPI)) continue; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; restart_loop = 1; spin_unlock_irq(&phba->hbalock); spin_lock(&ndlp->lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock(&ndlp->lock); spin_lock_irq(&phba->hbalock); break; } } } while (restart_loop); spin_unlock_irq(&phba->hbalock); /* Release the cleaned-up mailbox commands */ while (!list_empty(&mbox_cmd_list)) { list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; mb->ctx_ndlp = NULL; if (ndlp) { spin_lock(&ndlp->lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock(&ndlp->lock); lpfc_nlp_put(ndlp); } } lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED); } /* Release the ndlp with the cleaned-up active mailbox command */ if (act_mbx_ndlp) { spin_lock(&act_mbx_ndlp->lock); act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock(&act_mbx_ndlp->lock); lpfc_nlp_put(act_mbx_ndlp); } } /** * lpfc_drain_txq - Drain the txq * @phba: Pointer to HBA context object. * * This function attempt to submit IOCBs on the txq * to the adapter. For SLI4 adapters, the txq contains * ELS IOCBs that have been deferred because the there * are no SGLs. This congestion can occur with large * vport counts during node discovery. **/ uint32_t lpfc_drain_txq(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli_ring *pring; struct lpfc_iocbq *piocbq = NULL; unsigned long iflags = 0; char *fail_msg = NULL; uint32_t txq_cnt = 0; struct lpfc_queue *wq; int ret = 0; if (phba->link_flag & LS_MDS_LOOPBACK) { /* MDS WQE are posted only to first WQ*/ wq = phba->sli4_hba.hdwq[0].io_wq; if (unlikely(!wq)) return 0; pring = wq->pring; } else { wq = phba->sli4_hba.els_wq; if (unlikely(!wq)) return 0; pring = lpfc_phba_elsring(phba); } if (unlikely(!pring) || list_empty(&pring->txq)) return 0; spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { txq_cnt++; } if (txq_cnt > pring->txq_max) pring->txq_max = txq_cnt; spin_unlock_irqrestore(&pring->ring_lock, iflags); while (!list_empty(&pring->txq)) { spin_lock_irqsave(&pring->ring_lock, iflags); piocbq = lpfc_sli_ringtx_get(phba, pring); if (!piocbq) { spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2823 txq empty and txq_cnt is %d\n ", txq_cnt); break; } txq_cnt--; ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0); if (ret && ret != IOCB_BUSY) { fail_msg = " - Cannot send IO "; piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; } if (fail_msg) { piocbq->cmd_flag |= LPFC_DRIVER_ABORTED; /* Failed means we can't issue and need to cancel */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2822 IOCB failed %s iotag 0x%x " "xri 0x%x %d flg x%x\n", fail_msg, piocbq->iotag, piocbq->sli4_xritag, ret, piocbq->cmd_flag); list_add_tail(&piocbq->list, &completions); fail_msg = NULL; } spin_unlock_irqrestore(&pring->ring_lock, iflags); if (txq_cnt == 0 || ret == IOCB_BUSY) break; } /* Cancel all the IOCBs that cannot be issued */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); return txq_cnt; } /** * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 
* @phba: Pointer to HBA context object. * @pwqeq: Pointer to command WQE. * @sglq: Pointer to the scatter gather queue object. * * This routine converts the bpl or bde that is in the WQE * to a sgl list for the sli4 hardware. The physical address * of the bpl/bde is converted back to a virtual address. * If the WQE contains a BPL then the list of BDE's is * converted to sli4_sge's. If the WQE contains a single * BDE then it is converted to a single sli_sge. * The WQE is still in cpu endianness so the contents of * the bpl can be used without byte swapping. * * Returns valid XRI = Success, NO_XRI = Failure. */ static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, struct lpfc_sglq *sglq) { uint16_t xritag = NO_XRI; struct ulp_bde64 *bpl = NULL; struct ulp_bde64 bde; struct sli4_sge *sgl = NULL; struct lpfc_dmabuf *dmabuf; union lpfc_wqe128 *wqe; int numBdes = 0; int i = 0; uint32_t offset = 0; /* accumulated offset in the sg request list */ int inbound = 0; /* number of sg reply entries inbound from firmware */ uint32_t cmd; if (!pwqeq || !sglq) return xritag; sgl = (struct sli4_sge *)sglq->sgl; wqe = &pwqeq->wqe; pwqeq->iocb.ulpIoTag = pwqeq->iotag; cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); if (cmd == CMD_XMIT_BLS_RSP64_WQE) return sglq->sli4_xritag; numBdes = pwqeq->num_bdes; if (numBdes) { /* The addrHigh and addrLow fields within the WQE * have not been byteswapped yet so there is no * need to swap them back. */ if (pwqeq->bpl_dmabuf) dmabuf = pwqeq->bpl_dmabuf; else return xritag; bpl = (struct ulp_bde64 *)dmabuf->virt; if (!bpl) return xritag; for (i = 0; i < numBdes; i++) { /* Should already be byte swapped. */ sgl->addr_hi = bpl->addrHigh; sgl->addr_lo = bpl->addrLow; sgl->word2 = le32_to_cpu(sgl->word2); if ((i+1) == numBdes) bf_set(lpfc_sli4_sge_last, sgl, 1); else bf_set(lpfc_sli4_sge_last, sgl, 0); /* swap the size field back to the cpu so we * can assign it to the sgl. */ bde.tus.w = le32_to_cpu(bpl->tus.w); sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); /* The offsets in the sgl need to be accumulated * separately for the request and reply lists. * The request is always first, the reply follows. */ switch (cmd) { case CMD_GEN_REQUEST64_WQE: /* add up the reply sg entries */ if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) inbound++; /* first inbound? reset the offset */ if (inbound == 1) offset = 0; bf_set(lpfc_sli4_sge_offset, sgl, offset); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); offset += bde.tus.f.bdeSize; break; case CMD_FCP_TRSP64_WQE: bf_set(lpfc_sli4_sge_offset, sgl, 0); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); break; case CMD_FCP_TSEND64_WQE: case CMD_FCP_TRECEIVE64_WQE: bf_set(lpfc_sli4_sge_type, sgl, bpl->tus.f.bdeFlags); if (i < 3) offset = 0; else offset += bde.tus.f.bdeSize; bf_set(lpfc_sli4_sge_offset, sgl, offset); break; } sgl->word2 = cpu_to_le32(sgl->word2); bpl++; sgl++; } } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { /* The addrHigh and addrLow fields of the BDE have not * been byteswapped yet so they need to be swapped * before putting them in the sgl. */ sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); } return sglq->sli4_xritag; } /** * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) * @phba: Pointer to HBA context object. 
* @qp: Pointer to HDW queue. * @pwqe: Pointer to command WQE. **/ int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, struct lpfc_iocbq *pwqe) { union lpfc_wqe128 *wqe = &pwqe->wqe; struct lpfc_async_xchg_ctx *ctxp; struct lpfc_queue *wq; struct lpfc_sglq *sglq; struct lpfc_sli_ring *pring; unsigned long iflags; uint32_t ret = 0; /* NVME_LS and NVME_LS ABTS requests. */ if (pwqe->cmd_flag & LPFC_IO_NVME_LS) { pring = phba->sli4_hba.nvmels_wq->pring; lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, qp, wq_access); sglq = __lpfc_sli_get_els_sglq(phba, pwqe); if (!sglq) { spin_unlock_irqrestore(&pring->ring_lock, iflags); return WQE_BUSY; } pwqe->sli4_lxritag = sglq->sli4_lxritag; pwqe->sli4_xritag = sglq->sli4_xritag; if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { spin_unlock_irqrestore(&pring->ring_lock, iflags); return WQE_ERROR; } bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, pwqe->sli4_xritag); ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); return ret; } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_sli4_poll_eq(qp->hba_eq); return 0; } /* NVME_FCREQ and NVME_ABTS requests */ if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ wq = qp->io_wq; pring = wq->pring; bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, qp, wq_access); ret = lpfc_sli4_wq_put(wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); return ret; } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_sli4_poll_eq(qp->hba_eq); return 0; } /* NVMET requests */ if (pwqe->cmd_flag & LPFC_IO_NVMET) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ wq = qp->io_wq; pring = wq->pring; ctxp = pwqe->context_un.axchg; sglq = ctxp->ctxbuf->sglq; if (pwqe->sli4_xritag == NO_XRI) { pwqe->sli4_lxritag = sglq->sli4_lxritag; pwqe->sli4_xritag = sglq->sli4_xritag; } bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, pwqe->sli4_xritag); bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, qp, wq_access); ret = lpfc_sli4_wq_put(wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); return ret; } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_sli4_poll_eq(qp->hba_eq); return 0; } return WQE_ERROR; } /** * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort * @phba: Pointer to HBA context object. * @cmdiocb: Pointer to driver command iocb object. * @cmpl: completion function. * * Fill the appropriate fields for the abort WQE and call * internal routine lpfc_sli4_issue_wqe to send the WQE * This function is called with hbalock held and no ring_lock held. * * RETURNS 0 - SUCCESS **/ int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, void *cmpl) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_iocbq *abtsiocb = NULL; union lpfc_wqe128 *abtswqe; struct lpfc_io_buf *lpfc_cmd; int retval = IOCB_ERROR; u16 xritag = cmdiocb->sli4_xritag; /* * The scsi command can not be in txq and it is in flight because the * pCmd is still pointing at the SCSI command we have to abort. There * is no need to search the txcmplq. Just send an abort to the FW. 
*/ abtsiocb = __lpfc_sli_get_iocbq(phba); if (!abtsiocb) return WQE_NORESOURCE; /* Indicate the IO is being aborted by the driver. */ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; abtswqe = &abtsiocb->wqe; memset(abtswqe, 0, sizeof(*abtswqe)); if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); abtswqe->abort_cmd.rsrvd5 = 0; abtswqe->abort_cmd.wqe_com.abort_tag = xritag; bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag); bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0); bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1); bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocb->hba_wqidx = cmdiocb->hba_wqidx; abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX; if (cmdiocb->cmd_flag & LPFC_IO_FCP) abtsiocb->cmd_flag |= LPFC_IO_FCP; if (cmdiocb->cmd_flag & LPFC_IO_NVME) abtsiocb->cmd_flag |= LPFC_IO_NVME; if (cmdiocb->cmd_flag & LPFC_IO_FOF) abtsiocb->cmd_flag |= LPFC_IO_FOF; abtsiocb->vport = vport; abtsiocb->cmd_cmpl = cmpl; lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq); retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb); lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, "0359 Abort xri x%x, original iotag x%x, " "abort cmd iotag x%x retval x%x\n", xritag, cmdiocb->iotag, abtsiocb->iotag, retval); if (retval) { cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; __lpfc_sli_release_iocbq(phba, abtsiocb); } return retval; } #ifdef LPFC_MXP_STAT /** * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count * @phba: pointer to lpfc hba data structure. * @hwqid: belong to which HWQ. * * The purpose of this routine is to take a snapshot of pbl, pvt and busy count * 15 seconds after a test case is running. * * The user should call lpfc_debugfs_multixripools_write before running a test * case to clear stat_snapshot_taken. Then the user starts a test case. During * test case is running, stat_snapshot_taken is incremented by 1 every time when * this routine is called from heartbeat timer. When stat_snapshot_taken is * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. **/ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; struct lpfc_pvt_pool *pvt_pool; struct lpfc_pbl_pool *pbl_pool; u32 txcmplq_cnt; qp = &phba->sli4_hba.hdwq[hwqid]; multixri_pool = qp->p_multixri_pool; if (!multixri_pool) return; if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { pvt_pool = &qp->p_multixri_pool->pvt_pool; pbl_pool = &qp->p_multixri_pool->pbl_pool; txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; multixri_pool->stat_pbl_count = pbl_pool->count; multixri_pool->stat_pvt_count = pvt_pool->count; multixri_pool->stat_busy_count = txcmplq_cnt; } multixri_pool->stat_snapshot_taken++; } #endif /** * lpfc_adjust_pvt_pool_count - Adjust private pool count * @phba: pointer to lpfc hba data structure. * @hwqid: belong to which HWQ. * * This routine moves some XRIs from private to public pool when private pool * is not busy. 
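 *
 * Condensed sketch of the decision below (field names as used in this
 * routine; no additional behaviour implied):
 *
 *	if (multixri_pool->prev_io_req_count == multixri_pool->io_req_count)
 *		lpfc_move_xri_pvt_to_pbl(phba, hwqid);	/* pool was idle */
 *	else
 *		multixri_pool->prev_io_req_count =
 *			multixri_pool->io_req_count;	/* pool still busy */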
**/ void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) { struct lpfc_multixri_pool *multixri_pool; u32 io_req_count; u32 prev_io_req_count; multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; if (!multixri_pool) return; io_req_count = multixri_pool->io_req_count; prev_io_req_count = multixri_pool->prev_io_req_count; if (prev_io_req_count != io_req_count) { /* Private pool is busy */ multixri_pool->prev_io_req_count = io_req_count; } else { /* Private pool is not busy. * Move XRIs from private to public pool. */ lpfc_move_xri_pvt_to_pbl(phba, hwqid); } } /** * lpfc_adjust_high_watermark - Adjust high watermark * @phba: pointer to lpfc hba data structure. * @hwqid: belong to which HWQ. * * This routine sets high watermark as number of outstanding XRIs, * but make sure the new value is between xri_limit/2 and xri_limit. **/ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) { u32 new_watermark; u32 watermark_max; u32 watermark_min; u32 xri_limit; u32 txcmplq_cnt; u32 abts_io_bufs; struct lpfc_multixri_pool *multixri_pool; struct lpfc_sli4_hdw_queue *qp; qp = &phba->sli4_hba.hdwq[hwqid]; multixri_pool = qp->p_multixri_pool; if (!multixri_pool) return; xri_limit = multixri_pool->xri_limit; watermark_max = xri_limit; watermark_min = xri_limit / 2; txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; abts_io_bufs = qp->abts_scsi_io_bufs; abts_io_bufs += qp->abts_nvme_io_bufs; new_watermark = txcmplq_cnt + abts_io_bufs; new_watermark = min(watermark_max, new_watermark); new_watermark = max(watermark_min, new_watermark); multixri_pool->pvt_pool.high_watermark = new_watermark; #ifdef LPFC_MXP_STAT multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, new_watermark); #endif } /** * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool * @phba: pointer to lpfc hba data structure. * @hwqid: belong to which HWQ. * * This routine is called from hearbeat timer when pvt_pool is idle. * All free XRIs are moved from private to public pool on hwqid with 2 steps. * The first step moves (all - low_watermark) amount of XRIs. * The second step moves the rest of XRIs. 
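 *
 * Illustrative summary of the branch below (no extra behaviour implied):
 *
 *	if (pvt_pool->count > pvt_pool->low_watermark)
 *		splice (count - low_watermark) bufs to pbl_pool,
 *		keeping low_watermark bufs private;
 *	else
 *		splice every pvt_pool buf to pbl_pool;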
**/ void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) { struct lpfc_pbl_pool *pbl_pool; struct lpfc_pvt_pool *pvt_pool; struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; unsigned long iflag; struct list_head tmp_list; u32 tmp_count; qp = &phba->sli4_hba.hdwq[hwqid]; pbl_pool = &qp->p_multixri_pool->pbl_pool; pvt_pool = &qp->p_multixri_pool->pvt_pool; tmp_count = 0; lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); if (pvt_pool->count > pvt_pool->low_watermark) { /* Step 1: move (all - low_watermark) from pvt_pool * to pbl_pool */ /* Move low watermark of bufs from pvt_pool to tmp_list */ INIT_LIST_HEAD(&tmp_list); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &pvt_pool->list, list) { list_move_tail(&lpfc_ncmd->list, &tmp_list); tmp_count++; if (tmp_count >= pvt_pool->low_watermark) break; } /* Move all bufs from pvt_pool to pbl_pool */ list_splice_init(&pvt_pool->list, &pbl_pool->list); /* Move all bufs from tmp_list to pvt_pool */ list_splice(&tmp_list, &pvt_pool->list); pbl_pool->count += (pvt_pool->count - tmp_count); pvt_pool->count = tmp_count; } else { /* Step 2: move the rest from pvt_pool to pbl_pool */ list_splice_init(&pvt_pool->list, &pbl_pool->list); pbl_pool->count += pvt_pool->count; pvt_pool->count = 0; } spin_unlock(&pvt_pool->lock); spin_unlock_irqrestore(&pbl_pool->lock, iflag); } /** * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool * @phba: pointer to lpfc hba data structure * @qp: pointer to HDW queue * @pbl_pool: specified public free XRI pool * @pvt_pool: specified private free XRI pool * @count: number of XRIs to move * * This routine tries to move some free common bufs from the specified pbl_pool * to the specified pvt_pool. It might move less than count XRIs if there's not * enough in public pool. * * Return: * true - if XRIs are successfully moved from the specified pbl_pool to the * specified pvt_pool * false - if the specified pbl_pool is empty or locked by someone else **/ static bool _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, struct lpfc_pbl_pool *pbl_pool, struct lpfc_pvt_pool *pvt_pool, u32 count) { struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; unsigned long iflag; int ret; ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); if (ret) { if (pbl_pool->count) { /* Move a batch of XRIs from public to private pool */ lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &pbl_pool->list, list) { list_move_tail(&lpfc_ncmd->list, &pvt_pool->list); pvt_pool->count++; pbl_pool->count--; count--; if (count == 0) break; } spin_unlock(&pvt_pool->lock); spin_unlock_irqrestore(&pbl_pool->lock, iflag); return true; } spin_unlock_irqrestore(&pbl_pool->lock, iflag); } return false; } /** * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool * @phba: pointer to lpfc hba data structure. * @hwqid: belong to which HWQ. * @count: number of XRIs to move * * This routine tries to find some free common bufs in one of public pools with * Round Robin method. The search always starts from local hwqid, then the next * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, * a batch of free common bufs are moved to private pool on hwqid. * It might move less than count XRIs if there's not enough in public pool. 
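 *
 * Worked example (assumed values only): with cfg_hdw_queue == 4,
 * hwqid == 1 and rrb_next_hwqid == 2, the public pools are tried in the
 * order 1 (local), 3, 0, 1, 2, stopping early as soon as a trylock
 * succeeds on a non-empty pbl_pool.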
**/ void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) { struct lpfc_multixri_pool *multixri_pool; struct lpfc_multixri_pool *next_multixri_pool; struct lpfc_pvt_pool *pvt_pool; struct lpfc_pbl_pool *pbl_pool; struct lpfc_sli4_hdw_queue *qp; u32 next_hwqid; u32 hwq_count; int ret; qp = &phba->sli4_hba.hdwq[hwqid]; multixri_pool = qp->p_multixri_pool; pvt_pool = &multixri_pool->pvt_pool; pbl_pool = &multixri_pool->pbl_pool; /* Check if local pbl_pool is available */ ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); if (ret) { #ifdef LPFC_MXP_STAT multixri_pool->local_pbl_hit_count++; #endif return; } hwq_count = phba->cfg_hdw_queue; /* Get the next hwqid which was found last time */ next_hwqid = multixri_pool->rrb_next_hwqid; do { /* Go to next hwq */ next_hwqid = (next_hwqid + 1) % hwq_count; next_multixri_pool = phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; pbl_pool = &next_multixri_pool->pbl_pool; /* Check if the public free xri pool is available */ ret = _lpfc_move_xri_pbl_to_pvt( phba, qp, pbl_pool, pvt_pool, count); /* Exit while-loop if success or all hwqid are checked */ } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); /* Starting point for the next time */ multixri_pool->rrb_next_hwqid = next_hwqid; if (!ret) { /* stats: all public pools are empty*/ multixri_pool->pbl_empty_count++; } #ifdef LPFC_MXP_STAT if (ret) { if (next_hwqid == hwqid) multixri_pool->local_pbl_hit_count++; else multixri_pool->other_pbl_hit_count++; } #endif } /** * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark * @phba: pointer to lpfc hba data structure. * @hwqid: belong to which HWQ. * * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than * low watermark. **/ void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) { struct lpfc_multixri_pool *multixri_pool; struct lpfc_pvt_pool *pvt_pool; multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; pvt_pool = &multixri_pool->pvt_pool; if (pvt_pool->count < pvt_pool->low_watermark) lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); } /** * lpfc_release_io_buf - Return one IO buf back to free pool * @phba: pointer to lpfc hba data structure. * @lpfc_ncmd: IO buf to be returned. * @qp: belong to which HWQ. * * This routine returns one IO buf back to free pool. If this is an urgent IO, * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, * the IO buf is returned to pbl_pool or pvt_pool based on watermark and * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to * lpfc_io_buf_list_put. 
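 *
 * Condensed decision sketch for the cfg_xri_rebalancing==1 path (mirrors
 * the checks below; xri_owned = pvt_pool->count + txcmplq_cnt +
 * abts_io_bufs):
 *
 *	if (pvt_pool->count < pvt_pool->low_watermark ||
 *	    (xri_owned < xri_limit &&
 *	     pvt_pool->count < pvt_pool->high_watermark))
 *		add buf to pvt_pool;
 *	else
 *		add buf to pbl_pool;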
**/ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, struct lpfc_sli4_hdw_queue *qp) { unsigned long iflag; struct lpfc_pbl_pool *pbl_pool; struct lpfc_pvt_pool *pvt_pool; struct lpfc_epd_pool *epd_pool; u32 txcmplq_cnt; u32 xri_owned; u32 xri_limit; u32 abts_io_bufs; /* MUST zero fields if buffer is reused by another protocol */ lpfc_ncmd->nvmeCmd = NULL; lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL; if (phba->cfg_xpsgl && !phba->nvmet_support && !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); if (phba->cfg_xri_rebalancing) { if (lpfc_ncmd->expedite) { /* Return to expedite pool */ epd_pool = &phba->epd_pool; spin_lock_irqsave(&epd_pool->lock, iflag); list_add_tail(&lpfc_ncmd->list, &epd_pool->list); epd_pool->count++; spin_unlock_irqrestore(&epd_pool->lock, iflag); return; } /* Avoid invalid access if an IO sneaks in and is being rejected * just _after_ xri pools are destroyed in lpfc_offline. * Nothing much can be done at this point. */ if (!qp->p_multixri_pool) return; pbl_pool = &qp->p_multixri_pool->pbl_pool; pvt_pool = &qp->p_multixri_pool->pvt_pool; txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; abts_io_bufs = qp->abts_scsi_io_bufs; abts_io_bufs += qp->abts_nvme_io_bufs; xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; xri_limit = qp->p_multixri_pool->xri_limit; #ifdef LPFC_MXP_STAT if (xri_owned <= xri_limit) qp->p_multixri_pool->below_limit_count++; else qp->p_multixri_pool->above_limit_count++; #endif /* XRI goes to either public or private free xri pool * based on watermark and xri_limit */ if ((pvt_pool->count < pvt_pool->low_watermark) || (xri_owned < xri_limit && pvt_pool->count < pvt_pool->high_watermark)) { lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, free_pvt_pool); list_add_tail(&lpfc_ncmd->list, &pvt_pool->list); pvt_pool->count++; spin_unlock_irqrestore(&pvt_pool->lock, iflag); } else { lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, free_pub_pool); list_add_tail(&lpfc_ncmd->list, &pbl_pool->list); pbl_pool->count++; spin_unlock_irqrestore(&pbl_pool->lock, iflag); } } else { lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, qp, free_xri); list_add_tail(&lpfc_ncmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } } /** * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool * @phba: pointer to lpfc hba data structure. * @qp: pointer to HDW queue * @pvt_pool: pointer to private pool data structure. * @ndlp: pointer to lpfc nodelist data structure. * * This routine tries to get one free IO buf from private pool. 
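 *
 * The walk below skips any buf whose XRI still has an active RRQ for
 * @ndlp; in pseudocode (illustrative only):
 *
 *	for each lpfc_ncmd on pvt_pool->list
 *		if (!lpfc_test_rrq_active(phba, ndlp, sli4_lxritag))
 *			del from list, pvt_pool->count--, return lpfc_ncmd;
 *	return NULL;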
* * Return: * pointer to one free IO buf - if private pool is not empty * NULL - if private pool is empty **/ static struct lpfc_io_buf * lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, struct lpfc_pvt_pool *pvt_pool, struct lpfc_nodelist *ndlp) { struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; unsigned long iflag; lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &pvt_pool->list, list) { if (lpfc_test_rrq_active( phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) continue; list_del(&lpfc_ncmd->list); pvt_pool->count--; spin_unlock_irqrestore(&pvt_pool->lock, iflag); return lpfc_ncmd; } spin_unlock_irqrestore(&pvt_pool->lock, iflag); return NULL; } /** * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool * @phba: pointer to lpfc hba data structure. * * This routine tries to get one free IO buf from expedite pool. * * Return: * pointer to one free IO buf - if expedite pool is not empty * NULL - if expedite pool is empty **/ static struct lpfc_io_buf * lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) { struct lpfc_io_buf *lpfc_ncmd = NULL, *iter; struct lpfc_io_buf *lpfc_ncmd_next; unsigned long iflag; struct lpfc_epd_pool *epd_pool; epd_pool = &phba->epd_pool; spin_lock_irqsave(&epd_pool->lock, iflag); if (epd_pool->count > 0) { list_for_each_entry_safe(iter, lpfc_ncmd_next, &epd_pool->list, list) { list_del(&iter->list); epd_pool->count--; lpfc_ncmd = iter; break; } } spin_unlock_irqrestore(&epd_pool->lock, iflag); return lpfc_ncmd; } /** * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs * @phba: pointer to lpfc hba data structure. * @ndlp: pointer to lpfc nodelist data structure. * @hwqid: belong to which HWQ * @expedite: 1 means this request is urgent. * * This routine will do the following actions and then return a pointer to * one free IO buf. * * 1. If private free xri count is empty, move some XRIs from public to * private pool. * 2. Get one XRI from private free xri pool. * 3. If we fail to get one from pvt_pool and this is an expedite request, * get one free xri from expedite pool. * * Note: ndlp is only used on SCSI side for RRQ testing. * The caller should pass NULL for ndlp on NVME side. 
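 *
 * The only caller is lpfc_get_io_buf() below, roughly:
 *
 *	if (phba->cfg_xri_rebalancing)
 *		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(phba, ndlp,
 *							       hwqid, expedite);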
* * Return: * pointer to one free IO buf - if private pool is not empty * NULL - if private pool is empty **/ static struct lpfc_io_buf * lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int hwqid, int expedite) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; struct lpfc_pvt_pool *pvt_pool; struct lpfc_io_buf *lpfc_ncmd; qp = &phba->sli4_hba.hdwq[hwqid]; lpfc_ncmd = NULL; if (!qp) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, "5556 NULL qp for hwqid x%x\n", hwqid); return lpfc_ncmd; } multixri_pool = qp->p_multixri_pool; if (!multixri_pool) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, "5557 NULL multixri for hwqid x%x\n", hwqid); return lpfc_ncmd; } pvt_pool = &multixri_pool->pvt_pool; if (!pvt_pool) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, "5558 NULL pvt_pool for hwqid x%x\n", hwqid); return lpfc_ncmd; } multixri_pool->io_req_count++; /* If pvt_pool is empty, move some XRIs from public to private pool */ if (pvt_pool->count == 0) lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); /* Get one XRI from private free xri pool */ lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); if (lpfc_ncmd) { lpfc_ncmd->hdwq = qp; lpfc_ncmd->hdwq_no = hwqid; } else if (expedite) { /* If we fail to get one from pvt_pool and this is an expedite * request, get one free xri from expedite pool. */ lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); } return lpfc_ncmd; } static inline struct lpfc_io_buf * lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; qp = &phba->sli4_hba.hdwq[idx]; list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, &qp->lpfc_io_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) continue; list_del_init(&lpfc_cmd->list); qp->get_io_bufs--; lpfc_cmd->hdwq = qp; lpfc_cmd->hdwq_no = idx; return lpfc_cmd; } return NULL; } /** * lpfc_get_io_buf - Get one IO buffer from free pool * @phba: The HBA for which this call is being executed. * @ndlp: pointer to lpfc nodelist data structure. * @hwqid: belong to which HWQ * @expedite: 1 means this request is urgent. * * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1, * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes * a IO buffer from head of @hdwq io_buf_list and returns to caller. * * Note: ndlp is only used on SCSI side for RRQ testing. * The caller should pass NULL for ndlp on NVME side. 
* * Return codes: * NULL - Error * Pointer to lpfc_io_buf - Success **/ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, u32 hwqid, int expedite) { struct lpfc_sli4_hdw_queue *qp; unsigned long iflag; struct lpfc_io_buf *lpfc_cmd; qp = &phba->sli4_hba.hdwq[hwqid]; lpfc_cmd = NULL; if (!qp) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, "5555 NULL qp for hwqid x%x\n", hwqid); return lpfc_cmd; } if (phba->cfg_xri_rebalancing) lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( phba, ndlp, hwqid, expedite); else { lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, qp, alloc_xri_get); if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); if (!lpfc_cmd) { lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, qp, alloc_xri_put); list_splice(&qp->lpfc_io_buf_list_put, &qp->lpfc_io_buf_list_get); qp->get_io_bufs += qp->put_io_bufs; INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); qp->put_io_bufs = 0; spin_unlock(&qp->io_buf_list_put_lock); if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); } spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag); } return lpfc_cmd; } /** * lpfc_read_object - Retrieve object data from HBA * @phba: The HBA for which this call is being executed. * @rdobject: Pathname of object data we want to read. * @datap: Pointer to where data will be copied to. * @datasz: size of data area * * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less. * The data will be truncated if datasz is not large enough. * Version 1 is not supported with Embedded mbox cmd, so we must use version 0. * Returns the actual bytes read from the object. */ int lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap, uint32_t datasz) { struct lpfc_mbx_read_object *read_object; LPFC_MBOXQ_t *mbox; int rc, length, eof, j, byte_cnt = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_dmabuf *pcmd; u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0}; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_read_object) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_READ_OBJECT, length, LPFC_SLI4_MBX_EMBED); read_object = &mbox->u.mqe.un.read_object; shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr; bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0); bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz); read_object->u.request.rd_object_offset = 0; read_object->u.request.rd_object_cnt = 1; memset((void *)read_object->u.request.rd_object_name, 0, LPFC_OBJ_NAME_SZ); scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject); for (j = 0; j < strlen(rdobject); j++) read_object->u.request.rd_object_name[j] = cpu_to_le32(rd_object_name[j]); pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); if (pcmd) pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); if (!pcmd || !pcmd->virt) { kfree(pcmd); mempool_free(mbox, phba->mbox_mem_pool); return -ENOMEM; } memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE); read_object->u.request.rd_object_hbuf[0].pa_lo = putPaddrLow(pcmd->phys); read_object->u.request.rd_object_hbuf[0].pa_hi = putPaddrHigh(pcmd->phys); read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE; mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->ctx_ndlp = NULL; rc = 
lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status == STATUS_FAILED && shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, "4674 No port cfg file in FW.\n"); byte_cnt = -ENOENT; } else if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, "2625 READ_OBJECT mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); byte_cnt = -ENXIO; } else { /* Success */ length = read_object->u.response.rd_object_actual_rlen; eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response); lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, "2626 READ_OBJECT Success len %d:%d, EOF %d\n", length, datasz, eof); /* Detect the port config file exists but is empty */ if (!length && eof) { byte_cnt = 0; goto exit; } byte_cnt = length; lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt); } exit: /* This is an embedded SLI4 mailbox with an external buffer allocated. * Free the pcmd and then cleanup with the correct routine. */ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); kfree(pcmd); lpfc_sli4_mbox_cmd_free(phba, mbox); return byte_cnt; } /** * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool * @phba: The HBA for which this call is being executed. * @lpfc_buf: IO buf structure to append the SGL chunk * * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool, * and will allocate an SGL chunk if the pool is empty. * * Return codes: * NULL - Error * Pointer to sli4_hybrid_sgl - Success **/ struct sli4_hybrid_sgl * lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) { struct sli4_hybrid_sgl *list_entry = NULL; struct sli4_hybrid_sgl *tmp = NULL; struct sli4_hybrid_sgl *allocated_sgl = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->sgl_list; unsigned long iflags; spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(buf_list))) { /* break off 1 chunk from the sgl_list */ list_for_each_entry_safe(list_entry, tmp, buf_list, list_node) { list_move_tail(&list_entry->list_node, &lpfc_buf->dma_sgl_xtra_list); break; } } else { /* allocate more */ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, cpu_to_node(hdwq->io_wq->chann)); if (!tmp) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "8353 error kmalloc memory for HDWQ " "%d %s\n", lpfc_buf->hdwq_no, __func__); return NULL; } tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_ATOMIC, &tmp->dma_phys_sgl); if (!tmp->dma_sgl) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "8354 error pool_alloc memory for HDWQ " "%d %s\n", lpfc_buf->hdwq_no, __func__); kfree(tmp); return NULL; } spin_lock_irqsave(&hdwq->hdwq_lock, iflags); list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list); } allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list, struct sli4_hybrid_sgl, list_node); spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return allocated_sgl; } /** * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool * @phba: The HBA for which this call is being executed. * @lpfc_buf: IO buf structure with the SGL chunk * * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool. 
* * Return codes: * 0 - Success * -EINVAL - Error **/ int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) { int rc = 0; struct sli4_hybrid_sgl *list_entry = NULL; struct sli4_hybrid_sgl *tmp = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->sgl_list; unsigned long iflags; spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) { list_for_each_entry_safe(list_entry, tmp, &lpfc_buf->dma_sgl_xtra_list, list_node) { list_move_tail(&list_entry->list_node, buf_list); } } else { rc = -EINVAL; } spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return rc; } /** * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool * @phba: phba object * @hdwq: hdwq to cleanup sgl buff resources on * * This routine frees all SGL chunks of hdwq SGL chunk pool. * * Return codes: * None **/ void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *hdwq) { struct list_head *buf_list = &hdwq->sgl_list; struct sli4_hybrid_sgl *list_entry = NULL; struct sli4_hybrid_sgl *tmp = NULL; unsigned long iflags; spin_lock_irqsave(&hdwq->hdwq_lock, iflags); /* Free sgl pool */ list_for_each_entry_safe(list_entry, tmp, buf_list, list_node) { list_del(&list_entry->list_node); dma_pool_free(phba->lpfc_sg_dma_buf_pool, list_entry->dma_sgl, list_entry->dma_phys_sgl); kfree(list_entry); } spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); } /** * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq * @phba: The HBA for which this call is being executed. * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer * * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool, * and will allocate an CMD/RSP buffer if the pool is empty. * * Return codes: * NULL - Error * Pointer to fcp_cmd_rsp_buf - Success **/ struct fcp_cmd_rsp_buf * lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) { struct fcp_cmd_rsp_buf *list_entry = NULL; struct fcp_cmd_rsp_buf *tmp = NULL; struct fcp_cmd_rsp_buf *allocated_buf = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; unsigned long iflags; spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(buf_list))) { /* break off 1 chunk from the list */ list_for_each_entry_safe(list_entry, tmp, buf_list, list_node) { list_move_tail(&list_entry->list_node, &lpfc_buf->dma_cmd_rsp_list); break; } } else { /* allocate more */ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, cpu_to_node(hdwq->io_wq->chann)); if (!tmp) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "8355 error kmalloc memory for HDWQ " "%d %s\n", lpfc_buf->hdwq_no, __func__); return NULL; } tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool, GFP_ATOMIC, &tmp->fcp_cmd_rsp_dma_handle); if (!tmp->fcp_cmnd) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "8356 error pool_alloc memory for HDWQ " "%d %s\n", lpfc_buf->hdwq_no, __func__); kfree(tmp); return NULL; } tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + sizeof(struct fcp_cmnd)); spin_lock_irqsave(&hdwq->hdwq_lock, iflags); list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list); } allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list, struct fcp_cmd_rsp_buf, list_node); spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return allocated_buf; } /** * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool * @phba: The HBA for which this call is being executed. 
* @lpfc_buf: IO buf structure with the CMD/RSP buf * * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool. * * Return codes: * 0 - Success * -EINVAL - Error **/ int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) { int rc = 0; struct fcp_cmd_rsp_buf *list_entry = NULL; struct fcp_cmd_rsp_buf *tmp = NULL; struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; unsigned long iflags; spin_lock_irqsave(&hdwq->hdwq_lock, iflags); if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) { list_for_each_entry_safe(list_entry, tmp, &lpfc_buf->dma_cmd_rsp_list, list_node) { list_move_tail(&list_entry->list_node, buf_list); } } else { rc = -EINVAL; } spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); return rc; } /** * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool * @phba: phba object * @hdwq: hdwq to cleanup cmd rsp buff resources on * * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool. * * Return codes: * None **/ void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *hdwq) { struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; struct fcp_cmd_rsp_buf *list_entry = NULL; struct fcp_cmd_rsp_buf *tmp = NULL; unsigned long iflags; spin_lock_irqsave(&hdwq->hdwq_lock, iflags); /* Free cmd_rsp buf pool */ list_for_each_entry_safe(list_entry, tmp, buf_list, list_node) { list_del(&list_entry->list_node); dma_pool_free(phba->lpfc_cmd_rsp_buf_pool, list_entry->fcp_cmnd, list_entry->fcp_cmd_rsp_dma_handle); kfree(list_entry); } spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); } /** * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted * @phba: phba object * @job: job entry of the command to be posted. * * Fill the common fields of the wqe for each of the command. 
* * Return codes: * None **/ void lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job) { u8 cmnd; u32 *pcmd; u32 if_type = 0; u32 fip, abort_tag; struct lpfc_nodelist *ndlp = NULL; union lpfc_wqe128 *wqe = &job->wqe; u8 command_type = ELS_COMMAND_NON_FIP; fip = phba->hba_flag & HBA_FIP_SUPPORT; /* The fcp commands will set command type */ if (job->cmd_flag & LPFC_IO_FCP) command_type = FCP_COMMAND; else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK)) command_type = ELS_COMMAND_FIP; else command_type = ELS_COMMAND_NON_FIP; abort_tag = job->iotag; cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com); switch (cmnd) { case CMD_ELS_REQUEST64_WQE: ndlp = job->ndlp; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { pcmd = (u32 *)job->cmd_dmabuf->virt; if (pcmd && (*pcmd == ELS_CMD_FLOGI || *pcmd == ELS_CMD_SCR || *pcmd == ELS_CMD_RDF || *pcmd == ELS_CMD_EDC || *pcmd == ELS_CMD_RSCN_XMT || *pcmd == ELS_CMD_FDISC || *pcmd == ELS_CMD_LOGO || *pcmd == ELS_CMD_QFPA || *pcmd == ELS_CMD_UVEM || *pcmd == ELS_CMD_PLOGI)) { bf_set(els_req64_sp, &wqe->els_req, 1); bf_set(els_req64_sid, &wqe->els_req, job->vport->fc_myDID); if ((*pcmd == ELS_CMD_FLOGI) && !(phba->fc_topology == LPFC_TOPOLOGY_LOOP)) bf_set(els_req64_sid, &wqe->els_req, 0); bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, phba->vpi_ids[job->vport->vpi]); } else if (pcmd) { bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); } } bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); break; case CMD_XMIT_ELS_RSP64_WQE: ndlp = job->ndlp; /* word4 */ wqe->xmit_els_rsp.word4 = 0; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { if (job->vport->fc_flag & FC_PT2PT) { bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, job->vport->fc_myDID); if (job->vport->fc_myDID == Fabric_DID) { bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 0); } } } bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_LENLOC_WORD3); bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, job->vport->fc_myDID); bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); } if (phba->sli_rev == LPFC_SLI_REV4) { bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com)) bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, phba->vpi_ids[job->vport->vpi]); } command_type = OTHER_COMMAND; break; case CMD_GEN_REQUEST64_WQE: /* Word 10 */ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); command_type = OTHER_COMMAND; break; case CMD_XMIT_SEQUENCE64_WQE: if (phba->link_flag & 
		    LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
		wqe->xmit_sequence.rsvd3 = 0;
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_WQE:
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
	case CMD_ABORT_XRI_WQE:		/* abort iotag */
	case CMD_SEND_FRAME:		/* mds loopback */
		/* cases already formatted for sli4 wqe - no chgs necessary */
		return;
	default:
		dump_stack();
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6207 Invalid command 0x%x\n", cmnd);
		break;
	}

	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}
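/*
 * Illustrative sketch, not part of the driver: a minimal, self-contained
 * model of the watermark test used by lpfc_release_io_buf() earlier in this
 * file to decide whether a freed XRI is returned to the per-hdwq private
 * pool or to the shared public pool. All names and types below are
 * hypothetical simplifications; the real code also takes the pool locks and
 * folds outstanding/aborted IO counts into xri_owned.
 */
#include <stdbool.h>
#include <stdio.h>

struct pvt_pool_model {
	unsigned int count;          /* XRIs currently in the private pool */
	unsigned int low_watermark;  /* refill the private pool below this */
	unsigned int high_watermark; /* stop growing the private pool here */
};

/* Return true if a freed XRI should go to the private pool, false for public. */
static bool free_to_private_pool(const struct pvt_pool_model *pvt,
				 unsigned int xri_owned, unsigned int xri_limit)
{
	if (pvt->count < pvt->low_watermark)
		return true;
	if (xri_owned < xri_limit && pvt->count < pvt->high_watermark)
		return true;
	return false;
}

int main(void)
{
	struct pvt_pool_model pvt = { .count = 12, .low_watermark = 4,
				      .high_watermark = 24 };

	/* Under the XRI limit and under the high watermark: keep it private. */
	printf("%s\n", free_to_private_pool(&pvt, 40, 64) ? "private" : "public");

	/* At the XRI limit and above the low watermark: return it to public. */
	printf("%s\n", free_to_private_pool(&pvt, 64, 64) ? "private" : "public");
	return 0;
}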
linux-master
drivers/scsi/lpfc/lpfc_sli.c
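/*
 * Illustrative sketch, not part of the driver: the two-list refill pattern
 * used by lpfc_get_io_buf()/lpfc_release_io_buf() in the file above when XRI
 * rebalancing is disabled. Allocations pop from a "get" list and frees push
 * onto a "put" list, each nominally under its own lock; only when the get
 * list runs dry is the whole put list spliced over in one step. The names
 * and types here are hypothetical simplifications and locking is omitted.
 */
#include <stddef.h>
#include <stdio.h>

struct buf_node {
	int id;
	struct buf_node *next;
};

struct buf_pool {
	struct buf_node *get_list; /* consumers pop from here */
	struct buf_node *put_list; /* frees are pushed here */
};

static struct buf_node *pool_get(struct buf_pool *p)
{
	struct buf_node *n;

	if (!p->get_list) {
		/* Splice the entire put list over in one operation. */
		p->get_list = p->put_list;
		p->put_list = NULL;
	}
	if (!p->get_list)
		return NULL;
	n = p->get_list;
	p->get_list = n->next;
	n->next = NULL;
	return n;
}

static void pool_put(struct buf_pool *p, struct buf_node *n)
{
	n->next = p->put_list;
	p->put_list = n;
}

int main(void)
{
	struct buf_node a = { .id = 1 }, b = { .id = 2 };
	struct buf_pool pool = { .get_list = NULL, .put_list = NULL };
	struct buf_node *n;

	pool_put(&pool, &a);
	pool_put(&pool, &b);

	while ((n = pool_get(&pool)))
		printf("got buffer %d\n", n->id);
	return 0;
}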
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * ********************************************************************/ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/unaligned.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_version.h" #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, struct lpfc_async_xchg_ctx *, dma_addr_t rspbuf, uint16_t rspsize); static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *, struct lpfc_async_xchg_ctx *); static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *, struct lpfc_async_xchg_ctx *, uint32_t, uint16_t); static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *, struct lpfc_async_xchg_ctx *, uint32_t, uint16_t); static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_async_xchg_ctx *); static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *); static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf); static union lpfc_wqe128 lpfc_tsend_cmd_template; static union lpfc_wqe128 lpfc_treceive_cmd_template; static union lpfc_wqe128 lpfc_trsp_cmd_template; /* Setup WQE templates for NVME IOs */ void lpfc_nvmet_cmd_template(void) { union lpfc_wqe128 *wqe; /* TSEND template */ wqe = &lpfc_tsend_cmd_template; memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Word 0, 1, 2 - BDE is variable */ /* Word 3 - payload_offset_len is zero */ /* Word 4 - relative_offset is variable */ /* Word 5 - is zero */ /* Word 6 - ctxt_tag, xri_tag is variable */ /* Word 7 - wqe_ar is variable */ bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF); bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3); bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI); bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); /* Word 8 - abort_tag is variable */ /* Word 9 - reqtag, rcvoxid 
is variable */ /* Word 10 - wqes, xc is variable */ bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12); /* Word 11 - sup, irsp, irsplen is variable */ bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND); bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0); /* Word 12 - fcp_data_len is variable */ /* Word 13, 14, 15 - PBDE is zero */ /* TRECEIVE template */ wqe = &lpfc_treceive_cmd_template; memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Word 0, 1, 2 - BDE is variable */ /* Word 3 */ wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; /* Word 4 - relative_offset is variable */ /* Word 5 - is zero */ /* Word 6 - ctxt_tag, xri_tag is variable */ /* Word 7 */ bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE); bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF); bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3); bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI); bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); /* Word 8 - abort_tag is variable */ /* Word 9 - reqtag, rcvoxid is variable */ /* Word 10 - xc is variable */ bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12); bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); /* Word 11 - pbde is variable */ bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE); bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0); bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1); /* Word 12 - fcp_data_len is variable */ /* Word 13, 14, 15 - PBDE is variable */ /* TRSP template */ wqe = &lpfc_trsp_cmd_template; memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Word 0, 1, 2 - BDE is variable */ /* Word 3 - response_len is variable */ /* Word 4, 5 - is zero */ /* Word 6 - ctxt_tag, xri_tag is variable */ /* Word 7 */ bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED); bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3); bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI); bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */ /* Word 8 - abort_tag is variable */ /* Word 9 - reqtag is variable */ /* Word 10 wqes, xc is variable */ bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1); bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE); bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3); /* Word 11 irsp, irsplen is variable */ bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP); bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_irsp, 
&wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0); /* Word 12, 13, 14, 15 - is zero */ } #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) static struct lpfc_async_xchg_ctx * lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri) { struct lpfc_async_xchg_ctx *ctxp; unsigned long iflag; bool found = false; spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; found = true; break; } spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); if (found) return ctxp; return NULL; } static struct lpfc_async_xchg_ctx * lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid) { struct lpfc_async_xchg_ctx *ctxp; unsigned long iflag; bool found = false; spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) { if (ctxp->oxid != oxid || ctxp->sid != sid) continue; found = true; break; } spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); if (found) return ctxp; return NULL; } #endif static void lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp) { lockdep_assert_held(&ctxp->ctxlock); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6313 NVMET Defer ctx release oxid x%x flg x%x\n", ctxp->oxid, ctxp->flag); if (ctxp->flag & LPFC_NVME_CTX_RLS) return; ctxp->flag |= LPFC_NVME_CTX_RLS; spin_lock(&phba->sli4_hba.t_active_list_lock); list_del(&ctxp->list); spin_unlock(&phba->sli4_hba.t_active_list_lock); spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); } /** * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the * transmission of an NVME LS response. * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. The function frees memory resources used for the command * used to send the NVME LS RSP. **/ void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp; uint32_t status, result; status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6410 NVMEx LS cmpl state mismatch IO x%x: " "%d %d\n", axchg->oxid, axchg->state, axchg->entry_cnt); } lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n", axchg->oxid, status, result); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n", status, result, axchg->oxid); lpfc_nlp_put(cmdwqe->ndlp); cmdwqe->context_un.axchg = NULL; cmdwqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, cmdwqe); ls_rsp->done(ls_rsp); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n", status, axchg->oxid); kfree(axchg); } /** * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. 
* * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME LS commands * The function updates any states and statistics, then calls the * generic completion handler to free resources. **/ static void lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_nvmet_tgtport *tgtp; uint32_t status, result; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; if (!phba->targetport) goto finish; status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (tgtp) { if (status) { atomic_inc(&tgtp->xmt_ls_rsp_error); if (result == IOERR_ABORT_REQUESTED) atomic_inc(&tgtp->xmt_ls_rsp_aborted); if (bf_get(lpfc_wcqe_c_xb, wcqe)) atomic_inc(&tgtp->xmt_ls_rsp_xb_set); } else { atomic_inc(&tgtp->xmt_ls_rsp_cmpl); } } finish: __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe); } /** * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context * @phba: HBA buffer is associated with * @ctx_buf: ctx buffer context * * Description: Frees the given DMA buffer in the appropriate way given by * reposting it to its associated RQ so it can be reused. * * Notes: Takes phba->hbalock. Can be called with or without other locks held. * * Returns: None **/ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct rqb_dmabuf *nvmebuf; struct lpfc_nvmet_ctx_info *infop; uint32_t size, oxid, sid; int cpu; unsigned long iflag; if (ctxp->state == LPFC_NVME_STE_FREE) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6411 NVMET free, already free IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } if (ctxp->rqb_buffer) { spin_lock_irqsave(&ctxp->ctxlock, iflag); nvmebuf = ctxp->rqb_buffer; /* check if freed in another path whilst acquiring lock */ if (nvmebuf) { ctxp->rqb_buffer = NULL; if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); } else { spin_unlock_irqrestore(&ctxp->ctxlock, iflag); /* repost */ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); } } else { spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } } ctxp->state = LPFC_NVME_STE_FREE; spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); if (phba->sli4_hba.nvmet_io_wait_cnt) { list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, nvmebuf, struct rqb_dmabuf, hbuf.list); phba->sli4_hba.nvmet_io_wait_cnt--; spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); oxid = be16_to_cpu(fc_hdr->fh_ox_id); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; size = nvmebuf->bytes_recv; sid = sli4_sid_from_fc_hdr(fc_hdr); ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; ctxp->wqeq = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; ctxp->oxid = oxid; ctxp->sid = sid; ctxp->state = LPFC_NVME_STE_RCV; ctxp->entry_cnt = 1; ctxp->flag = 0; ctxp->ctxbuf = ctx_buf; ctxp->rqb_buffer = (void *)nvmebuf; spin_lock_init(&ctxp->ctxlock); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* NOTE: isr time stamp is stale when context is re-assigned*/ if (ctxp->ts_isr_cmd) { ctxp->ts_cmd_nvme = 0; ctxp->ts_nvme_data = 0; ctxp->ts_data_wqput = 0; ctxp->ts_isr_data = 0; 
ctxp->ts_data_nvme = 0; ctxp->ts_nvme_status = 0; ctxp->ts_status_wqput = 0; ctxp->ts_isr_status = 0; ctxp->ts_status_nvme = 0; } #endif atomic_inc(&tgtp->rcv_fcp_cmd_in); /* Indicate that a replacement buffer has been posted */ spin_lock_irqsave(&ctxp->ctxlock, iflag); ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6181 Unable to queue deferred work " "for oxid x%x. " "FCP Drop IO [x%x x%x x%x]\n", ctxp->oxid, atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); spin_lock_irqsave(&ctxp->ctxlock, iflag); lpfc_nvmet_defer_release(phba, ctxp); spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); } return; } spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); /* * Use the CPU context list, from the MRQ the IO was received on * (ctxp->idx), to save context structure. */ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); list_del_init(&ctxp->list); spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); cpu = raw_smp_processor_id(); infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx); spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag); list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); infop->nvmet_ctx_list_cnt++; spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag); #endif } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS static void lpfc_nvmet_ktime(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp) { uint64_t seg1, seg2, seg3, seg4, seg5; uint64_t seg6, seg7, seg8, seg9, seg10; uint64_t segsum; if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || !ctxp->ts_isr_data || !ctxp->ts_data_nvme || !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || !ctxp->ts_isr_status || !ctxp->ts_status_nvme) return; if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd) return; if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) return; if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) return; if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) return; if (ctxp->ts_data_wqput > ctxp->ts_isr_data) return; if (ctxp->ts_isr_data > ctxp->ts_data_nvme) return; if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) return; if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) return; if (ctxp->ts_status_wqput > ctxp->ts_isr_status) return; if (ctxp->ts_isr_status > ctxp->ts_status_nvme) return; /* * Segment 1 - Time from FCP command received by MSI-X ISR * to FCP command is passed to NVME Layer. * Segment 2 - Time from FCP command payload handed * off to NVME Layer to Driver receives a Command op * from NVME Layer. * Segment 3 - Time from Driver receives a Command op * from NVME Layer to Command is put on WQ. * Segment 4 - Time from Driver WQ put is done * to MSI-X ISR for Command cmpl. * Segment 5 - Time from MSI-X ISR for Command cmpl to * Command cmpl is passed to NVME Layer. * Segment 6 - Time from Command cmpl is passed to NVME * Layer to Driver receives a RSP op from NVME Layer. * Segment 7 - Time from Driver receives a RSP op from * NVME Layer to WQ put is done on TRSP FCP Status. * Segment 8 - Time from Driver WQ put is done on TRSP * FCP Status to MSI-X ISR for TRSP cmpl. * Segment 9 - Time from MSI-X ISR for TRSP cmpl to * TRSP cmpl is passed to NVME Layer. * Segment 10 - Time from FCP command received by * MSI-X ISR to command is completed on wire. 
* (Segments 1 thru 8) for READDATA / WRITEDATA * (Segments 1 thru 4) for READDATA_RSP */ seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; segsum = seg1; seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd; if (segsum > seg2) return; seg2 -= segsum; segsum += seg2; seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd; if (segsum > seg3) return; seg3 -= segsum; segsum += seg3; seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd; if (segsum > seg4) return; seg4 -= segsum; segsum += seg4; seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd; if (segsum > seg5) return; seg5 -= segsum; segsum += seg5; /* For auto rsp commands seg6 thru seg10 will be 0 */ if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd; if (segsum > seg6) return; seg6 -= segsum; segsum += seg6; seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd; if (segsum > seg7) return; seg7 -= segsum; segsum += seg7; seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd; if (segsum > seg8) return; seg8 -= segsum; segsum += seg8; seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd; if (segsum > seg9) return; seg9 -= segsum; segsum += seg9; if (ctxp->ts_isr_status < ctxp->ts_isr_cmd) return; seg10 = (ctxp->ts_isr_status - ctxp->ts_isr_cmd); } else { if (ctxp->ts_isr_data < ctxp->ts_isr_cmd) return; seg6 = 0; seg7 = 0; seg8 = 0; seg9 = 0; seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); } phba->ktime_seg1_total += seg1; if (seg1 < phba->ktime_seg1_min) phba->ktime_seg1_min = seg1; else if (seg1 > phba->ktime_seg1_max) phba->ktime_seg1_max = seg1; phba->ktime_seg2_total += seg2; if (seg2 < phba->ktime_seg2_min) phba->ktime_seg2_min = seg2; else if (seg2 > phba->ktime_seg2_max) phba->ktime_seg2_max = seg2; phba->ktime_seg3_total += seg3; if (seg3 < phba->ktime_seg3_min) phba->ktime_seg3_min = seg3; else if (seg3 > phba->ktime_seg3_max) phba->ktime_seg3_max = seg3; phba->ktime_seg4_total += seg4; if (seg4 < phba->ktime_seg4_min) phba->ktime_seg4_min = seg4; else if (seg4 > phba->ktime_seg4_max) phba->ktime_seg4_max = seg4; phba->ktime_seg5_total += seg5; if (seg5 < phba->ktime_seg5_min) phba->ktime_seg5_min = seg5; else if (seg5 > phba->ktime_seg5_max) phba->ktime_seg5_max = seg5; phba->ktime_data_samples++; if (!seg6) goto out; phba->ktime_seg6_total += seg6; if (seg6 < phba->ktime_seg6_min) phba->ktime_seg6_min = seg6; else if (seg6 > phba->ktime_seg6_max) phba->ktime_seg6_max = seg6; phba->ktime_seg7_total += seg7; if (seg7 < phba->ktime_seg7_min) phba->ktime_seg7_min = seg7; else if (seg7 > phba->ktime_seg7_max) phba->ktime_seg7_max = seg7; phba->ktime_seg8_total += seg8; if (seg8 < phba->ktime_seg8_min) phba->ktime_seg8_min = seg8; else if (seg8 > phba->ktime_seg8_max) phba->ktime_seg8_max = seg8; phba->ktime_seg9_total += seg9; if (seg9 < phba->ktime_seg9_min) phba->ktime_seg9_min = seg9; else if (seg9 > phba->ktime_seg9_max) phba->ktime_seg9_max = seg9; out: phba->ktime_seg10_total += seg10; if (seg10 < phba->ktime_seg10_min) phba->ktime_seg10_min = seg10; else if (seg10 > phba->ktime_seg10_max) phba->ktime_seg10_max = seg10; phba->ktime_status_samples++; } #endif /** * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME FCP commands * The function frees memory resources used for the NVME commands. 
**/ static void lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_nvmet_tgtport *tgtp; struct nvmefc_tgt_fcp_req *rsp; struct lpfc_async_xchg_ctx *ctxp; uint32_t status, result, op, logerr; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS int id; #endif ctxp = cmdwqe->context_un.axchg; ctxp->flag &= ~LPFC_NVME_IO_INP; rsp = &ctxp->hdlrctx.fcp_req; op = rsp->op; status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; if (phba->targetport) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; else tgtp = NULL; lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", ctxp->oxid, op, status); if (status) { rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; rsp->transferred_length = 0; if (tgtp) { atomic_inc(&tgtp->xmt_fcp_rsp_error); if (result == IOERR_ABORT_REQUESTED) atomic_inc(&tgtp->xmt_fcp_rsp_aborted); } logerr = LOG_NVME_IOERR; /* pick up SLI4 exhange busy condition */ if (bf_get(lpfc_wcqe_c_xb, wcqe)) { ctxp->flag |= LPFC_NVME_XBUSY; logerr |= LOG_NVME_ABTS; if (tgtp) atomic_inc(&tgtp->xmt_fcp_rsp_xb_set); } else { ctxp->flag &= ~LPFC_NVME_XBUSY; } lpfc_printf_log(phba, KERN_INFO, logerr, "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x " "XBUSY:x%x\n", ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag, status, result, ctxp->flag); } else { rsp->fcp_error = NVME_SC_SUCCESS; if (op == NVMET_FCOP_RSP) rsp->transferred_length = rsp->rsplen; else rsp->transferred_length = rsp->transfer_length; if (tgtp) atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); } if ((op == NVMET_FCOP_READDATA_RSP) || (op == NVMET_FCOP_RSP)) { /* Sanity check */ ctxp->state = LPFC_NVME_STE_DONE; ctxp->entry_cnt++; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { if (rsp->op == NVMET_FCOP_READDATA_RSP) { ctxp->ts_isr_data = cmdwqe->isr_timestamp; ctxp->ts_data_nvme = ktime_get_ns(); ctxp->ts_nvme_status = ctxp->ts_data_nvme; ctxp->ts_status_wqput = ctxp->ts_data_nvme; ctxp->ts_isr_status = ctxp->ts_data_nvme; ctxp->ts_status_nvme = ctxp->ts_data_nvme; } else { ctxp->ts_isr_status = cmdwqe->isr_timestamp; ctxp->ts_status_nvme = ktime_get_ns(); } } #endif rsp->done(rsp); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) lpfc_nvmet_ktime(phba, ctxp); #endif /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ } else { ctxp->entry_cnt++; memset_startat(cmdwqe, 0, cmd_flag); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { ctxp->ts_isr_data = cmdwqe->isr_timestamp; ctxp->ts_data_nvme = ktime_get_ns(); } #endif rsp->done(rsp); } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { id = raw_smp_processor_id(); this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); if (ctxp->cpu != id) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6704 CPU Check cmdcmpl: " "cpu %d expect %d\n", id, ctxp->cpu); } #endif } /** * __lpfc_nvme_xmt_ls_rsp - Generic service routine to issue transmit * an NVME LS rsp for a prior NVME LS request that was received. * @axchg: pointer to exchange context for the NVME LS request the response * is for. * @ls_rsp: pointer to the transport LS RSP that is to be sent * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done * * This routine is used to format and send a WQE to transmit a NVME LS * Response. The response is for a prior NVME LS request that was * received and posted to the transport. * * Returns: * 0 : if response successfully transmit * non-zero : if response failed to transmit, of the form -Exxx. 
**/ int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, struct nvmefc_ls_rsp *ls_rsp, void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe)) { struct lpfc_hba *phba = axchg->phba; struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer; struct lpfc_iocbq *nvmewqeq; struct lpfc_dmabuf dmabuf; struct ulp_bde64 bpl; int rc; if (phba->pport->load_flag & FC_UNLOADING) return -ENODEV; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid); if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6412 NVMEx LS rsp state mismatch " "oxid x%x: %d %d\n", axchg->oxid, axchg->state, axchg->entry_cnt); return -EALREADY; } axchg->state = LPFC_NVME_STE_LS_RSP; axchg->entry_cnt++; nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma, ls_rsp->rsplen); if (nvmewqeq == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6150 NVMEx LS Drop Rsp x%x: Prep\n", axchg->oxid); rc = -ENOMEM; goto out_free_buf; } /* Save numBdes for bpl2sgl */ nvmewqeq->num_bdes = 1; nvmewqeq->hba_wqidx = 0; nvmewqeq->bpl_dmabuf = &dmabuf; dmabuf.virt = &bpl; bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; bpl.tus.f.bdeSize = ls_rsp->rsplen; bpl.tus.f.bdeFlags = 0; bpl.tus.w = le32_to_cpu(bpl.tus.w); /* * Note: although we're using stack space for the dmabuf, the * call to lpfc_sli4_issue_wqe is synchronous, so it will not * be referenced after it returns back to this routine. */ nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp; nvmewqeq->context_un.axchg = axchg; lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n", axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen); rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq); /* clear to be sure there's no reference */ nvmewqeq->bpl_dmabuf = NULL; if (rc == WQE_SUCCESS) { /* * Okay to repost buffer here, but wait till cmpl * before freeing ctxp and iocbq. */ lpfc_in_buf_free(phba, &nvmebuf->dbuf); return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6151 NVMEx LS RSP x%x: failed to transmit %d\n", axchg->oxid, rc); rc = -ENXIO; lpfc_nlp_put(nvmewqeq->ndlp); out_free_buf: /* Give back resources */ lpfc_in_buf_free(phba, &nvmebuf->dbuf); /* * As transport doesn't track completions of responses, if the rsp * fails to send, the transport will effectively ignore the rsp * and consider the LS done. However, the driver has an active * exchange open for the LS - so be sure to abort the exchange * if the response isn't sent. */ lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid); return rc; } /** * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response * @tgtport: pointer to target port that NVME LS is to be transmit from. * @ls_rsp: pointer to the transport LS RSP that is to be sent * * Driver registers this routine to transmit responses for received NVME * LS requests. * * This routine is used to format and send a WQE to transmit a NVME LS * Response. The ls_rsp is used to reverse-map the LS to the original * NVME LS request sequence, which provides addressing information for * the remote port the LS to be sent to, as well as the exchange id * that is the LS is bound to. * * Returns: * 0 : if response successfully transmit * non-zero : if response failed to transmit, of the form -Exxx. 
**/ static int lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, struct nvmefc_ls_rsp *ls_rsp) { struct lpfc_async_xchg_ctx *axchg = container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); struct lpfc_nvmet_tgtport *nvmep = tgtport->private; int rc; if (axchg->phba->pport->load_flag & FC_UNLOADING) return -ENODEV; rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp); if (rc) { atomic_inc(&nvmep->xmt_ls_drop); /* * unless the failure is due to having already sent * the response, an abort will be generated for the * exchange if the rsp can't be sent. */ if (rc != -EALREADY) atomic_inc(&nvmep->xmt_ls_abort); return rc; } atomic_inc(&nvmep->xmt_ls_rsp); return 0; } static int lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; struct lpfc_async_xchg_ctx *ctxp = container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; struct lpfc_queue *wq; struct lpfc_iocbq *nvmewqeq; struct lpfc_sli_ring *pring; unsigned long iflags; int rc; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS int id; #endif if (phba->pport->load_flag & FC_UNLOADING) { rc = -ENODEV; goto aerr; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { if (rsp->op == NVMET_FCOP_RSP) ctxp->ts_nvme_status = ktime_get_ns(); else ctxp->ts_nvme_data = ktime_get_ns(); } /* Setup the hdw queue if not already set */ if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { id = raw_smp_processor_id(); this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); if (rsp->hwqid != id) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6705 CPU Check OP: " "cpu %d expect %d\n", id, rsp->hwqid); ctxp->cpu = id; /* Setup cpu for cmpl check */ } #endif /* Sanity check */ if ((ctxp->flag & LPFC_NVME_ABTS_RCV) || (ctxp->state == LPFC_NVME_STE_ABORT)) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6102 IO oxid x%x aborted\n", ctxp->oxid); rc = -ENXIO; goto aerr; } nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); if (nvmewqeq == NULL) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6152 FCP Drop IO x%x: Prep\n", ctxp->oxid); rc = -ENXIO; goto aerr; } nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; nvmewqeq->context_un.axchg = ctxp; nvmewqeq->cmd_flag |= LPFC_IO_NVMET; ctxp->wqeq->hba_wqidx = rsp->hwqid; lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", ctxp->oxid, rsp->op, rsp->rsplen); ctxp->flag |= LPFC_NVME_IO_INP; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); if (rc == WQE_SUCCESS) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (!ctxp->ts_cmd_nvme) return 0; if (rsp->op == NVMET_FCOP_RSP) ctxp->ts_status_wqput = ktime_get_ns(); else ctxp->ts_data_wqput = ktime_get_ns(); #endif return 0; } if (rc == -EBUSY) { /* * WQ was full, so queue nvmewqeq to be sent after * WQE release CQE */ ctxp->flag |= LPFC_NVME_DEFER_WQFULL; wq = ctxp->hdwq->io_wq; pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); list_add_tail(&nvmewqeq->list, &wq->wqfull_list); wq->q_flag |= HBA_NVMET_WQFULL; spin_unlock_irqrestore(&pring->ring_lock, iflags); atomic_inc(&lpfc_nvmep->defer_wqfull); return 0; } /* Give back resources */ atomic_inc(&lpfc_nvmep->xmt_fcp_drop); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6153 FCP Drop IO x%x: Issue: %d\n", ctxp->oxid, rc); ctxp->wqeq->hba_wqidx = 0; nvmewqeq->context_un.axchg = NULL; nvmewqeq->bpl_dmabuf = NULL; rc = -EBUSY; 
aerr: return rc; } static void lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) { struct lpfc_nvmet_tgtport *tport = targetport->private; /* release any threads waiting for the unreg to complete */ if (tport->phba->targetport) complete(tport->tport_unreg_cmp); } static void lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *req) { struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; struct lpfc_async_xchg_ctx *ctxp = container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; struct lpfc_queue *wq; unsigned long flags; if (phba->pport->load_flag & FC_UNLOADING) return; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[0]; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n", ctxp->oxid, ctxp->flag, ctxp->state); lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n", ctxp->oxid, ctxp->flag, ctxp->state); atomic_inc(&lpfc_nvmep->xmt_fcp_abort); spin_lock_irqsave(&ctxp->ctxlock, flags); /* Since iaab/iaar are NOT set, we need to check * if the firmware is in process of aborting IO */ if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) { spin_unlock_irqrestore(&ctxp->ctxlock, flags); return; } ctxp->flag |= LPFC_NVME_ABORT_OP; if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) { spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); wq = ctxp->hdwq->io_wq; lpfc_nvmet_wqfull_flush(phba, wq, ctxp); return; } spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* A state of LPFC_NVME_STE_RCV means we have just received * the NVME command and have not started processing it. * (by issuing any IO WQEs on this exchange yet) */ if (ctxp->state == LPFC_NVME_STE_RCV) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); else lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); } static void lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; struct lpfc_async_xchg_ctx *ctxp = container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; unsigned long flags; bool aborting = false; spin_lock_irqsave(&ctxp->ctxlock, flags); if (ctxp->flag & LPFC_NVME_XBUSY) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6027 NVMET release with XBUSY flag x%x" " oxid x%x\n", ctxp->flag, ctxp->oxid); else if (ctxp->state != LPFC_NVME_STE_DONE && ctxp->state != LPFC_NVME_STE_ABORT) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6413 NVMET release bad state %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); if ((ctxp->flag & LPFC_NVME_ABORT_OP) || (ctxp->flag & LPFC_NVME_XBUSY)) { aborting = true; /* let the abort path do the real release */ lpfc_nvmet_defer_release(phba, ctxp); } spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, ctxp->state, aborting); atomic_inc(&lpfc_nvmep->xmt_fcp_release); ctxp->flag &= ~LPFC_NVME_TNOTIFY; if (aborting) return; lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } static void lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_async_xchg_ctx *ctxp = container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; struct lpfc_hba *phba = ctxp->phba; unsigned long iflag; lpfc_nvmeio_data(phba, "NVMET 
DEFERRCV: xri x%x sz %d CPU %02x\n", ctxp->oxid, ctxp->size, raw_smp_processor_id()); if (!nvmebuf) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6425 Defer rcv: no buffer oxid x%x: " "flg %x ste %x\n", ctxp->oxid, ctxp->flag, ctxp->state); return; } tgtp = phba->targetport->private; if (tgtp) atomic_inc(&tgtp->rcv_fcp_cmd_defer); /* Free the nvmebuf since a new buffer already replaced it */ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); spin_lock_irqsave(&ctxp->ctxlock, iflag); ctxp->rqb_buffer = NULL; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } /** * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request * @phba: Pointer to HBA context object * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. * * This function is the completion handler for NVME LS requests. * The function updates any states and statistics, then calls the * generic completion handler to finish completion of the request. **/ static void lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe); } /** * lpfc_nvmet_ls_req - Issue an Link Service request * @targetport: pointer to target instance registered with nvmet transport. * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. * Driver sets this value to the ndlp pointer. * @pnvme_lsreq: the transport nvme_ls_req structure for the LS * * Driver registers this routine to handle any link service request * from the nvme_fc transport to a remote nvme-aware port. * * Return value : * 0 - Success * non-zero: various error codes, in form of -Exxx **/ static int lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *pnvme_lsreq) { struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private; struct lpfc_hba *phba; struct lpfc_nodelist *ndlp; int ret; u32 hstate; if (!lpfc_nvmet) return -EINVAL; phba = lpfc_nvmet->phba; if (phba->pport->load_flag & FC_UNLOADING) return -EINVAL; hstate = atomic_read(&lpfc_nvmet->state); if (hstate == LPFC_NVMET_INV_HOST_ACTIVE) return -EACCES; ndlp = (struct lpfc_nodelist *)hosthandle; ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq, lpfc_nvmet_ls_req_cmp); return ret; } /** * lpfc_nvmet_ls_abort - Abort a prior NVME LS request * @targetport: Transport targetport, that LS was issued from. * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. * Driver sets this value to the ndlp pointer. * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted * * Driver registers this routine to abort an NVME LS request that is * in progress (from the transports perspective). 
**/ static void lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *pnvme_lsreq) { struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private; struct lpfc_hba *phba; struct lpfc_nodelist *ndlp; int ret; phba = lpfc_nvmet->phba; if (phba->pport->load_flag & FC_UNLOADING) return; ndlp = (struct lpfc_nodelist *)hosthandle; ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq); if (!ret) atomic_inc(&lpfc_nvmet->xmt_ls_abort); } static void lpfc_nvmet_host_release(void *hosthandle) { struct lpfc_nodelist *ndlp = hosthandle; struct lpfc_hba *phba = ndlp->phba; struct lpfc_nvmet_tgtport *tgtp; if (!phba->targetport || !phba->targetport->private) return; lpfc_printf_log(phba, KERN_ERR, LOG_NVME, "6202 NVMET XPT releasing hosthandle x%px " "DID x%x xflags x%x refcnt %d\n", hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; spin_lock_irq(&ndlp->lock); ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH; spin_unlock_irq(&ndlp->lock); lpfc_nlp_put(ndlp); atomic_set(&tgtp->state, 0); } static void lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_hba *phba; uint32_t rc; tgtp = tgtport->private; phba = tgtp->phba; rc = lpfc_issue_els_rscn(phba->pport, 0); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6420 NVMET subsystem change: Notification %s\n", (rc) ? "Failed" : "Sent"); } static struct nvmet_fc_target_template lpfc_tgttemplate = { .targetport_delete = lpfc_nvmet_targetport_delete, .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, .fcp_op = lpfc_nvmet_xmt_fcp_op, .fcp_abort = lpfc_nvmet_xmt_fcp_abort, .fcp_req_release = lpfc_nvmet_xmt_fcp_release, .defer_rcv = lpfc_nvmet_defer_rcv, .discovery_event = lpfc_nvmet_discovery_event, .ls_req = lpfc_nvmet_ls_req, .ls_abort = lpfc_nvmet_ls_abort, .host_release = lpfc_nvmet_host_release, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, .dma_boundary = 0xFFFFFFFF, /* optional features */ .target_features = 0, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), .lsrqst_priv_sz = 0, }; static void __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *infop) { struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; unsigned long flags; spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags); list_for_each_entry_safe(ctx_buf, next_ctx_buf, &infop->nvmet_ctx_list, list) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctx_buf->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_lock(&phba->hbalock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); spin_unlock(&phba->hbalock); ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; spin_lock(&phba->sli4_hba.sgl_list_lock); list_add_tail(&ctx_buf->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); } spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags); } static void lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) { struct lpfc_nvmet_ctx_info *infop; int i, j; /* The first context list, MRQ 0 CPU 0 */ infop = phba->sli4_hba.nvmet_ctx_info; if (!infop) return; /* Cycle the entire CPU context list for every MRQ */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { for_each_present_cpu(j) { infop = lpfc_get_ctx_list(phba, j, i); 
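			/*
			 * lpfc_get_ctx_list() effectively indexes the flat
			 * nvmet_ctx_info array as [cpu * cfg_nvmet_mrq + mrq],
			 * so this loop pair visits every per-CPU, per-MRQ
			 * context silo exactly once before the array is freed.
			 */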
__lpfc_nvmet_clean_io_for_cpu(phba, infop); } } kfree(phba->sli4_hba.nvmet_ctx_info); phba->sli4_hba.nvmet_ctx_info = NULL; } static int lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) { struct lpfc_nvmet_ctxbuf *ctx_buf; struct lpfc_iocbq *nvmewqe; union lpfc_wqe128 *wqe; struct lpfc_nvmet_ctx_info *last_infop; struct lpfc_nvmet_ctx_info *infop; int i, j, idx, cpu; lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6403 Allocate NVMET resources for %d XRIs\n", phba->sli4_hba.nvmet_xri_cnt); phba->sli4_hba.nvmet_ctx_info = kcalloc( phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); if (!phba->sli4_hba.nvmet_ctx_info) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6419 Failed allocate memory for " "nvmet context lists\n"); return -ENOMEM; } /* * Assuming X CPUs in the system, and Y MRQs, allocate some * lpfc_nvmet_ctx_info structures as follows: * * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1 * ... * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY * * Each line represents a MRQ "silo" containing an entry for * every CPU. * * MRQ X is initially assumed to be associated with CPU X, thus * contexts are initially distributed across all MRQs using * the MRQ index (N) as follows cpuN/mrqN. When contexts are * freed, the are freed to the MRQ silo based on the CPU number * of the IO completion. Thus a context that was allocated for MRQ A * whose IO completed on CPU B will be freed to cpuB/mrqA. */ for_each_possible_cpu(i) { for (j = 0; j < phba->cfg_nvmet_mrq; j++) { infop = lpfc_get_ctx_list(phba, i, j); INIT_LIST_HEAD(&infop->nvmet_ctx_list); spin_lock_init(&infop->nvmet_ctx_list_lock); infop->nvmet_ctx_list_cnt = 0; } } /* * Setup the next CPU context info ptr for each MRQ. * MRQ 0 will cycle thru CPUs 0 - X separately from * MRQ 1 cycling thru CPUs 0 - X, and so on. */ for (j = 0; j < phba->cfg_nvmet_mrq; j++) { last_infop = lpfc_get_ctx_list(phba, cpumask_first(cpu_present_mask), j); for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) { infop = lpfc_get_ctx_list(phba, i, j); infop->nvmet_ctx_next_cpu = last_infop; last_infop = infop; } } /* For all nvmet xris, allocate resources needed to process a * received command on a per xri basis. 
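	 * Each context gets its own ctx_buf, async exchange context,
	 * iocbq and NVMET SGL; if any of these allocations fails, the
	 * routine gives up immediately and returns -ENOMEM.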
*/ idx = 0; cpu = cpumask_first(cpu_present_mask); for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6404 Ran out of memory for NVMET\n"); return -ENOMEM; } ctx_buf->context = kzalloc(sizeof(*ctx_buf->context), GFP_KERNEL); if (!ctx_buf->context) { kfree(ctx_buf); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6405 Ran out of NVMET " "context memory\n"); return -ENOMEM; } ctx_buf->context->ctxbuf = ctx_buf; ctx_buf->context->state = LPFC_NVME_STE_FREE; ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); if (!ctx_buf->iocbq) { kfree(ctx_buf->context); kfree(ctx_buf); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6406 Ran out of NVMET iocb/WQEs\n"); return -ENOMEM; } ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET; nvmewqe = ctx_buf->iocbq; wqe = &nvmewqe->wqe; /* Initialize WQE */ memset(wqe, 0, sizeof(union lpfc_wqe)); ctx_buf->iocbq->cmd_dmabuf = NULL; spin_lock(&phba->sli4_hba.sgl_list_lock); ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); spin_unlock(&phba->sli4_hba.sgl_list_lock); if (!ctx_buf->sglq) { lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); kfree(ctx_buf); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work); /* * Add ctx to MRQidx context list. Our initial assumption * is MRQidx will be associated with CPUidx. This association * can change on the fly. */ infop = lpfc_get_ctx_list(phba, cpu, idx); spin_lock(&infop->nvmet_ctx_list_lock); list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); infop->nvmet_ctx_list_cnt++; spin_unlock(&infop->nvmet_ctx_list_lock); /* Spread ctx structures evenly across all MRQs */ idx++; if (idx >= phba->cfg_nvmet_mrq) { idx = 0; cpu = cpumask_first(cpu_present_mask); continue; } cpu = lpfc_next_present_cpu(cpu); } for_each_present_cpu(i) { for (j = 0; j < phba->cfg_nvmet_mrq; j++) { infop = lpfc_get_ctx_list(phba, i, j); lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, "6408 TOTAL NVMET ctx for CPU %d " "MRQ %d: cnt %d nextcpu x%px\n", i, j, infop->nvmet_ctx_list_cnt, infop->nvmet_ctx_next_cpu); } } return 0; } int lpfc_nvmet_create_targetport(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_nvmet_tgtport *tgtp; struct nvmet_fc_port_info pinfo; int error; if (phba->targetport) return 0; error = lpfc_nvmet_setup_io_context(phba); if (error) return error; memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); pinfo.port_id = vport->fc_myDID; /* We need to tell the transport layer + 1 because it takes page * alignment into account. 
When space for the SGL is allocated we * allocate + 3, one for cmd, one for rsp and one for this alignment */ lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, &phba->pcidev->dev, &phba->targetport); #else error = -ENOENT; #endif if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6025 Cannot register NVME targetport x%x: " "portnm %llx nodenm %llx segs %d qs %d\n", error, pinfo.port_name, pinfo.node_name, lpfc_tgttemplate.max_sgl_segments, lpfc_tgttemplate.max_hw_queues); phba->targetport = NULL; phba->nvmet_support = 0; lpfc_nvmet_cleanup_io_context(phba); } else { tgtp = (struct lpfc_nvmet_tgtport *) phba->targetport->private; tgtp->phba = phba; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6026 Registered NVME " "targetport: x%px, private x%px " "portnm %llx nodenm %llx segs %d qs %d\n", phba->targetport, tgtp, pinfo.port_name, pinfo.node_name, lpfc_tgttemplate.max_sgl_segments, lpfc_tgttemplate.max_hw_queues); atomic_set(&tgtp->rcv_ls_req_in, 0); atomic_set(&tgtp->rcv_ls_req_out, 0); atomic_set(&tgtp->rcv_ls_req_drop, 0); atomic_set(&tgtp->xmt_ls_abort, 0); atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); atomic_set(&tgtp->xmt_ls_rsp, 0); atomic_set(&tgtp->xmt_ls_drop, 0); atomic_set(&tgtp->xmt_ls_rsp_error, 0); atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0); atomic_set(&tgtp->xmt_ls_rsp_aborted, 0); atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); atomic_set(&tgtp->rcv_fcp_cmd_in, 0); atomic_set(&tgtp->rcv_fcp_cmd_out, 0); atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); atomic_set(&tgtp->xmt_fcp_drop, 0); atomic_set(&tgtp->xmt_fcp_read_rsp, 0); atomic_set(&tgtp->xmt_fcp_read, 0); atomic_set(&tgtp->xmt_fcp_write, 0); atomic_set(&tgtp->xmt_fcp_rsp, 0); atomic_set(&tgtp->xmt_fcp_release, 0); atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); atomic_set(&tgtp->xmt_fcp_rsp_error, 0); atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0); atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0); atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0); atomic_set(&tgtp->xmt_fcp_abort, 0); atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); atomic_set(&tgtp->xmt_abort_unsol, 0); atomic_set(&tgtp->xmt_abort_sol, 0); atomic_set(&tgtp->xmt_abort_rsp, 0); atomic_set(&tgtp->xmt_abort_rsp_error, 0); atomic_set(&tgtp->defer_ctx, 0); atomic_set(&tgtp->defer_fod, 0); atomic_set(&tgtp->defer_wqfull, 0); } return error; } int lpfc_nvmet_update_targetport(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; if (!phba->targetport) return 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6007 Update NVMET port x%px did x%x\n", phba->targetport, vport->fc_myDID); phba->targetport->port_id = vport->fc_myDID; return 0; } /** * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the nvmet xri abort wcqe structure. * * This routine is invoked by the worker thread to process a SLI4 fast-path * NVMET aborted xri. 
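 * It clears LPFC_NVME_XBUSY on the matching exchange, sets the RRQ
 * active for the aborted XRI when the node is still logged in, and
 * posts the context buffer back if a release had been deferred.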
**/ void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; struct lpfc_nvmet_tgtport *tgtp; struct nvmefc_tgt_fcp_req *req = NULL; struct lpfc_nodelist *ndlp; unsigned long iflag = 0; int rrq_empty = 0; bool released = false; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return; if (phba->targetport) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe); } spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); spin_lock_irqsave(&ctxp->ctxlock, iflag); /* Check if we already received a free context call * and we have completed processing an abort situation. */ if (ctxp->flag & LPFC_NVME_CTX_RLS && !(ctxp->flag & LPFC_NVME_ABORT_OP)) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVME_XBUSY; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); if (ndlp && (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { lpfc_set_rrq_active(phba, ndlp, ctxp->ctxbuf->sglq->sli4_lxritag, rxid, 1); lpfc_sli4_abts_err_handler(phba, ndlp, axri); } lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6318 XB aborted oxid x%x flg x%x (%x)\n", ctxp->oxid, ctxp->flag, released); if (released) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); if (rrq_empty) lpfc_worker_wake_up(phba); return; } spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); if (ctxp) { /* * Abort already done by FW, so BA_ACC sent. * However, the transport may be unaware. 
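		 * Mark the exchange aborted here and pass the abort up to
		 * nvmet-fc via nvmet_fc_rcv_fcp_abort() so the transport
		 * and the driver agree on the state of the IO.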
*/ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6323 NVMET Rcv ABTS xri x%x ctxp state x%x " "flag x%x oxid x%x rxid x%x\n", xri, ctxp->state, ctxp->flag, ctxp->oxid, rxid); spin_lock_irqsave(&ctxp->ctxlock, iflag); ctxp->flag |= LPFC_NVME_ABTS_RCV; ctxp->state = LPFC_NVME_STE_ABORT; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", xri, raw_smp_processor_id(), 0); req = &ctxp->hdlrctx.fcp_req; if (req) nvmet_fc_rcv_fcp_abort(phba->targetport, req); } #endif } int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_hba *phba = vport->phba; struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; struct nvmefc_tgt_fcp_req *rsp; uint32_t sid; uint16_t oxid, xri; unsigned long iflag = 0; sid = sli4_sid_from_fc_hdr(fc_hdr); oxid = be16_to_cpu(fc_hdr->fh_ox_id); spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->oxid != oxid || ctxp->sid != sid) continue; xri = ctxp->ctxbuf->sglq->sli4_xritag; spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); spin_lock_irqsave(&ctxp->ctxlock, iflag); ctxp->flag |= LPFC_NVME_ABTS_RCV; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", xri, raw_smp_processor_id(), 0); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); rsp = &ctxp->hdlrctx.fcp_req; nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); /* Respond with BA_ACC accordingly */ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); /* check the wait list */ if (phba->sli4_hba.nvmet_io_wait_cnt) { struct rqb_dmabuf *nvmebuf; struct fc_frame_header *fc_hdr_tmp; u32 sid_tmp; u16 oxid_tmp; bool found = false; spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); /* match by oxid and s_id */ list_for_each_entry(nvmebuf, &phba->sli4_hba.lpfc_nvmet_io_wait_list, hbuf.list) { fc_hdr_tmp = (struct fc_frame_header *) (nvmebuf->hbuf.virt); oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id); sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp); if (oxid_tmp != oxid || sid_tmp != sid) continue; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6321 NVMET Rcv ABTS oxid x%x from x%x " "is waiting for a ctxp\n", oxid, sid); list_del_init(&nvmebuf->hbuf.list); phba->sli4_hba.nvmet_io_wait_cnt--; found = true; break; } spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); /* free buffer since already posted a new DMA buffer to RQ */ if (found) { nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); /* Respond with BA_ACC accordingly */ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } } /* check active list */ ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); if (ctxp) { xri = ctxp->ctxbuf->sglq->sli4_xritag; spin_lock_irqsave(&ctxp->ctxlock, iflag); ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP); spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", xri, raw_smp_processor_id(), 0); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x " "flag x%x state x%x\n", ctxp->oxid, xri, ctxp->flag, ctxp->state); if (ctxp->flag & LPFC_NVME_TNOTIFY) { /* Notify the transport */ nvmet_fc_rcv_fcp_abort(phba->targetport, &ctxp->hdlrctx.fcp_req); } else { 
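			/* The transport was never notified of this IO
			 * (LPFC_NVME_TNOTIFY is clear): cancel any deferred
			 * receive work and let the abort path release the
			 * context instead of calling nvmet_fc_rcv_fcp_abort.
			 */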
cancel_work_sync(&ctxp->ctxbuf->defer_work); spin_lock_irqsave(&ctxp->ctxlock, iflag); lpfc_nvmet_defer_release(phba, ctxp); spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n", oxid, raw_smp_processor_id(), 1); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid); /* Respond with BA_RJT accordingly */ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); #endif return 0; } static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, struct lpfc_async_xchg_ctx *ctxp) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *nvmewqeq; struct lpfc_iocbq *next_nvmewqeq; unsigned long iflags; struct lpfc_wcqe_complete wcqe; struct lpfc_wcqe_complete *wcqep; pring = wq->pring; wcqep = &wcqe; /* Fake an ABORT error code back to cmpl routine */ memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete)); bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT); wcqep->parameter = IOERR_ABORT_REQUESTED; spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry_safe(nvmewqeq, next_nvmewqeq, &wq->wqfull_list, list) { if (ctxp) { /* Checking for a specific IO to flush */ if (nvmewqeq->context_un.axchg == ctxp) { list_del(&nvmewqeq->list); spin_unlock_irqrestore(&pring->ring_lock, iflags); memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep)); lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq); return; } continue; } else { /* Flush all IOs */ list_del(&nvmewqeq->list); spin_unlock_irqrestore(&pring->ring_lock, iflags); memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep)); lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); } } if (!ctxp) wq->q_flag &= ~HBA_NVMET_WQFULL; spin_unlock_irqrestore(&pring->ring_lock, iflags); } void lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, struct lpfc_queue *wq) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_sli_ring *pring; struct lpfc_iocbq *nvmewqeq; struct lpfc_async_xchg_ctx *ctxp; unsigned long iflags; int rc; /* * Some WQE slots are available, so try to re-issue anything * on the WQ wqfull_list. 
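	 * Entries are re-submitted in order; if the WQ reports -EBUSY
	 * again, the entry is put back at the head of the list and the
	 * remainder is left for a later retry.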
*/ pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); while (!list_empty(&wq->wqfull_list)) { list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, list); spin_unlock_irqrestore(&pring->ring_lock, iflags); ctxp = nvmewqeq->context_un.axchg; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); if (rc == -EBUSY) { /* WQ was full again, so put it back on the list */ list_add(&nvmewqeq->list, &wq->wqfull_list); spin_unlock_irqrestore(&pring->ring_lock, iflags); return; } if (rc == WQE_SUCCESS) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP) ctxp->ts_status_wqput = ktime_get_ns(); else ctxp->ts_data_wqput = ktime_get_ns(); } #endif } else { WARN_ON(rc); } } wq->q_flag &= ~HBA_NVMET_WQFULL; spin_unlock_irqrestore(&pring->ring_lock, iflags); #endif } void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_tgtport *tgtp; struct lpfc_queue *wq; uint32_t qidx; DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); if (phba->nvmet_support == 0) return; if (phba->targetport) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { wq = phba->sli4_hba.hdwq[qidx].io_wq; lpfc_nvmet_wqfull_flush(phba, wq, NULL); } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6179 Unreg targetport x%px timeout " "reached.\n", phba->targetport); lpfc_nvmet_cleanup_io_context(phba); } phba->targetport = NULL; #endif } /** * lpfc_nvmet_handle_lsreq - Process an NVME LS request * @phba: pointer to lpfc hba data structure. * @axchg: pointer to exchange context for the NVME LS request * * This routine is used for processing an asychronously received NVME LS * request. Any remaining validation is done and the LS is then forwarded * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). * * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing) * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done. * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. * * Returns 0 if LS was handled and delivered to the transport * Returns 1 if LS failed to be handled and should be dropped */ int lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *axchg) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private; uint32_t *payload = axchg->payload; int rc; atomic_inc(&tgtp->rcv_ls_req_in); /* * Driver passes the ndlp as the hosthandle argument allowing * the transport to generate LS requests for any associateions * that are created. 
*/ rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp, axchg->payload, axchg->size); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " "%08x %08x %08x\n", axchg->size, rc, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5)); if (!rc) { atomic_inc(&tgtp->rcv_ls_req_out); return 0; } atomic_inc(&tgtp->rcv_ls_req_drop); #endif return 1; } static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context; struct lpfc_hba *phba = ctxp->phba; struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; struct lpfc_nvmet_tgtport *tgtp; uint32_t *payload, qno; uint32_t rc; unsigned long iflags; if (!nvmebuf) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6159 process_rcv_fcp_req, nvmebuf is NULL, " "oxid: x%x flg: x%x state: x%x\n", ctxp->oxid, ctxp->flag, ctxp->state); spin_lock_irqsave(&ctxp->ctxlock, iflags); lpfc_nvmet_defer_release(phba, ctxp); spin_unlock_irqrestore(&ctxp->ctxlock, iflags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); return; } if (ctxp->flag & LPFC_NVME_ABTS_RCV) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6324 IO oxid x%x aborted\n", ctxp->oxid); return; } payload = (uint32_t *)(nvmebuf->dbuf.virt); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; ctxp->flag |= LPFC_NVME_TNOTIFY; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_isr_cmd) ctxp->ts_cmd_nvme = ktime_get_ns(); #endif /* * The calling sequence should be: * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. * When we return from nvmet_fc_rcv_fcp_req, all relevant info * the NVME command / FC header is stored. * A buffer has already been reposted for this IO, so just free * the nvmebuf. 
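	 * The rc == -EOVERFLOW case is the exception: the buffer is kept
	 * until the transport's .defer_rcv callback runs, and a
	 * replacement DMA buffer is posted to the RQ in the meantime.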
*/ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req, payload, ctxp->size); /* Process FCP command */ if (rc == 0) { atomic_inc(&tgtp->rcv_fcp_cmd_out); spin_lock_irqsave(&ctxp->ctxlock, iflags); if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) || (nvmebuf != ctxp->rqb_buffer)) { spin_unlock_irqrestore(&ctxp->ctxlock, iflags); return; } ctxp->rqb_buffer = NULL; spin_unlock_irqrestore(&ctxp->ctxlock, iflags); lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ return; } /* Processing of FCP command is deferred */ if (rc == -EOVERFLOW) { lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " "from %06x\n", ctxp->oxid, ctxp->size, ctxp->sid); atomic_inc(&tgtp->rcv_fcp_cmd_out); atomic_inc(&tgtp->defer_fod); spin_lock_irqsave(&ctxp->ctxlock, iflags); if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) { spin_unlock_irqrestore(&ctxp->ctxlock, iflags); return; } spin_unlock_irqrestore(&ctxp->ctxlock, iflags); /* * Post a replacement DMA buffer to RQ and defer * freeing rcv buffer till .defer_rcv callback */ qno = nvmebuf->idx; lpfc_post_rq_buffer( phba, phba->sli4_hba.nvmet_mrq_hdr[qno], phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); return; } ctxp->flag &= ~LPFC_NVME_TNOTIFY; atomic_inc(&tgtp->rcv_fcp_cmd_drop); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", ctxp->oxid, rc, atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", ctxp->oxid, ctxp->size, ctxp->sid); spin_lock_irqsave(&ctxp->ctxlock, iflags); lpfc_nvmet_defer_release(phba, ctxp); spin_unlock_irqrestore(&ctxp->ctxlock, iflags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); #endif } static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_ctxbuf *ctx_buf = container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); lpfc_nvmet_process_rcv_fcp_req(ctx_buf); #endif } static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; struct lpfc_nvmet_ctx_info *get_infop; int i; /* * The current_infop for the MRQ a NVME command IU was received * on is empty. Our goal is to replenish this MRQs context * list from a another CPUs. * * First we need to pick a context list to start looking on. * nvmet_ctx_start_cpu has available context the last time * we needed to replenish this CPU where nvmet_ctx_next_cpu * is just the next sequential CPU for this MRQ. 
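	 * Starting at nvmet_ctx_start_cpu is only a hint; if that silo is
	 * also empty, the loop below walks the nvmet_ctx_next_cpu chain
	 * until it finds a non-empty list or has visited every CPU.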
*/ if (current_infop->nvmet_ctx_start_cpu) get_infop = current_infop->nvmet_ctx_start_cpu; else get_infop = current_infop->nvmet_ctx_next_cpu; for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { if (get_infop == current_infop) { get_infop = get_infop->nvmet_ctx_next_cpu; continue; } spin_lock(&get_infop->nvmet_ctx_list_lock); /* Just take the entire context list, if there are any */ if (get_infop->nvmet_ctx_list_cnt) { list_splice_init(&get_infop->nvmet_ctx_list, &current_infop->nvmet_ctx_list); current_infop->nvmet_ctx_list_cnt = get_infop->nvmet_ctx_list_cnt - 1; get_infop->nvmet_ctx_list_cnt = 0; spin_unlock(&get_infop->nvmet_ctx_list_lock); current_infop->nvmet_ctx_start_cpu = get_infop; list_remove_head(&current_infop->nvmet_ctx_list, ctx_buf, struct lpfc_nvmet_ctxbuf, list); return ctx_buf; } /* Otherwise, move on to the next CPU for this MRQ */ spin_unlock(&get_infop->nvmet_ctx_list_lock); get_infop = get_infop->nvmet_ctx_next_cpu; } #endif /* Nothing found, all contexts for the MRQ are in-flight */ return NULL; } /** * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer * @phba: pointer to lpfc hba data structure. * @idx: relative index of MRQ vector * @nvmebuf: pointer to lpfc nvme command HBQ data structure. * @isr_timestamp: in jiffies. * @cqflag: cq processing information regarding workload. * * This routine is used for processing the WQE associated with a unsolicited * event. It first determines whether there is an existing ndlp that matches * the DID from the unsolicited WQE. If not, it will create a new one with * the DID from the unsolicited WQE. The ELS command from the unsolicited * WQE is then used to invoke the proper routine and to set up proper state * of the discovery state machine. **/ static void lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, uint32_t idx, struct rqb_dmabuf *nvmebuf, uint64_t isr_timestamp, uint8_t cqflag) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct lpfc_nvmet_ctxbuf *ctx_buf; struct lpfc_nvmet_ctx_info *current_infop; uint32_t size, oxid, sid, qno; unsigned long iflag; int current_cpu; if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) return; ctx_buf = NULL; if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6157 NVMET FCP Drop IO\n"); if (nvmebuf) lpfc_rq_buf_free(phba, &nvmebuf->hbuf); return; } /* * Get a pointer to the context list for this MRQ based on * the CPU this MRQ IRQ is associated with. If the CPU association * changes from our initial assumption, the context list could * be empty, thus it would need to be replenished with the * context list from another CPU for this MRQ. 
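	 * If replenishing also fails, the command is queued on the
	 * nvmet_io_wait_list and a fresh DMA buffer is posted to the RQ
	 * so the exchange can be processed once a context frees up.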
*/ current_cpu = raw_smp_processor_id(); current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag); if (current_infop->nvmet_ctx_list_cnt) { list_remove_head(&current_infop->nvmet_ctx_list, ctx_buf, struct lpfc_nvmet_ctxbuf, list); current_infop->nvmet_ctx_list_cnt--; } else { ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop); } spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag); fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); oxid = be16_to_cpu(fc_hdr->fh_ox_id); size = nvmebuf->bytes_recv; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); if (idx != current_cpu) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6703 CPU Check rcv: " "cpu %d expect %d\n", current_cpu, idx); } #endif lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", oxid, size, raw_smp_processor_id()); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctx_buf) { /* Queue this NVME IO to process later */ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); list_add_tail(&nvmebuf->hbuf.list, &phba->sli4_hba.lpfc_nvmet_io_wait_list); phba->sli4_hba.nvmet_io_wait_cnt++; phba->sli4_hba.nvmet_io_wait_total++; spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); /* Post a brand new DMA buffer to RQ */ qno = nvmebuf->idx; lpfc_post_rq_buffer( phba, phba->sli4_hba.nvmet_mrq_hdr[qno], phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); atomic_inc(&tgtp->defer_ctx); return; } sid = sli4_sid_from_fc_hdr(fc_hdr); ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context; spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag); list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); if (ctxp->state != LPFC_NVME_STE_FREE) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6414 NVMET Context corrupt %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); } ctxp->wqeq = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; ctxp->oxid = oxid; ctxp->sid = sid; ctxp->idx = idx; ctxp->state = LPFC_NVME_STE_RCV; ctxp->entry_cnt = 1; ctxp->flag = 0; ctxp->ctxbuf = ctx_buf; ctxp->rqb_buffer = (void *)nvmebuf; ctxp->hdwq = NULL; spin_lock_init(&ctxp->ctxlock); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (isr_timestamp) ctxp->ts_isr_cmd = isr_timestamp; ctxp->ts_cmd_nvme = 0; ctxp->ts_nvme_data = 0; ctxp->ts_data_wqput = 0; ctxp->ts_isr_data = 0; ctxp->ts_data_nvme = 0; ctxp->ts_nvme_status = 0; ctxp->ts_status_wqput = 0; ctxp->ts_isr_status = 0; ctxp->ts_status_nvme = 0; #endif atomic_inc(&tgtp->rcv_fcp_cmd_in); /* check for cq processing load */ if (!cqflag) { lpfc_nvmet_process_rcv_fcp_req(ctx_buf); return; } if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6325 Unable to queue work for oxid x%x. " "FCP Drop IO [x%x x%x x%x]\n", ctxp->oxid, atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); spin_lock_irqsave(&ctxp->ctxlock, iflag); lpfc_nvmet_defer_release(phba, ctxp); spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); } } /** * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport * @phba: pointer to lpfc hba data structure. * @idx: relative index of MRQ vector * @nvmebuf: pointer to received nvme data structure. 
* @isr_timestamp: in jiffies. * @cqflag: cq processing information regarding workload. * * This routine is used to process an unsolicited event received from a SLI * (Service Level Interface) ring. The actual processing of the data buffer * associated with the unsolicited event is done by invoking the routine * lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the * SLI RQ on which the unsolicited event was received. **/ void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx, struct rqb_dmabuf *nvmebuf, uint64_t isr_timestamp, uint8_t cqflag) { if (!nvmebuf) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3167 NVMET FCP Drop IO\n"); return; } if (phba->nvmet_support == 0) { lpfc_rq_buf_free(phba, &nvmebuf->hbuf); return; } lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag); } /** * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure * @phba: pointer to a host N_Port data structure. * @ctxp: Context info for NVME LS Request * @rspbuf: DMA buffer of NVME command. * @rspsize: size of the NVME command. * * This routine is used for allocating a lpfc-WQE data structure from * the driver lpfc-WQE free-list and prepare the WQE with the parameters * passed into the routine for discovery state machine to issue an Extended * Link Service (NVME) commands. It is a generic lpfc-WQE allocation * and preparation routine that is used by all the discovery state machine * routines and the NVME command-specific fields will be later set up by * the individual discovery machine routines after calling this routine * allocating and preparing a generic WQE data structure. It fills in the * Buffer Descriptor Entries (BDEs), allocates buffers for both command * payload and response payload (if expected). The reference count on the * ndlp is incremented by 1 and the reference to the ndlp is put into * context1 of the WQE data structure for this WQE to hold the ndlp * reference for the command's callback function to access later. 
* * Return code * Pointer to the newly allocated/prepared nvme wqe data structure * NULL - when nvme wqe data structure allocation/preparation failed **/ static struct lpfc_iocbq * lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp, dma_addr_t rspbuf, uint16_t rspsize) { struct lpfc_nodelist *ndlp; struct lpfc_iocbq *nvmewqe; union lpfc_wqe128 *wqe; if (!lpfc_is_link_up(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6104 NVMET prep LS wqe: link err: " "NPORT x%x oxid:x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } /* Allocate buffer for command wqe */ nvmewqe = lpfc_sli_get_iocbq(phba); if (nvmewqe == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6105 NVMET prep LS wqe: No WQE: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6106 NVMET prep LS wqe: No ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); goto nvme_wqe_free_wqeq_exit; } ctxp->wqeq = nvmewqe; /* prevent preparing wqe with NULL ndlp reference */ nvmewqe->ndlp = lpfc_nlp_get(ndlp); if (!nvmewqe->ndlp) goto nvme_wqe_free_wqeq_exit; nvmewqe->context_un.axchg = ctxp; wqe = &nvmewqe->wqe; memset(wqe, 0, sizeof(union lpfc_wqe)); /* Words 0 - 2 */ wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); /* Word 3 */ /* Word 4 */ /* Word 5 */ bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); /* Word 7 */ bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, CMD_XMIT_SEQUENCE64_WQE); bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); /* Word 8 */ wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); /* Needs to be set by caller */ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); /* Word 10 */ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, LPFC_WQE_LENLOC_WORD12); bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); /* Word 11 */ bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, OTHER_COMMAND); /* Word 12 */ wqe->xmit_sequence.xmit_len = rspsize; nvmewqe->retry = 1; nvmewqe->vport = phba->pport; nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; nvmewqe->cmd_flag |= LPFC_IO_NVME_LS; /* Xmit NVMET response to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6039 Xmit NVMET LS response to remote " "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, rspsize); return nvmewqe; 
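/*
 * Error path: undo the partial WQE setup and return the iocbq to the
 * free pool; callers treat a NULL return as failure.
 */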
nvme_wqe_free_wqeq_exit: nvmewqe->context_un.axchg = NULL; nvmewqe->ndlp = NULL; nvmewqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, nvmewqe); return NULL; } static struct lpfc_iocbq * lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp) { struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req; struct lpfc_nvmet_tgtport *tgtp; struct sli4_sge *sgl; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *nvmewqe; struct scatterlist *sgel; union lpfc_wqe128 *wqe; struct ulp_bde64 *bde; dma_addr_t physaddr; int i, cnt, nsegs; bool use_pbde = false; int xc = 1; if (!lpfc_is_link_up(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6107 NVMET prep FCP wqe: link err:" "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6108 NVMET prep FCP wqe: no ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6109 NVMET prep FCP wqe: seg cnt err: " "NPORT x%x oxid x%x ste %d cnt %d\n", ctxp->sid, ctxp->oxid, ctxp->state, phba->cfg_nvme_seg_cnt); return NULL; } nsegs = rsp->sg_cnt; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; nvmewqe = ctxp->wqeq; if (nvmewqe == NULL) { /* Allocate buffer for command wqe */ nvmewqe = ctxp->ctxbuf->iocbq; if (nvmewqe == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6110 NVMET prep FCP wqe: No " "WQE: NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); return NULL; } ctxp->wqeq = nvmewqe; xc = 0; /* create new XRI */ nvmewqe->sli4_lxritag = NO_XRI; nvmewqe->sli4_xritag = NO_XRI; } /* Sanity check */ if (((ctxp->state == LPFC_NVME_STE_RCV) && (ctxp->entry_cnt == 1)) || (ctxp->state == LPFC_NVME_STE_DATA)) { wqe = &nvmewqe->wqe; } else { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6111 Wrong state NVMET FCP: %d cnt %d\n", ctxp->state, ctxp->entry_cnt); return NULL; } sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; switch (rsp->op) { case NVMET_FCOP_READDATA: case NVMET_FCOP_READDATA_RSP: /* From the tsend template, initialize words 7 - 11 */ memcpy(&wqe->words[7], &lpfc_tsend_cmd_template.words[7], sizeof(uint32_t) * 5); /* Words 0 - 2 : The first sg segment */ sgel = &rsp->sg[0]; physaddr = sg_dma_address(sgel); wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); wqe->fcp_tsend.bde.addrHigh = cpu_to_le32(putPaddrHigh(physaddr)); /* Word 3 */ wqe->fcp_tsend.payload_offset_len = 0; /* Word 4 */ wqe->fcp_tsend.relative_offset = ctxp->offset; /* Word 5 */ wqe->fcp_tsend.reserved = 0; /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, nvmewqe->sli4_xritag); /* Word 7 - set ar later */ /* Word 8 */ wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); /* Word 10 - set wqes later, in template xc=1 */ if (!xc) bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0); /* Word 12 */ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; /* Setup 2 SKIP SGEs */ sgl->addr_hi = 0; sgl->addr_lo = 0; sgl->word2 = 0; 
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = 0; sgl++; sgl->addr_hi = 0; sgl->addr_lo = 0; sgl->word2 = 0; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = 0; sgl++; if (rsp->op == NVMET_FCOP_READDATA_RSP) { atomic_inc(&tgtp->xmt_fcp_read_rsp); /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, ((rsp->rsplen >> 2) - 1)); memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); } } else { atomic_inc(&tgtp->xmt_fcp_read); /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); } break; case NVMET_FCOP_WRITEDATA: /* From the treceive template, initialize words 3 - 11 */ memcpy(&wqe->words[3], &lpfc_treceive_cmd_template.words[3], sizeof(uint32_t) * 9); /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP; wqe->fcp_treceive.bde.tus.f.bdeSize = 0; wqe->fcp_treceive.bde.addrLow = 0; wqe->fcp_treceive.bde.addrHigh = 0; /* Word 4 */ wqe->fcp_treceive.relative_offset = ctxp->offset; /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, nvmewqe->sli4_xritag); /* Word 7 */ /* Word 8 */ wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); /* Word 10 - in template xc=1 */ if (!xc) bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); /* Word 11 - check for pbde */ if (nsegs == 1 && phba->cfg_enable_pbde) { use_pbde = true; /* Word 11 - PBDE bit already preset by template */ } else { /* Overwrite default template setting */ bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); } /* Word 12 */ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; /* Setup 2 SKIP SGEs */ sgl->addr_hi = 0; sgl->addr_lo = 0; sgl->word2 = 0; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = 0; sgl++; sgl->addr_hi = 0; sgl->addr_lo = 0; sgl->word2 = 0; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = 0; sgl++; atomic_inc(&tgtp->xmt_fcp_write); break; case NVMET_FCOP_RSP: /* From the treceive template, initialize words 4 - 11 */ memcpy(&wqe->words[4], &lpfc_trsp_cmd_template.words[4], sizeof(uint32_t) * 8); /* Words 0 - 2 */ physaddr = rsp->rspdma; wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; wqe->fcp_trsp.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); wqe->fcp_trsp.bde.addrHigh = cpu_to_le32(putPaddrHigh(physaddr)); /* Word 3 */ wqe->fcp_trsp.response_len = rsp->rsplen; /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, nvmewqe->sli4_xritag); /* Word 7 */ /* Word 8 */ wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); /* Word 10 */ if (xc) bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1); /* Word 11 */ /* In template wqes=0 irsp=0 irsplen=0 - 
good response */ if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) { /* Bad response - embed it */ bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, ((rsp->rsplen >> 2) - 1)); memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); } /* Word 12 */ wqe->fcp_trsp.rsvd_12_15[0] = 0; /* Use rspbuf, NOT sg list */ nsegs = 0; sgl->word2 = 0; atomic_inc(&tgtp->xmt_fcp_rsp); break; default: lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6064 Unknown Rsp Op %d\n", rsp->op); return NULL; } nvmewqe->retry = 1; nvmewqe->vport = phba->pport; nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; nvmewqe->ndlp = ndlp; for_each_sg(rsp->sg, sgel, nsegs, i) { physaddr = sg_dma_address(sgel); cnt = sg_dma_len(sgel); sgl->addr_hi = putPaddrHigh(physaddr); sgl->addr_lo = putPaddrLow(physaddr); sgl->word2 = 0; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); if ((i+1) == rsp->sg_cnt) bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(cnt); sgl++; ctxp->offset += cnt; } bde = (struct ulp_bde64 *)&wqe->words[13]; if (use_pbde) { /* decrement sgl ptr backwards once to first data sge */ sgl--; /* Words 13-15 (PBDE) */ bde->addrLow = sgl->addr_lo; bde->addrHigh = sgl->addr_hi; bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bde->tus.w = cpu_to_le32(bde->tus.w); } else { memset(bde, 0, sizeof(struct ulp_bde64)); } ctxp->state = LPFC_NVME_STE_DATA; ctxp->entry_cnt++; return nvmewqe; } /** * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME ABTS for FCP cmds * The function frees memory resources used for the NVME commands. **/ static void lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; uint32_t result; unsigned long flags; bool released = false; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (ctxp->flag & LPFC_NVME_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->state = LPFC_NVME_STE_DONE; /* Check if we already received a free context call * and we have completed processing an abort situation. */ if ((ctxp->flag & LPFC_NVME_CTX_RLS) && !(ctxp->flag & LPFC_NVME_XBUSY)) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6165 ABORT cmpl: oxid x%x flg x%x (%d) " "WCQE: %08x %08x %08x %08x\n", ctxp->oxid, ctxp->flag, released, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); cmdwqe->rsp_dmabuf = NULL; cmdwqe->bpl_dmabuf = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. 
*/ if (released) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); /* This is the iocbq for the abort, not the command */ lpfc_sli_release_iocbq(phba, cmdwqe); /* Since iaab/iaar are NOT set, there is no work left. * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted * should have been called already. */ } /** * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME ABTS for FCP cmds * The function frees memory resources used for the NVME commands. **/ static void lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; unsigned long flags; uint32_t result; bool released = false; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; if (!ctxp) { /* if context is clear, related io alrady complete */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n", wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); return; } tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; spin_lock_irqsave(&ctxp->ctxlock, flags); if (ctxp->flag & LPFC_NVME_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); /* Sanity check */ if (ctxp->state != LPFC_NVME_STE_ABORT) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6112 ABTS Wrong state:%d oxid x%x\n", ctxp->state, ctxp->oxid); } /* Check if we already received a free context call * and we have completed processing an abort situation. */ ctxp->state = LPFC_NVME_STE_DONE; if ((ctxp->flag & LPFC_NVME_CTX_RLS) && !(ctxp->flag & LPFC_NVME_XBUSY)) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6316 ABTS cmpl oxid x%x flg x%x (%x) " "WCQE: %08x %08x %08x %08x\n", ctxp->oxid, ctxp->flag, released, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); cmdwqe->rsp_dmabuf = NULL; cmdwqe->bpl_dmabuf = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. */ if (released) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); /* Since iaab/iaar are NOT set, there is no work left. * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted * should have been called already. */ } /** * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME ABTS for LS cmds * The function frees memory resources used for the NVME commands. 
**/ static void lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; uint32_t result; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; if (phba->nvmet_support) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; atomic_inc(&tgtp->xmt_ls_abort_cmpl); } lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n", ctxp, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); if (!ctxp) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6415 NVMET LS Abort No ctx: WCQE: " "%08x %08x %08x %08x\n", wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); lpfc_sli_release_iocbq(phba, cmdwqe); return; } if (ctxp->state != LPFC_NVME_STE_LS_ABORT) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6416 NVMET LS abort cmpl state mismatch: " "oxid x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } cmdwqe->rsp_dmabuf = NULL; cmdwqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, cmdwqe); kfree(ctxp); } static int lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { struct lpfc_nvmet_tgtport *tgtp = NULL; struct lpfc_iocbq *abts_wqeq; union lpfc_wqe128 *wqe_abts; struct lpfc_nodelist *ndlp; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6067 ABTS: sid %x xri x%x/x%x\n", sid, xri, ctxp->wqeq->sli4_xritag); if (phba->nvmet_support && phba->targetport) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; ndlp = lpfc_findnode_did(phba->pport, sid); if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { if (tgtp) atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6134 Drop ABTS - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); /* No failure to an ABTS request. */ return 0; } abts_wqeq = ctxp->wqeq; wqe_abts = &abts_wqeq->wqe; /* * Since we zero the whole WQE, we need to ensure we set the WQE fields * that were initialized in lpfc_sli4_nvmet_alloc. 
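	 * Note the abort is sent as an XMIT_SEQUENCE64 WQE carrying a
	 * BLS ABTS (FC_RCTL_BA_ABTS) back to the SID of the exchange,
	 * rather than as a driver ABORT WQE against an active XRI.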
*/ memset(wqe_abts, 0, sizeof(union lpfc_wqe)); /* Word 5 */ bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->sli4_xritag); /* Word 7 */ bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, CMD_XMIT_SEQUENCE64_WQE); bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); /* Word 8 */ wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); /* Needs to be set by caller */ bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); /* Word 10 */ bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_LENLOC_WORD12); bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); /* Word 11 */ bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, OTHER_COMMAND); abts_wqeq->vport = phba->pport; abts_wqeq->ndlp = ndlp; abts_wqeq->context_un.axchg = ctxp; abts_wqeq->bpl_dmabuf = NULL; abts_wqeq->num_bdes = 0; /* hba_wqidx should already be setup from command we are aborting */ abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; abts_wqeq->iocb.ulpLe = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6069 Issue ABTS to xri x%x reqtag x%x\n", xri, abts_wqeq->iotag); return 1; } static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; struct lpfc_nodelist *ndlp; unsigned long flags; bool ia; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctxp->wqeq) { ctxp->wqeq = ctxp->ctxbuf->iocbq; ctxp->wqeq->hba_wqidx = 0; } ndlp = lpfc_findnode_did(phba->pport, sid); if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6160 Drop ABORT - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); /* No failure to an ABTS request. */ spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Issue ABTS for this WQE based on iotag */ ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); spin_lock_irqsave(&ctxp->ctxlock, flags); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6161 ABORT failed: No wqeqs: " "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. */ ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } abts_wqeq = ctxp->abort_wqeq; ctxp->state = LPFC_NVME_STE_ABORT; ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false; spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. 
*/ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6162 ABORT Request to rport DID x%06x " "for xri x%x x%x\n", ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished. */ spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6163 Driver in reset cleanup - flushing " "NVME Req now. hba_flag x%x oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Outstanding abort is in progress */ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6164 Outstanding NVME I/O Abort Request " "still pending on oxid x%x\n", ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Ready - mark outstanding as aborted by driver. */ abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED; lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag, abts_wqeq->iotag, CLASS3, LPFC_WQE_CQ_ID_DEFAULT, ia, true); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; abts_wqeq->cmd_flag |= LPFC_IO_NVME; abts_wqeq->context_un.axchg = ctxp; abts_wqeq->vport = phba->pport; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { atomic_inc(&tgtp->xmt_abort_sol); return 0; } atomic_inc(&tgtp->xmt_abort_rsp_error); spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_sli_release_iocbq(phba, abts_wqeq); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6166 Failed ABORT issue_wqe with status x%x " "for oxid x%x.\n", rc, ctxp->oxid); return 1; } static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; unsigned long flags; bool released = false; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctxp->wqeq) { ctxp->wqeq = ctxp->ctxbuf->iocbq; ctxp->wqeq->hba_wqidx = 0; } if (ctxp->state == LPFC_NVME_STE_FREE) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); rc = WQE_BUSY; goto aerr; } ctxp->state = LPFC_NVME_STE_ABORT; ctxp->entry_cnt++; rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); if (rc == 0) goto aerr; spin_lock_irqsave(&phba->hbalock, flags); abts_wqeq = ctxp->wqeq; abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; abts_wqeq->cmd_flag |= LPFC_IO_NVMET; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { return 0; } aerr: 
spin_lock_irqsave(&ctxp->ctxlock, flags); if (ctxp->flag & LPFC_NVME_CTX_RLS) { spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS); spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6135 Failed to Issue ABTS for oxid x%x. Status x%x " "(%x)\n", ctxp->oxid, rc, released); if (released) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); return 1; } /** * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received * via async frame receive where the frame is not handled. * @phba: pointer to adapter structure * @ctxp: pointer to the asynchronously received received sequence * @sid: address of the remote port to send the ABTS to * @xri: oxid value to for the ABTS (other side's exchange id). **/ int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *ctxp, uint32_t sid, uint16_t xri) { struct lpfc_nvmet_tgtport *tgtp = NULL; struct lpfc_iocbq *abts_wqeq; unsigned long flags; int rc; if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) || (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) { ctxp->state = LPFC_NVME_STE_LS_ABORT; ctxp->entry_cnt++; } else { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6418 NVMET LS abort state mismatch " "IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); ctxp->state = LPFC_NVME_STE_LS_ABORT; } if (phba->nvmet_support && phba->targetport) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctxp->wqeq) { /* Issue ABTS for this WQE based on iotag */ ctxp->wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->wqeq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6068 Abort failed: No wqeqs: " "xri: x%x\n", xri); /* No failure to an ABTS request. */ kfree(ctxp); return 0; } } abts_wqeq = ctxp->wqeq; if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { rc = WQE_BUSY; goto out; } spin_lock_irqsave(&phba->hbalock, flags); abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { if (tgtp) atomic_inc(&tgtp->xmt_abort_unsol); return 0; } out: if (tgtp) atomic_inc(&tgtp->xmt_abort_rsp_error); abts_wqeq->rsp_dmabuf = NULL; abts_wqeq->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, abts_wqeq); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6056 Failed to Issue ABTS. Status x%x\n", rc); return 1; } /** * lpfc_nvmet_invalidate_host * * @phba: pointer to the driver instance bound to an adapter port. * @ndlp: pointer to an lpfc_nodelist type * * This routine upcalls the nvmet transport to invalidate an NVME * host to which this target instance had active connections. */ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { u32 ndlp_has_hh; struct lpfc_nvmet_tgtport *tgtp; lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, "6203 Invalidating hosthandle x%px\n", ndlp); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE); spin_lock_irq(&ndlp->lock); ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH; spin_unlock_irq(&ndlp->lock); /* Do not invalidate any nodes that do not have a hosthandle. * The host_release callbk will cause a node reference * count imbalance and a crash. 
*/ if (!ndlp_has_hh) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, "6204 Skip invalidate on node x%px DID x%x\n", ndlp, ndlp->nlp_DID); return; } #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) /* Need to get the nvmet_fc_target_port pointer here.*/ nvmet_fc_invalidate_host(phba->targetport, ndlp); #endif }
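The ABTS WQE built in lpfc_nvmet_unsol_issue_abort() above is assembled by packing values into fixed 32-bit words with bf_set() accessors. Below is a minimal standalone sketch of that mask-and-shift idiom only; set_field() and the bit positions are invented for illustration and are not the real SLI-4 layout or the driver's generated macros.

#include <stdint.h>
#include <stdio.h>

/*
 * set_field() is a hypothetical helper: write 'val' into a 'width'-bit
 * field starting at bit 'shift' of a 32-bit word, clearing the field first.
 * It only illustrates the packing style of the bf_set() calls above.
 */
static inline void set_field(uint32_t *word, unsigned int shift,
			     unsigned int width, uint32_t val)
{
	uint32_t mask = (width >= 32 ? 0xffffffffu : ((1u << width) - 1u)) << shift;

	*word = (*word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t word5 = 0;

	/* invented bit positions, only analogous to wqe_rctl/wqe_type/wqe_ls */
	set_field(&word5, 24, 8, 0x81);	/* an R_CTL-style code */
	set_field(&word5, 16, 8, 0x00);	/* a TYPE-style code */
	set_field(&word5, 15, 1, 1);	/* a "last sequence"-style flag */

	printf("word5 = 0x%08x\n", word5);
	return 0;
}

In the driver itself the per-field shift/mask/word placement comes from centralized definitions next to the hardware structures, so the packing is not open-coded at each call site the way this sketch does it.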
linux-master
drivers/scsi/lpfc/lpfc_nvmet.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/mempool.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_crtn.h" #include "lpfc_logmsg.h" #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */ #define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */ #define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */ int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { size_t bytes; int max_xri = phba->sli4_hba.max_cfg_param.max_xri; if (max_xri <= 0) return -ENOMEM; bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * sizeof(unsigned long); phba->cfg_rrq_xri_bitmap_sz = bytes; phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, bytes); if (!phba->active_rrq_pool) return -ENOMEM; else return 0; } /** * lpfc_mem_alloc - create and allocate all PCI and memory pools * @phba: HBA to allocate pools for * @align: alignment requirement for blocks; must be a power of two * * Description: Creates and allocates PCI pools lpfc_mbuf_pool, * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. Must be called with no locks held. If any * allocation fails, frees all successfully allocated memory before returning. 
* * Returns: * 0 on success * -ENOMEM on failure (if any memory allocations fail) **/ int lpfc_mem_alloc(struct lpfc_hba *phba, int align) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); if (!phba->lpfc_mbuf_pool) goto fail; pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!pool->elements) goto fail_free_lpfc_mbuf_pool; pool->max_count = 0; pool->current_count = 0; for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) { pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, &pool->elements[i].phys); if (!pool->elements[i].virt) goto fail_free_mbuf_pool; pool->max_count++; pool->current_count++; } phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE, sizeof(LPFC_MBOXQ_t)); if (!phba->mbox_mem_pool) goto fail_free_mbuf_pool; phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, sizeof(struct lpfc_nodelist)); if (!phba->nlp_mem_pool) goto fail_free_mbox_pool; if (phba->sli_rev == LPFC_SLI_REV4) { phba->rrq_pool = mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE, sizeof(struct lpfc_node_rrq)); if (!phba->rrq_pool) goto fail_free_nlp_mem_pool; phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool", &phba->pcidev->dev, LPFC_HDR_BUF_SIZE, align, 0); if (!phba->lpfc_hrb_pool) goto fail_free_rrq_mem_pool; phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool", &phba->pcidev->dev, LPFC_DATA_BUF_SIZE, align, 0); if (!phba->lpfc_drb_pool) goto fail_free_hrb_pool; phba->lpfc_hbq_pool = NULL; } else { phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool", &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); if (!phba->lpfc_hbq_pool) goto fail_free_nlp_mem_pool; phba->lpfc_hrb_pool = NULL; phba->lpfc_drb_pool = NULL; } if (phba->cfg_EnableXLane) { phba->device_data_mem_pool = mempool_create_kmalloc_pool( LPFC_DEVICE_DATA_POOL_SIZE, sizeof(struct lpfc_device_data)); if (!phba->device_data_mem_pool) goto fail_free_drb_pool; } else { phba->device_data_mem_pool = NULL; } return 0; fail_free_drb_pool: dma_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; fail_free_hrb_pool: dma_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; fail_free_rrq_mem_pool: mempool_destroy(phba->rrq_pool); phba->rrq_pool = NULL; fail_free_nlp_mem_pool: mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; fail_free_mbox_pool: mempool_destroy(phba->mbox_mem_pool); phba->mbox_mem_pool = NULL; fail_free_mbuf_pool: while (i--) dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, pool->elements[i].phys); kfree(pool->elements); fail_free_lpfc_mbuf_pool: dma_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; fail: return -ENOMEM; } int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba) { phba->lpfc_nvmet_drb_pool = dma_pool_create("lpfc_nvmet_drb_pool", &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE, SGL_ALIGN_SZ, 0); if (!phba->lpfc_nvmet_drb_pool) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6024 Can't enable NVME Target - no memory\n"); return -ENOMEM; } return 0; } /** * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc * @phba: HBA to free memory for * * Description: Free the memory allocated by lpfc_mem_alloc routine. This * routine is a the counterpart of lpfc_mem_alloc. 
* * Returns: None **/ void lpfc_mem_free(struct lpfc_hba *phba) { int i; struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; struct lpfc_device_data *device_data; /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); dma_pool_destroy(phba->lpfc_nvmet_drb_pool); phba->lpfc_nvmet_drb_pool = NULL; dma_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; dma_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; dma_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; mempool_destroy(phba->rrq_pool); phba->rrq_pool = NULL; /* Free NLP memory pool */ mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { mempool_destroy(phba->active_rrq_pool); phba->active_rrq_pool = NULL; } /* Free mbox memory pool */ mempool_destroy(phba->mbox_mem_pool); phba->mbox_mem_pool = NULL; /* Free MBUF memory pool */ for (i = 0; i < pool->current_count; i++) dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, pool->elements[i].phys); kfree(pool->elements); dma_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; /* Free Device Data memory pool */ if (phba->device_data_mem_pool) { /* Ensure all objects have been returned to the pool */ while (!list_empty(&phba->luns)) { device_data = list_first_entry(&phba->luns, struct lpfc_device_data, listentry); list_del(&device_data->listentry); mempool_free(device_data, phba->device_data_mem_pool); } mempool_destroy(phba->device_data_mem_pool); } phba->device_data_mem_pool = NULL; return; } /** * lpfc_mem_free_all - Frees all PCI and driver memory * @phba: HBA to free memory for * * Description: Free memory from PCI and driver memory pools and also those * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees * the VPI bitmask. 
* * Returns: None **/ void lpfc_mem_free_all(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *mbox, *next_mbox; struct lpfc_dmabuf *mp; /* Free memory used in mailbox queue back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); if (mp) { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } /* Free memory used in mailbox cmpl list back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); if (mp) { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } /* Free the active mailbox command back to the mailbox memory pool */ spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irq(&phba->hbalock); if (psli->mbox_active) { mbox = psli->mbox_active; mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); if (mp) { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } mempool_free(mbox, phba->mbox_mem_pool); psli->mbox_active = NULL; } /* Free and destroy all the allocated memory pools */ lpfc_mem_free(phba); /* Free DMA buffer memory pool */ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); phba->lpfc_sg_dma_buf_pool = NULL; dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); phba->lpfc_cmd_rsp_buf_pool = NULL; /* Free Congestion Data buffer */ if (phba->cgn_i) { dma_free_coherent(&phba->pcidev->dev, sizeof(struct lpfc_cgn_info), phba->cgn_i->virt, phba->cgn_i->phys); kfree(phba->cgn_i); phba->cgn_i = NULL; } /* Free RX Monitor */ if (phba->rx_monitor) { lpfc_rx_monitor_destroy_ring(phba->rx_monitor); kfree(phba->rx_monitor); phba->rx_monitor = NULL; } /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; return; } /** * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool * @phba: HBA which owns the pool to allocate from * @mem_flags: indicates if this is a priority (MEM_PRI) allocation * @handle: used to return the DMA-mapped address of the mbuf * * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool. * Allocates from generic dma_pool_alloc function first and if that fails and * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the * HBA's pool. * * Notes: Not interrupt-safe. Must be called with no locks held. Takes * phba->hbalock. * * Returns: * pointer to the allocated mbuf on success * NULL on failure **/ void * lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; unsigned long iflags; void *ret; ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); spin_lock_irqsave(&phba->hbalock, iflags); if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { pool->current_count--; ret = pool->elements[pool->current_count].virt; *handle = pool->elements[pool->current_count].phys; } spin_unlock_irqrestore(&phba->hbalock, iflags); return ret; } /** * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked) * @phba: HBA which owns the pool to return to * @virt: mbuf to free * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed * * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if * it is below its max_count, frees the mbuf otherwise. * * Notes: Must be called with phba->hbalock held to synchronize access to * lpfc_mbuf_safety_pool. 
* * Returns: None **/ void __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; if (pool->current_count < pool->max_count) { pool->elements[pool->current_count].virt = virt; pool->elements[pool->current_count].phys = dma; pool->current_count++; } else { dma_pool_free(phba->lpfc_mbuf_pool, virt, dma); } return; } /** * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked) * @phba: HBA which owns the pool to return to * @virt: mbuf to free * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed * * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if * it is below its max_count, frees the mbuf otherwise. * * Notes: Takes phba->hbalock. Can be called with or without other locks held. * * Returns: None **/ void lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) { unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_mbuf_free(phba, virt, dma); spin_unlock_irqrestore(&phba->hbalock, iflags); return; } /** * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the * lpfc_sg_dma_buf_pool PCI pool * @phba: HBA which owns the pool to allocate from * @mem_flags: indicates if this is a priority (MEM_PRI) allocation * @handle: used to return the DMA-mapped address of the nvmet_buf * * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool * PCI pool. Allocates from generic dma_pool_alloc function. * * Returns: * pointer to the allocated nvmet_buf on success * NULL on failure **/ void * lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) { void *ret; ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); return ret; } /** * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool * PCI pool * @phba: HBA which owns the pool to return to * @virt: nvmet_buf to free * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed * * Returns: None **/ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); } /** * lpfc_els_hbq_alloc - Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. * * Returns: * pointer to HBQ on success * NULL on failure **/ struct hbq_dmabuf * lpfc_els_hbq_alloc(struct lpfc_hba *phba) { struct hbq_dmabuf *hbqbp; hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); if (!hbqbp) return NULL; hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); return NULL; } hbqbp->total_size = LPFC_BPL_SIZE; return hbqbp; } /** * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc * @phba: HBA buffer was allocated for * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc * * Description: Frees both the container and the DMA-mapped buffer returned by * lpfc_els_hbq_alloc. * * Notes: Can be called with or without locks held. 
* * Returns: None **/ void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } /** * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer * @phba: HBA to allocate a receive buffer for * * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. * * Returns: * pointer to HBQ on success * NULL on failure **/ struct hbq_dmabuf * lpfc_sli4_rb_alloc(struct lpfc_hba *phba) { struct hbq_dmabuf *dma_buf; dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); if (!dma_buf) return NULL; dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &dma_buf->hbuf.phys); if (!dma_buf->hbuf.virt) { kfree(dma_buf); return NULL; } dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, &dma_buf->dbuf.phys); if (!dma_buf->dbuf.virt) { dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, dma_buf->hbuf.phys); kfree(dma_buf); return NULL; } dma_buf->total_size = LPFC_DATA_BUF_SIZE; return dma_buf; } /** * lpfc_sli4_rb_free - Frees a receive buffer * @phba: HBA buffer was allocated for * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc * * Description: Frees both the container and the DMA-mapped buffers returned by * lpfc_sli4_rb_alloc. * * Notes: Can be called with or without locks held. * * Returns: None **/ void lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) { dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); } /** * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer * @phba: HBA to allocate a receive buffer for * * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Returns: * pointer to HBQ on success * NULL on failure **/ struct rqb_dmabuf * lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) { struct rqb_dmabuf *dma_buf; dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL); if (!dma_buf) return NULL; dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &dma_buf->hbuf.phys); if (!dma_buf->hbuf.virt) { kfree(dma_buf); return NULL; } dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool, GFP_KERNEL, &dma_buf->dbuf.phys); if (!dma_buf->dbuf.virt) { dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, dma_buf->hbuf.phys); kfree(dma_buf); return NULL; } dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; return dma_buf; } /** * lpfc_sli4_nvmet_free - Frees a receive buffer * @phba: HBA buffer was allocated for * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc * * Description: Frees both the container and the DMA-mapped buffers returned by * lpfc_sli4_nvmet_alloc. * * Notes: Can be called with or without locks held. * * Returns: None **/ void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) { dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); dma_pool_free(phba->lpfc_nvmet_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); } /** * lpfc_in_buf_free - Free a DMA buffer * @phba: HBA buffer is associated with * @mp: Buffer to free * * Description: Frees the given DMA buffer in the appropriate way given if the * HBA is running in SLI3 mode with HBQs enabled. * * Notes: Takes phba->hbalock. Can be called with or without other locks held. 
* * Returns: None **/ void lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) { struct hbq_dmabuf *hbq_entry; unsigned long flags; if (!mp) return; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); /* Check whether HBQ is still in use */ spin_lock_irqsave(&phba->hbalock, flags); if (!phba->hbq_in_use) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } list_del(&hbq_entry->dbuf.list); if (hbq_entry->tag == -1) { (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) (phba, hbq_entry); } else { lpfc_sli_free_hbq(phba, hbq_entry); } spin_unlock_irqrestore(&phba->hbalock, flags); } else { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } return; } /** * lpfc_rq_buf_free - Free a RQ DMA buffer * @phba: HBA buffer is associated with * @mp: Buffer to free * * Description: Frees the given DMA buffer in the appropriate way given by * reposting it to its associated RQ so it can be reused. * * Notes: Takes phba->hbalock. Can be called with or without other locks held. * * Returns: None **/ void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) { struct lpfc_rqb *rqbp; struct lpfc_rqe hrqe; struct lpfc_rqe drqe; struct rqb_dmabuf *rqb_entry; unsigned long flags; int rc; if (!mp) return; rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf); rqbp = rqb_entry->hrq->rqbp; spin_lock_irqsave(&phba->hbalock, flags); list_del(&rqb_entry->hbuf.list); hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys); hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys); drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys); drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); if (rc < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6409 Cannot post to HRQ %d: %x %x %x " "DRQ %x %x\n", rqb_entry->hrq->queue_id, rqb_entry->hrq->host_index, rqb_entry->hrq->hba_index, rqb_entry->hrq->entry_count, rqb_entry->drq->host_index, rqb_entry->drq->hba_index); (rqbp->rqb_free_buffer)(phba, rqb_entry); } else { list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); rqbp->buffer_count++; } spin_unlock_irqrestore(&phba->hbalock, flags); }
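lpfc_mbuf_alloc() and __lpfc_mbuf_free() above implement a small "safety pool": a reserve of pre-allocated buffers that is only dipped into when the primary dma_pool_alloc() fails on a MEM_PRI request, and that is topped back up on free until it reaches max_count. The following is a userspace sketch of the same fallback pattern, with malloc() standing in for the DMA pool and a pthread mutex standing in for phba->hbalock; the names and sizes are invented and none of this is driver code.

/*
 * Userspace sketch of the safety-pool fallback: keep a small pre-allocated
 * reserve, hand it out only when the primary allocator fails on a priority
 * request, and refill the reserve on free until it is back at RESERVE_MAX.
 */
#include <pthread.h>
#include <stdlib.h>

#define RESERVE_MAX	8
#define BUF_SIZE	1024
#define MEM_PRI		0x1	/* stand-in "priority allocation" flag */

static void *reserve[RESERVE_MAX];
static int reserve_cnt;
static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;

static int safety_pool_init(void)
{
	for (reserve_cnt = 0; reserve_cnt < RESERVE_MAX; reserve_cnt++) {
		reserve[reserve_cnt] = malloc(BUF_SIZE);
		if (!reserve[reserve_cnt])
			return -1;
	}
	return 0;
}

static void *buf_alloc(int mem_flags)
{
	void *p = malloc(BUF_SIZE);	/* primary allocator */

	if (!p && (mem_flags & MEM_PRI)) {
		/* primary allocation failed, fall back to the reserve */
		pthread_mutex_lock(&reserve_lock);
		if (reserve_cnt)
			p = reserve[--reserve_cnt];
		pthread_mutex_unlock(&reserve_lock);
	}
	return p;
}

static void buf_free(void *p)
{
	pthread_mutex_lock(&reserve_lock);
	if (reserve_cnt < RESERVE_MAX)
		reserve[reserve_cnt++] = p;	/* top the reserve back up */
	else
		free(p);
	pthread_mutex_unlock(&reserve_lock);
}

int main(void)
{
	if (safety_pool_init())
		return 1;

	void *p = buf_alloc(MEM_PRI);

	if (p)
		buf_free(p);
	return 0;
}

Refilling the reserve on every free, regardless of where the buffer originally came from, is what lets the reserve converge back to its maximum after a burst of priority allocations.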
linux-master
drivers/scsi/lpfc/lpfc_mem.c
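lpfc_mem_alloc() in the lpfc_mem.c record above unwinds partial setup with a ladder of goto labels: each allocation failure jumps to a label that frees only what has already been created, in reverse order. Here is a standalone sketch of that idiom with invented resource names; plain heap buffers stand in for the driver's DMA pools and mempools.

/*
 * Standalone sketch of a goto-based unwind ladder: allocate resources in
 * order, and on any failure jump to a label that releases only what has
 * already been set up, in reverse allocation order.
 */
#include <stdlib.h>

struct ctx {
	void *pool_a;
	void *pool_b;
	void *pool_c;
};

static int ctx_setup(struct ctx *c)
{
	c->pool_a = malloc(256);
	if (!c->pool_a)
		goto fail;

	c->pool_b = malloc(512);
	if (!c->pool_b)
		goto fail_free_a;

	c->pool_c = malloc(1024);
	if (!c->pool_c)
		goto fail_free_b;

	return 0;

fail_free_b:
	free(c->pool_b);
	c->pool_b = NULL;
fail_free_a:
	free(c->pool_a);
	c->pool_a = NULL;
fail:
	return -1;	/* caller sees an -ENOMEM-style failure */
}

int main(void)
{
	struct ctx c = { 0 };

	if (ctx_setup(&c))
		return 1;

	/* teardown mirrors the labels: release in reverse allocation order */
	free(c.pool_c);
	free(c.pool_b);
	free(c.pool_a);
	return 0;
}

Keeping the labels in strict reverse allocation order makes it straightforward to audit that every partially built state is released exactly once on any failure path.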
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/ctype.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/aer.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_attr.h" #define LPFC_DEF_DEVLOSS_TMO 30 #define LPFC_MIN_DEVLOSS_TMO 1 #define LPFC_MAX_DEVLOSS_TMO 255 #define LPFC_MAX_INFO_TMP_LEN 100 #define LPFC_INFO_MORE_STR "\nCould be more info...\n" /* * Write key size should be multiple of 4. If write key is changed * make sure that library write key is also changed. */ #define LPFC_REG_WRITE_KEY_SIZE 4 #define LPFC_REG_WRITE_KEY "EMLX" const char *const trunk_errmsg[] = { /* map errcode */ "", /* There is no such error code at index 0*/ "link negotiated speed does not match existing" " trunk - link was \"low\" speed", "link negotiated speed does not match" " existing trunk - link was \"middle\" speed", "link negotiated speed does not match existing" " trunk - link was \"high\" speed", "Attached to non-trunking port - F_Port", "Attached to non-trunking port - N_Port", "FLOGI response timeout", "non-FLOGI frame received", "Invalid FLOGI response", "Trunking initialization protocol", "Trunk peer device mismatch", }; /** * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules * @incr: integer to convert. * @hdw: ascii string holding converted integer plus a string terminator. * * Description: * JEDEC Joint Electron Device Engineering Council. * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii * character string. The string is then terminated with a NULL in byte 9. * Hex 0-9 becomes ascii '0' to '9'. * Hex a-f becomes ascii '=' to 'B' capital B. * * Notes: * Coded for 32 bit integers only. 
**/ static void lpfc_jedec_to_ascii(int incr, char hdw[]) { int i, j; for (i = 0; i < 8; i++) { j = (incr & 0xf); if (j <= 9) hdw[7 - i] = 0x30 + j; else hdw[7 - i] = 0x61 + j - 10; incr = (incr >> 4); } hdw[8] = 0; return; } static ssize_t lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_cgn_info *cp = NULL; struct lpfc_cgn_stat *cgs; int len = 0; int cpu; u64 rcv, total; char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; if (phba->cgn_i) cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; scnprintf(tmp, sizeof(tmp), "Congestion Mgmt Info: E2Eattr %d Ver %d " "CMF %d cnt %d\n", phba->sli4_hba.pc_sli4_params.mi_cap, cp ? cp->cgn_info_version : 0, phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; if (!phba->sli4_hba.pc_sli4_params.cmf) goto buffer_done; switch (phba->cgn_init_reg_signal) { case EDC_CG_SIG_WARN_ONLY: scnprintf(tmp, sizeof(tmp), "Register: Init: Signal:WARN "); break; case EDC_CG_SIG_WARN_ALARM: scnprintf(tmp, sizeof(tmp), "Register: Init: Signal:WARN|ALARM "); break; default: scnprintf(tmp, sizeof(tmp), "Register: Init: Signal:NONE "); break; } if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; switch (phba->cgn_init_reg_fpin) { case LPFC_CGN_FPIN_WARN: scnprintf(tmp, sizeof(tmp), "FPIN:WARN\n"); break; case LPFC_CGN_FPIN_ALARM: scnprintf(tmp, sizeof(tmp), "FPIN:ALARM\n"); break; case LPFC_CGN_FPIN_BOTH: scnprintf(tmp, sizeof(tmp), "FPIN:WARN|ALARM\n"); break; default: scnprintf(tmp, sizeof(tmp), "FPIN:NONE\n"); break; } if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; switch (phba->cgn_reg_signal) { case EDC_CG_SIG_WARN_ONLY: scnprintf(tmp, sizeof(tmp), " Current: Signal:WARN "); break; case EDC_CG_SIG_WARN_ALARM: scnprintf(tmp, sizeof(tmp), " Current: Signal:WARN|ALARM "); break; default: scnprintf(tmp, sizeof(tmp), " Current: Signal:NONE "); break; } if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; switch (phba->cgn_reg_fpin) { case LPFC_CGN_FPIN_WARN: scnprintf(tmp, sizeof(tmp), "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt); break; case LPFC_CGN_FPIN_ALARM: scnprintf(tmp, sizeof(tmp), "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt); break; case LPFC_CGN_FPIN_BOTH: scnprintf(tmp, sizeof(tmp), "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt); break; default: scnprintf(tmp, sizeof(tmp), "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt); break; } if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) { switch (phba->cmf_active_mode) { case LPFC_CFG_OFF: scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n"); break; case LPFC_CFG_MANAGED: scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n"); break; case LPFC_CFG_MONITOR: scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n"); break; default: scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n"); } if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; } switch (phba->cgn_p.cgn_param_mode) { case LPFC_CFG_OFF: scnprintf(tmp, sizeof(tmp), "Config: Mode:Off "); break; case LPFC_CFG_MANAGED: scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed "); break; case LPFC_CFG_MONITOR: scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor "); break; default: scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown "); } if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; total = 
0; rcv = 0; for_each_present_cpu(cpu) { cgs = per_cpu_ptr(phba->cmf_stat, cpu); total += atomic64_read(&cgs->total_bytes); rcv += atomic64_read(&cgs->rcv_bytes); } scnprintf(tmp, sizeof(tmp), "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n", atomic_read(&phba->cmf_busy), phba->cmf_active_info, rcv, total); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "Port_speed:%d Link_byte_cnt:%ld " "Max_byte_per_interval:%ld\n", lpfc_sli_port_speed_get(phba), (unsigned long)phba->cmf_link_byte_count, (unsigned long)phba->cmf_max_bytes_per_interval); strlcat(buf, tmp, PAGE_SIZE); buffer_done: len = strnlen(buf, PAGE_SIZE); if (unlikely(len >= (PAGE_SIZE - 1))) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6312 Catching potential buffer " "overflow > PAGE_SIZE = %lu bytes\n", PAGE_SIZE); strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1); } return len; } /** * lpfc_drvr_version_show - Return the Emulex driver string with version number * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. **/ static ssize_t lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); } /** * lpfc_enable_fip_show - Return the fip mode of the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. **/ static ssize_t lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->hba_flag & HBA_FIP_SUPPORT) return scnprintf(buf, PAGE_SIZE, "1\n"); else return scnprintf(buf, PAGE_SIZE, "0\n"); } static ssize_t lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_hba *phba = vport->phba; struct lpfc_nvmet_tgtport *tgtp; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport; struct lpfc_nodelist *ndlp; struct nvme_fc_remote_port *nrport; struct lpfc_fc4_ctrl_stat *cstat; uint64_t data1, data2, data3; uint64_t totin, totout, tot; char *statep; int i; int len = 0; char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); return len; } if (phba->nvmet_support) { if (!phba->targetport) { len = scnprintf(buf, PAGE_SIZE, "NVME Target: x%llx is not allocated\n", wwn_to_u64(vport->fc_portname.u.wwn)); return len; } /* Port state is only one of two values for now. 
*/ if (phba->targetport->port_id) statep = "REGISTERED"; else statep = "INIT"; scnprintf(tmp, sizeof(tmp), "NVME Target Enabled State %s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", "NVME Target: lpfc", phba->brd_no, wwn_to_u64(vport->fc_portname.u.wwn), wwn_to_u64(vport->fc_nodename.u.wwn), phba->targetport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; scnprintf(tmp, sizeof(tmp), "LS: Rcv %08x Drop %08x Abort %08x\n", atomic_read(&tgtp->rcv_ls_req_in), atomic_read(&tgtp->rcv_ls_req_drop), atomic_read(&tgtp->xmt_ls_abort)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; if (atomic_read(&tgtp->rcv_ls_req_in) != atomic_read(&tgtp->rcv_ls_req_out)) { scnprintf(tmp, sizeof(tmp), "Rcv LS: in %08x != out %08x\n", atomic_read(&tgtp->rcv_ls_req_in), atomic_read(&tgtp->rcv_ls_req_out)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; } scnprintf(tmp, sizeof(tmp), "LS: Xmt %08x Drop %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_ls_rsp), atomic_read(&tgtp->xmt_ls_drop), atomic_read(&tgtp->xmt_ls_rsp_cmpl)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "LS: RSP Abort %08x xb %08x Err %08x\n", atomic_read(&tgtp->xmt_ls_rsp_aborted), atomic_read(&tgtp->xmt_ls_rsp_xb_set), atomic_read(&tgtp->xmt_ls_rsp_error)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "FCP: Rcv %08x Defer %08x Release %08x " "Drop %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_defer), atomic_read(&tgtp->xmt_fcp_release), atomic_read(&tgtp->rcv_fcp_cmd_drop)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; if (atomic_read(&tgtp->rcv_fcp_cmd_in) != atomic_read(&tgtp->rcv_fcp_cmd_out)) { scnprintf(tmp, sizeof(tmp), "Rcv FCP: in %08x != out %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; } scnprintf(tmp, sizeof(tmp), "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " "drop %08x\n", atomic_read(&tgtp->xmt_fcp_read), atomic_read(&tgtp->xmt_fcp_read_rsp), atomic_read(&tgtp->xmt_fcp_write), atomic_read(&tgtp->xmt_fcp_rsp), atomic_read(&tgtp->xmt_fcp_drop)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "FCP Rsp Cmpl: %08x err %08x drop %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_cmpl), atomic_read(&tgtp->xmt_fcp_rsp_error), atomic_read(&tgtp->xmt_fcp_rsp_drop)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_aborted), atomic_read(&tgtp->xmt_fcp_rsp_xb_set), atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "ABORT: Xmt %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_fcp_abort), atomic_read(&tgtp->xmt_fcp_abort_cmpl)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_abort_sol), atomic_read(&tgtp->xmt_abort_unsol), atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto 
buffer_done; scnprintf(tmp, sizeof(tmp), "DELAY: ctx %08x fod %08x wqfull %08x\n", atomic_read(&tgtp->defer_ctx), atomic_read(&tgtp->defer_fod), atomic_read(&tgtp->defer_wqfull)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; /* Calculate outstanding IOs */ tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); tot += atomic_read(&tgtp->xmt_fcp_release); tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; scnprintf(tmp, sizeof(tmp), "IO_CTX: %08x WAIT: cur %08x tot %08x\n" "CTX Outstanding %08llx\n\n", phba->sli4_hba.nvmet_xri_cnt, phba->sli4_hba.nvmet_io_wait_cnt, phba->sli4_hba.nvmet_io_wait_total, tot); strlcat(buf, tmp, PAGE_SIZE); goto buffer_done; } localport = vport->localport; if (!localport) { len = scnprintf(buf, PAGE_SIZE, "NVME Initiator x%llx is not allocated\n", wwn_to_u64(vport->fc_portname.u.wwn)); return len; } lport = (struct lpfc_nvme_lport *)localport->private; if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "XRI Dist lpfc%d Total %d IO %d ELS %d\n", phba->brd_no, phba->sli4_hba.max_cfg_param.max_xri, phba->sli4_hba.io_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; /* Port state is only one of two values for now. */ if (localport->port_id) statep = "ONLINE"; else statep = "UNKNOWN "; scnprintf(tmp, sizeof(tmp), "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", "NVME LPORT lpfc", phba->brd_no, wwn_to_u64(vport->fc_portname.u.wwn), wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; spin_lock(&ndlp->lock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; spin_unlock(&ndlp->lock); if (!nrport) continue; /* Port state is only one of two values for now. */ switch (nrport->port_state) { case FC_OBJSTATE_ONLINE: statep = "ONLINE"; break; case FC_OBJSTATE_UNKNOWN: statep = "UNKNOWN "; break; default: statep = "UNSUPPORTED"; break; } /* Tab in to show lport ownership. */ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; /* An NVME rport can have multiple roles. 
*/ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | FC_PORT_ROLE_NVME_DISCOVERY)) { scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto unlock_buf_done; } spin_unlock_irq(shost->host_lock); if (!lport) goto buffer_done; if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "LS: Xmt %010x Cmpl %010x Abort %08x\n", atomic_read(&lport->fc4NvmeLsRequests), atomic_read(&lport->fc4NvmeLsCmpls), atomic_read(&lport->xmt_ls_abort)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", atomic_read(&lport->xmt_ls_err), atomic_read(&lport->cmpl_ls_xb), atomic_read(&lport->cmpl_ls_err)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; totin = 0; totout = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; tot = cstat->io_cmpls; totin += tot; data1 = cstat->input_requests; data2 = cstat->output_requests; data3 = cstat->control_requests; totout += (data1 + data2 + data3); } scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " "OutIO %016llx\n", totin, totout, totout - totin); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "\tabort %08x noxri %08x nondlp %08x qdepth %08x " "wqerr %08x err %08x\n", atomic_read(&lport->xmt_fcp_abort), atomic_read(&lport->xmt_fcp_noxri), atomic_read(&lport->xmt_fcp_bad_ndlp), atomic_read(&lport->xmt_fcp_qdepth), atomic_read(&lport->xmt_fcp_wqerr), atomic_read(&lport->xmt_fcp_err)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "FCP CMPL: xb %08x Err %08x\n", atomic_read(&lport->cmpl_fcp_xb), atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); /* host_lock is already unlocked. 
*/ goto buffer_done; unlock_buf_done: spin_unlock_irq(shost->host_lock); buffer_done: len = strnlen(buf, PAGE_SIZE); if (unlikely(len >= (PAGE_SIZE - 1))) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6314 Catching potential buffer " "overflow > PAGE_SIZE = %lu bytes\n", PAGE_SIZE); strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1); } return len; } static ssize_t lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_hba *phba = vport->phba; int len; struct lpfc_fc4_ctrl_stat *cstat; u64 data1, data2, data3; u64 tot, totin, totout; int i; char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || (phba->sli_rev != LPFC_SLI_REV4)) return 0; scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n"); totin = 0; totout = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; tot = cstat->io_cmpls; totin += tot; data1 = cstat->input_requests; data2 = cstat->output_requests; data3 = cstat->control_requests; totout += (data1 + data2 + data3); scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " "IO %016llx ", i, data1, data2, data3); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", tot, ((data1 + data2 + data3) - tot)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; } scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " "OutIO %016llx\n", totin, totout, totout - totin); strlcat(buf, tmp, PAGE_SIZE); buffer_done: len = strnlen(buf, PAGE_SIZE); return len; } static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->cfg_enable_bg) { if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) return scnprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n"); else return scnprintf(buf, PAGE_SIZE, "BlockGuard Not Supported\n"); } else return scnprintf(buf, PAGE_SIZE, "BlockGuard Disabled\n"); } static ssize_t lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_guard_err_cnt); } static ssize_t lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_apptag_err_cnt); } static ssize_t lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_reftag_err_cnt); } /** * lpfc_info_show - Return some pci info about the host in ascii * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text from lpfc_info(). 
* * Returns: size of formatted string. **/ static ssize_t lpfc_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); } /** * lpfc_serialnum_show - Return the hba serial number in ascii * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted text serial number. * * Returns: size of formatted string. **/ static ssize_t lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); } /** * lpfc_temp_sensor_show - Return the temperature sensor level * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted support level. * * Description: * Returns a number indicating the temperature sensor level currently * supported, zero or one in ascii. * * Returns: size of formatted string. **/ static ssize_t lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); } /** * lpfc_modeldesc_show - Return the model description of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd model description. * * Returns: size of formatted string. **/ static ssize_t lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); } /** * lpfc_modelname_show - Return the model name of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd model name. * * Returns: size of formatted string. **/ static ssize_t lpfc_modelname_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); } /** * lpfc_programtype_show - Return the program type of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_programtype_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); } /** * lpfc_vportnum_show - Return the port number in ascii of the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains scsi vpd program type. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); } /** * lpfc_fwrev_show - Return the firmware rev running in the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t if_type; uint8_t sli_family; char fwrev[FW_REV_STR_SIZE]; int len; lpfc_decode_firmware_rev(phba, fwrev, 1); if_type = phba->sli4_hba.pc_sli4_params.if_type; sli_family = phba->sli4_hba.pc_sli4_params.sli_family; if (phba->sli_rev < LPFC_SLI_REV4) len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); else len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", fwrev, phba->sli_rev, if_type, sli_family); return len; } /** * lpfc_hdw_show - Return the jedec information about the hba * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the scsi vpd program type. * * Returns: size of formatted string. **/ static ssize_t lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) { char hdw[9]; struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; lpfc_vpd_t *vp = &phba->vpd; lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw, vp->rev.smRev, vp->rev.smFwRev); } /** * lpfc_option_rom_version_show - Return the adapter ROM FCode version * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the ROM and FCode ascii strings. * * Returns: size of formatted string. **/ static ssize_t lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; char fwrev[FW_REV_STR_SIZE]; if (phba->sli_rev < LPFC_SLI_REV4) return scnprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); lpfc_decode_firmware_rev(phba, fwrev, 1); return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); } /** * lpfc_link_state_show - Return the link state of the port * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains text describing the state of the link. * * Notes: * The switch statement has no default so zero will be returned. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int len = 0; switch (phba->link_state) { case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: if (phba->hba_flag & LINK_DISABLED) len += scnprintf(buf + len, PAGE_SIZE-len, "Link Down - User disabled\n"); else len += scnprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: len += scnprintf(buf + len, PAGE_SIZE-len, "Configuring Link\n"); break; case LPFC_FDISC: case LPFC_FLOGI: case LPFC_FABRIC_CFG_LINK: case LPFC_NS_REG: case LPFC_NS_QRY: case LPFC_BUILD_DISC_LIST: case LPFC_DISC_AUTH: len += scnprintf(buf + len, PAGE_SIZE - len, "Discovery\n"); break; case LPFC_VPORT_READY: len += scnprintf(buf + len, PAGE_SIZE - len, "Ready\n"); break; case LPFC_VPORT_FAILED: len += scnprintf(buf + len, PAGE_SIZE - len, "Failed\n"); break; case LPFC_VPORT_UNKNOWN: len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); break; } if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) len += scnprintf(buf + len, PAGE_SIZE-len, " Public Loop\n"); else len += scnprintf(buf + len, PAGE_SIZE-len, " Private Loop\n"); } else { if (vport->fc_flag & FC_FABRIC) { if (phba->sli_rev == LPFC_SLI_REV4 && vport->port_type == LPFC_PHYSICAL_PORT && phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) len += scnprintf(buf + len, PAGE_SIZE - len, " Fabric FA-PWWN\n"); else len += scnprintf(buf + len, PAGE_SIZE - len, " Fabric\n"); } else { len += scnprintf(buf + len, PAGE_SIZE-len, " Point-2-Point\n"); } } } if ((phba->sli_rev == LPFC_SLI_REV4) && ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_6))) { struct lpfc_trunk_link link = phba->trunk_link; if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) len += scnprintf(buf + len, PAGE_SIZE - len, "Trunk port 0: Link %s %s\n", (link.link0.state == LPFC_LINK_UP) ? "Up" : "Down. ", trunk_errmsg[link.link0.fault]); if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) len += scnprintf(buf + len, PAGE_SIZE - len, "Trunk port 1: Link %s %s\n", (link.link1.state == LPFC_LINK_UP) ? "Up" : "Down. ", trunk_errmsg[link.link1.fault]); if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) len += scnprintf(buf + len, PAGE_SIZE - len, "Trunk port 2: Link %s %s\n", (link.link2.state == LPFC_LINK_UP) ? "Up" : "Down. ", trunk_errmsg[link.link2.fault]); if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) len += scnprintf(buf + len, PAGE_SIZE - len, "Trunk port 3: Link %s %s\n", (link.link3.state == LPFC_LINK_UP) ? "Up" : "Down. ", trunk_errmsg[link.link3.fault]); } return len; } /** * lpfc_sli4_protocol_show - Return the fip mode of the HBA * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. 
**/ static ssize_t lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev < LPFC_SLI_REV4) return scnprintf(buf, PAGE_SIZE, "fc\n"); if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) { if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE) return scnprintf(buf, PAGE_SIZE, "fcoe\n"); if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) return scnprintf(buf, PAGE_SIZE, "fc\n"); } return scnprintf(buf, PAGE_SIZE, "unknown\n"); } /** * lpfc_oas_supported_show - Return whether or not Optimized Access Storage * (OAS) is supported. * @dev: class unused variable. * @attr: device attribute, not used. * @buf: on return contains the module description text. * * Returns: size of formatted string. **/ static ssize_t lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->sli4_hba.pc_sli4_params.oas_supported); } /** * lpfc_link_state_store - Transition the link_state on an HBA port * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: one or more lpfc_polling_flags values. * @count: not used. * * Returns: * -EINVAL if the buffer is not "up" or "down" * return from link state change function if non-zero * length of the buf on success **/ static ssize_t lpfc_link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int status = -EINVAL; if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && (phba->link_state == LPFC_LINK_DOWN)) status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && (phba->link_state >= LPFC_LINK_UP)) status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); if (status == 0) return strlen(buf); else return status; } /** * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the sum of fc mapped and unmapped. * * Description: * Returns the ascii text number of the sum of the fc mapped and unmapped * vport counts. * * Returns: size of formatted string. **/ static ssize_t lpfc_num_discovered_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; return scnprintf(buf, PAGE_SIZE, "%d\n", vport->fc_map_cnt + vport->fc_unmap_cnt); } /** * lpfc_issue_lip - Misnomer, name carried over from long ago * @shost: Scsi_Host pointer. * * Description: * Bring the link down gracefully then re-init the link. The firmware will * re-init the fiber channel interface as required. Does not issue a LIP. 
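 *
 * Illustrative usage (assumes the usual fc_host transport wiring for this
 * handler, which is outside this excerpt; the host number is
 * hypothetical):
 *   echo 1 > /sys/class/fc_host/host0/issue_lip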
* * Returns: * -EPERM port offline or management commands are being blocked * -ENOMEM cannot allocate memory for the mailbox command * -EIO error sending the mailbox command * zero for success **/ static int lpfc_issue_lip(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *pmboxq; int mbxstatus = MBXERR_ERROR; /* * If the link is offline, disabled or BLOCK_MGMT_IO * it doesn't make any sense to allow issue_lip */ if ((vport->fc_flag & FC_OFFLINE_MODE) || (phba->hba_flag & LINK_DISABLED) || (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); if (!pmboxq) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) vport->fc_flag &= ~FC_PT2PT_NO_NVME; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0 || pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "2859 SLI authentication is required " "for INIT_LINK but has not done yet\n"); } lpfc_set_loopback_flag(phba); if (mbxstatus != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); if (mbxstatus == MBXERR_ERROR) return -EIO; return 0; } int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) { int cnt = 0; spin_lock_irq(lock); while (!list_empty(q)) { spin_unlock_irq(lock); msleep(20); if (cnt++ > 250) { /* 5 secs */ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0466 Outstanding IO when " "bringing Adapter offline\n"); return 0; } spin_lock_irq(lock); } spin_unlock_irq(lock); return 1; } /** * lpfc_do_offline - Issues a mailbox command to bring the link down * @phba: lpfc_hba pointer. * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL. * * Notes: * Assumes any error from lpfc_do_offline() will be negative. * Can wait up to 5 seconds for the port ring buffers count * to reach zero, prints a warning if it is not zero and continues. * lpfc_workq_post_event() returns a non-zero return code if call fails. * * Returns: * -EIO error posting the event * zero for success **/ static int lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) { struct completion online_compl; struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; int status = 0; int i; int rc; init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; psli = &phba->sli; /* * If freeing the queues have already started, don't access them. * Otherwise set FREE_WAIT to indicate that queues are being used * to hold the freeing process until we finish. */ spin_lock_irq(&phba->hbalock); if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) { psli->sli_flag |= LPFC_QUEUE_FREE_WAIT; } else { spin_unlock_irq(&phba->hbalock); goto skip_wait; } spin_unlock_irq(&phba->hbalock); /* Wait a little for things to settle down, but not * long enough for dev loss timeout to expire. 
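 * lpfc_emptyq_wait() below polls each txcmplq every 20 ms and gives up
 * after about 5 seconds (250 iterations), so this wait stays well below a
 * typical devloss timeout.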
*/ if (phba->sli_rev != LPFC_SLI_REV4) { for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; if (!lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock)) goto out; } } else { list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring) continue; if (!lpfc_emptyq_wait(phba, &pring->txcmplq, &pring->ring_lock)) goto out; } } out: spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT; spin_unlock_irq(&phba->hbalock); skip_wait: init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, type); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; return 0; } /** * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA * @phba: lpfc_hba pointer. * * Description: * Issues a PCI secondary bus reset for the phba->pcidev. * * Notes: * First walks the bus_list to ensure only PCI devices with Emulex * vendor id, device ids that support hot reset, only one occurrence * of function 0, and all ports on the bus are in offline mode to ensure the * hot reset only affects one valid HBA. * * Returns: * -ENOTSUPP, cfg_enable_hba_reset must be of value 2 * -ENODEV, NULL ptr to pcidev * -EBADSLT, detected invalid device * -EBUSY, port is not in offline state * 0, successful */ static int lpfc_reset_pci_bus(struct lpfc_hba *phba) { struct pci_dev *pdev = phba->pcidev; struct Scsi_Host *shost = NULL; struct lpfc_hba *phba_other = NULL; struct pci_dev *ptr = NULL; int res; if (phba->cfg_enable_hba_reset != 2) return -ENOTSUPP; if (!pdev) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n"); return -ENODEV; } res = lpfc_check_pci_resettable(phba); if (res) return res; /* Walk the list of devices on the pci_dev's bus */ list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { /* Check port is offline */ shost = pci_get_drvdata(ptr); if (shost) { phba_other = ((struct lpfc_vport *)shost->hostdata)->phba; if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) { lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT, "8349 WWPN = 0x%02x%02x%02x%02x" "%02x%02x%02x%02x is not " "offline!\n", phba_other->wwpn[0], phba_other->wwpn[1], phba_other->wwpn[2], phba_other->wwpn[3], phba_other->wwpn[4], phba_other->wwpn[5], phba_other->wwpn[6], phba_other->wwpn[7]); return -EBUSY; } } } /* Issue PCI bus reset */ res = pci_reset_bus(pdev); if (res) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "8350 PCI reset bus failed: %d\n", res); } return res; } /** * lpfc_selective_reset - Offline then onlines the port * @phba: lpfc_hba pointer. * * Description: * If the port is configured to allow a reset then the hba is brought * offline then online. * * Notes: * Assumes any error from lpfc_do_offline() will be negative. * Do not make this function static. 
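 * This path is normally reached from the "issue_reset" sysfs attribute
 * (see lpfc_issue_reset() below), which invokes it when the string
 * "selective" is written.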
* * Returns: * lpfc_do_offline() return code if not zero * -EIO reset not configured or error posting the event * zero for success **/ int lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; int status = 0; int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) return status; } init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) return -ENOMEM; wait_for_completion(&online_compl); if (status != 0) return -EIO; return 0; } /** * lpfc_issue_reset - Selectively resets an adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string "selective". * @count: unused variable. * * Description: * If the buf contains the string "selective" then lpfc_selective_reset() * is called to perform the reset. * * Notes: * Assumes any error from lpfc_selective_reset() will be negative. * If lpfc_selective_reset() returns zero then the length of the buffer * is returned which indicates success * * Returns: * -EINVAL if the buffer does not contain the string "selective" * length of buf if lpfc-selective_reset() if the call succeeds * return value of lpfc_selective_reset() if the call fails **/ static ssize_t lpfc_issue_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int status = -EINVAL; if (!phba->cfg_enable_hba_reset) return -EACCES; if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) status = phba->lpfc_selective_reset(phba); if (status == 0) return strlen(buf); else return status; } /** * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness * @phba: lpfc_hba pointer. * * Description: * SLI4 interface type-2 device to wait on the sliport status register for * the readyness after performing a firmware reset. * * Returns: * zero for success, -EPERM when port does not have privilege to perform the * reset, -EIO when port timeout from recovering from the reset. * * Note: * As the caller will interpret the return code by value, be careful in making * change or addition to return codes. **/ int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) { struct lpfc_register portstat_reg = {0}; int i; msleep(100); if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0)) return -EIO; /* verify if privileged for the request operation */ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && !bf_get(lpfc_sliport_status_err, &portstat_reg)) return -EPERM; /* There is no point to wait if the port is in an unrecoverable * state. */ if (lpfc_sli4_unrecoverable_port(&portstat_reg)) return -EIO; /* wait for the SLI port firmware ready after firmware reset */ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { msleep(10); if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0)) continue; if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg)) continue; break; } if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT) return 0; else return -EIO; } /** * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc * @phba: lpfc_hba pointer. 
* @opcode: The sli4 config command opcode. * * Description: * Request SLI4 interface type-2 device to perform a physical register set * access. * * Returns: * zero for success **/ static ssize_t lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) { struct completion online_compl; struct pci_dev *pdev = phba->pcidev; uint32_t before_fc_flag; uint32_t sriov_nr_virtfn; uint32_t reg_val; int status = 0, rc = 0; int job_posted = 1, sriov_err; if (!phba->cfg_enable_hba_reset) return -EACCES; if ((phba->sli_rev < LPFC_SLI_REV4) || (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2)) return -EPERM; /* Keep state if we need to restore back */ before_fc_flag = phba->pport->fc_flag; sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; if (opcode == LPFC_FW_DUMP) { init_completion(&online_compl); phba->fw_dump_cmpl = &online_compl; } else { /* Disable SR-IOV virtual functions if enabled */ if (phba->cfg_sriov_nr_virtfn) { pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) return status; /* wait for the device to be quiesced before firmware reset */ msleep(100); } reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); if (opcode == LPFC_FW_DUMP) reg_val |= LPFC_FW_DUMP_REQUEST; else if (opcode == LPFC_FW_RESET) reg_val |= LPFC_CTL_PDEV_CTL_FRST; else if (opcode == LPFC_DV_RESET) reg_val |= LPFC_CTL_PDEV_CTL_DRST; writel(reg_val, phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); /* flush */ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); /* delay driver action following IF_TYPE_2 reset */ rc = lpfc_sli4_pdev_status_reg_wait(phba); if (rc == -EPERM) { /* no privilege for reset */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3150 No privilege to perform the requested " "access: x%x\n", reg_val); } else if (rc == -EIO) { /* reset failed, there is nothing more we can do */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3153 Fail to perform the requested " "access: x%x\n", reg_val); if (phba->fw_dump_cmpl) phba->fw_dump_cmpl = NULL; return rc; } /* keep the original port state */ if (before_fc_flag & FC_OFFLINE_MODE) { if (phba->fw_dump_cmpl) phba->fw_dump_cmpl = NULL; goto out; } /* Firmware dump will trigger an HA_ERATT event, and * lpfc_handle_eratt_s4 routine already handles bringing the port back * online. */ if (opcode == LPFC_FW_DUMP) { wait_for_completion(phba->fw_dump_cmpl); } else { init_completion(&online_compl); job_posted = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (!job_posted) goto out; wait_for_completion(&online_compl); } out: /* in any case, restore the virtual functions enabled as before */ if (sriov_nr_virtfn) { /* If fw_dump was performed, first disable to clean up */ if (opcode == LPFC_FW_DUMP) { pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } sriov_err = lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); if (!sriov_err) phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn; } /* return proper error code */ if (!rc) { if (!job_posted) rc = -ENOMEM; else if (status) rc = -EIO; } return rc; } /** * lpfc_nport_evt_cnt_show - Return the number of nport events * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the ascii number of nport events. * * Returns: size of formatted string. 
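 *
 * Illustrative read (host number is hypothetical):
 *   cat /sys/class/scsi_host/host0/nport_evt_cnt
 * prints the current phba->nport_event_cnt value in decimal.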
**/ static ssize_t lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); } static int lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) { LPFC_MBOXQ_t *mbox = NULL; unsigned long val = 0; char *pval = NULL; int rc = 0; if (!strncmp("enable", buff_out, strlen("enable"))) { pval = buff_out + strlen("enable") + 1; rc = kstrtoul(pval, 0, &val); if (rc) return rc; /* Invalid number */ } else if (!strncmp("disable", buff_out, strlen("disable"))) { val = 0; } else { return -EINVAL; /* Invalid command */ } switch (val) { case 0: val = 0x0; /* Disable */ break; case 2: val = 0x1; /* Enable two port trunk */ break; case 4: val = 0x2; /* Enable four port trunk */ break; default: return -EINVAL; } lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0070 Set trunk mode with val %ld ", val); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE, 12, LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_set_trunk_mode, &mbox->u.mqe.un.set_trunk_mode, val); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0071 Set trunk mode failed with status: %d", rc); mempool_free(mbox, phba->mbox_mem_pool); return 0; } static ssize_t lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; int rc; int len = 0; struct lpfc_rdp_context *rdp_context; u16 temperature; u16 rx_power; u16 tx_bias; u16 tx_power; u16 vcc; char chbuf[128]; u16 wavelength = 0; struct sff_trasnceiver_codes_byte7 *trasn_code_byte7; /* Get transceiver information */ rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); rc = lpfc_get_sfp_info_wait(phba, rdp_context); if (rc) { len = scnprintf(buf, PAGE_SIZE - len, "SFP info NA:\n"); goto out_free_rdp; } strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16); len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorOUI:\t%02x-%02x-%02x\n", (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI], (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1], (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]); strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf); strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf); strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf); strscpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8); len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf); len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n", (uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]); len += scnprintf(buf + len, PAGE_SIZE - len, "ExtIdentifier:\t%xh\n", (uint8_t)rdp_context->page_a0[SSF_EXT_IDENTIFIER]); len += scnprintf(buf + len, PAGE_SIZE - len, "Connector:\t%xh\n", (uint8_t)rdp_context->page_a0[SSF_CONNECTOR]); wavelength = (rdp_context->page_a0[SSF_WAVELENGTH_B1] << 8) | rdp_context->page_a0[SSF_WAVELENGTH_B0]; len += scnprintf(buf + 
len, PAGE_SIZE - len, "Wavelength:\t%d nm\n", wavelength); trasn_code_byte7 = (struct sff_trasnceiver_codes_byte7 *) &rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7]; len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t"); if (*(uint8_t *)trasn_code_byte7 == 0) { len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); } else { if (trasn_code_byte7->fc_sp_100MB) len += scnprintf(buf + len, PAGE_SIZE - len, "1 "); if (trasn_code_byte7->fc_sp_200mb) len += scnprintf(buf + len, PAGE_SIZE - len, "2 "); if (trasn_code_byte7->fc_sp_400MB) len += scnprintf(buf + len, PAGE_SIZE - len, "4 "); if (trasn_code_byte7->fc_sp_800MB) len += scnprintf(buf + len, PAGE_SIZE - len, "8 "); if (trasn_code_byte7->fc_sp_1600MB) len += scnprintf(buf + len, PAGE_SIZE - len, "16 "); if (trasn_code_byte7->fc_sp_3200MB) len += scnprintf(buf + len, PAGE_SIZE - len, "32 "); if (trasn_code_byte7->speed_chk_ecc) len += scnprintf(buf + len, PAGE_SIZE - len, "64 "); len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n"); } temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 | rdp_context->page_a2[SFF_TEMPERATURE_B0]); vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 | rdp_context->page_a2[SFF_VCC_B0]); tx_power = (rdp_context->page_a2[SFF_TXPOWER_B1] << 8 | rdp_context->page_a2[SFF_TXPOWER_B0]); tx_bias = (rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B0]); rx_power = (rdp_context->page_a2[SFF_RXPOWER_B1] << 8 | rdp_context->page_a2[SFF_RXPOWER_B0]); len += scnprintf(buf + len, PAGE_SIZE - len, "Temperature:\tx%04x C\n", temperature); len += scnprintf(buf + len, PAGE_SIZE - len, "Vcc:\t\tx%04x V\n", vcc); len += scnprintf(buf + len, PAGE_SIZE - len, "TxBiasCurrent:\tx%04x mA\n", tx_bias); len += scnprintf(buf + len, PAGE_SIZE - len, "TxPower:\tx%04x mW\n", tx_power); len += scnprintf(buf + len, PAGE_SIZE - len, "RxPower:\tx%04x mW\n", rx_power); out_free_rdp: kfree(rdp_context); return len; } /** * lpfc_board_mode_show - Return the state of the board * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the state of the adapter. * * Returns: size of formatted string. **/ static ssize_t lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; char * state; if (phba->link_state == LPFC_HBA_ERROR) state = "error"; else if (phba->link_state == LPFC_WARM_START) state = "warm start"; else if (phba->link_state == LPFC_INIT_START) state = "offline"; else state = "online"; return scnprintf(buf, PAGE_SIZE, "%s\n", state); } /** * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing one of the strings "online", "offline", "warm" or "error". * @count: unused variable. 
* * Returns: * -EACCES if enable hba reset not enabled * -EINVAL if the buffer does not contain a valid string (see above) * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails * buf length greater than zero indicates success **/ static ssize_t lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct completion online_compl; char *board_mode_str = NULL; int status = 0; int rc; if (!phba->cfg_enable_hba_reset) { status = -EACCES; goto board_mode_out; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3050 lpfc_board_mode set to %s\n", buf); init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); if (rc == 0) { status = -ENOMEM; goto board_mode_out; } wait_for_completion(&online_compl); if (status) status = -EIO; } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) if (phba->sli_rev == LPFC_SLI_REV4) status = -EINVAL; else status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); else if (strncmp(buf, "error", sizeof("error") - 1) == 0) if (phba->sli_rev == LPFC_SLI_REV4) status = -EINVAL; else status = lpfc_do_offline(phba, LPFC_EVT_KILL); else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0) status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP); else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0) status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET); else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0) status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET); else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1) == 0) status = lpfc_reset_pci_bus(phba); else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0) lpfc_issue_hb_tmo(phba); else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0) status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk")); else status = -EINVAL; board_mode_out: if (!status) return strlen(buf); else { board_mode_str = strchr(buf, '\n'); if (board_mode_str) *board_mode_str = '\0'; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3097 Failed \"%s\", status(%d), " "fc_flag(x%x)\n", buf, status, phba->pport->fc_flag); return status; } } /** * lpfc_get_hba_info - Return various bits of informaton about the adapter * @phba: pointer to the adapter structure. * @mxri: max xri count. * @axri: available xri count. * @mrpi: max rpi count. * @arpi: available rpi count. * @mvpi: max vpi count. * @avpi: available vpi count. * * Description: * If an integer pointer for an count is not null then the value for the * count is returned. * * Returns: * zero on error * one for success **/ static int lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mxri, uint32_t *axri, uint32_t *mrpi, uint32_t *arpi, uint32_t *mvpi, uint32_t *avpi) { LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; struct lpfc_sli4_hba *sli4_hba; struct lpfc_max_cfg_param *max_cfg_param; u16 rsrc_ext_cnt, rsrc_ext_size, max_vpi; /* * prevent udev from issuing mailbox commands until the port is * configured. 
*/ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return 0; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_CONFIG; pmb->mbxOwner = OWN_HOST; pmboxq->ctx_buf = NULL; if (phba->pport->fc_flag & FC_OFFLINE_MODE) rc = MBX_NOT_FINISHED; else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return 0; } if (phba->sli_rev == LPFC_SLI_REV4) { sli4_hba = &phba->sli4_hba; max_cfg_param = &sli4_hba->max_cfg_param; /* Normally, extents are not used */ if (!phba->sli4_hba.extents_in_use) { if (mrpi) *mrpi = max_cfg_param->max_rpi; if (mxri) *mxri = max_cfg_param->max_xri; if (mvpi) { max_vpi = max_cfg_param->max_vpi; /* Limit the max we support */ if (max_vpi > LPFC_MAX_VPI) max_vpi = LPFC_MAX_VPI; *mvpi = max_vpi; } } else { /* Extents in use */ if (mrpi) { if (lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI, &rsrc_ext_cnt, &rsrc_ext_size)) { rc = 0; goto free_pmboxq; } *mrpi = rsrc_ext_cnt * rsrc_ext_size; } if (mxri) { if (lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI, &rsrc_ext_cnt, &rsrc_ext_size)) { rc = 0; goto free_pmboxq; } *mxri = rsrc_ext_cnt * rsrc_ext_size; } if (mvpi) { if (lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI, &rsrc_ext_cnt, &rsrc_ext_size)) { rc = 0; goto free_pmboxq; } max_vpi = rsrc_ext_cnt * rsrc_ext_size; /* Limit the max we support */ if (max_vpi > LPFC_MAX_VPI) max_vpi = LPFC_MAX_VPI; *mvpi = max_vpi; } } } else { if (mrpi) *mrpi = pmb->un.varRdConfig.max_rpi; if (arpi) *arpi = pmb->un.varRdConfig.avail_rpi; if (mxri) *mxri = pmb->un.varRdConfig.max_xri; if (axri) *axri = pmb->un.varRdConfig.avail_xri; if (mvpi) *mvpi = pmb->un.varRdConfig.max_vpi; if (avpi) { /* avail_vpi is only valid if link is up and ready */ if (phba->link_state == LPFC_HBA_READY) *avpi = pmb->un.varRdConfig.avail_vpi; else *avpi = pmb->un.varRdConfig.max_vpi; } } /* Success */ rc = 1; free_pmboxq: mempool_free(pmboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_max_rpi_show - Return maximum rpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum rpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mrpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_rpi_show - Return maximum rpi minus available rpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the used rpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts. 
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hba *sli4_hba; struct lpfc_max_cfg_param *max_cfg_param; u32 cnt = 0, acnt = 0; if (phba->sli_rev == LPFC_SLI_REV4) { sli4_hba = &phba->sli4_hba; max_cfg_param = &sli4_hba->max_cfg_param; return scnprintf(buf, PAGE_SIZE, "%d\n", max_cfg_param->rpi_used); } else { if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); } return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_max_xri_show - Return maximum xri * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum xri count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mrpi count. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_xri_show - Return maximum xpi minus the available xpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the used xri count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mxri and axri counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hba *sli4_hba; struct lpfc_max_cfg_param *max_cfg_param; u32 cnt = 0, acnt = 0; if (phba->sli_rev == LPFC_SLI_REV4) { sli4_hba = &phba->sli4_hba; max_cfg_param = &sli4_hba->max_cfg_param; return scnprintf(buf, PAGE_SIZE, "%d\n", max_cfg_param->xri_used); } else { if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); } return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_max_vpi_show - Return maximum vpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the maximum vpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mvpi count. 
* If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_used_vpi_show - Return maximum vpi minus the available vpi * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the used vpi count in decimal or "Unknown". * * Description: * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts. * If lpfc_get_hba_info() returns zero (failure) the buffer text is set * to "Unknown" and the buffer length is returned, therefore the caller * must check for "Unknown" in the buffer to detect a failure. * * Returns: size of formatted string. **/ static ssize_t lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hba *sli4_hba; struct lpfc_max_cfg_param *max_cfg_param; u32 cnt = 0, acnt = 0; if (phba->sli_rev == LPFC_SLI_REV4) { sli4_hba = &phba->sli4_hba; max_cfg_param = &sli4_hba->max_cfg_param; return scnprintf(buf, PAGE_SIZE, "%d\n", max_cfg_param->vpi_used); } else { if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); } return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** * lpfc_npiv_info_show - Return text about NPIV support for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: text that must be interpreted to determine if npiv is supported. * * Description: * Buffer will contain text indicating npiv is not suppoerted on the port, * the port is an NPIV physical port, or it is an npiv virtual port with * the id of the vport. * * Returns: size of formatted string. **/ static ssize_t lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (!(phba->max_vpi)) return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); if (vport->port_type == LPFC_PHYSICAL_PORT) return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n"); return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); } /** * lpfc_poll_show - Return text about poll support for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the cfg_poll in hex. * * Notes: * cfg_poll should be a lpfc_polling_flags type. * * Returns: size of formatted string. 
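 *
 * Illustrative values: cfg_poll is a small bit mask of lpfc_polling_flags
 * (ENABLE_FCP_RING_POLLING, DISABLE_FCP_RING_INT); lpfc_poll_store()
 * below only accepts values whose bits fit in 0x3, commonly 0x0, 0x1 or
 * 0x3.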
**/ static ssize_t lpfc_poll_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); } /** * lpfc_poll_store - Set the value of cfg_poll for the adapter * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: one or more lpfc_polling_flags values. * @count: not used. * * Notes: * buf contents converted to integer and checked for a valid value. * * Returns: * -EINVAL if the buffer connot be converted or is out of range * length of the buf on success **/ static ssize_t lpfc_poll_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint32_t creg_val; uint32_t old_val; int val=0; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if ((val & 0x3) != val) return -EINVAL; if (phba->sli_rev == LPFC_SLI_REV4) val = 0; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3051 lpfc_poll changed from %d to %d\n", phba->cfg_poll, val); spin_lock_irq(&phba->hbalock); old_val = phba->cfg_poll; if (val & ENABLE_FCP_RING_POLLING) { if ((val & DISABLE_FCP_RING_INT) && !(old_val & DISABLE_FCP_RING_INT)) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ lpfc_poll_start_timer(phba); } } else if (val != 0x0) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } if (!(val & DISABLE_FCP_RING_INT) && (old_val & DISABLE_FCP_RING_INT)) { spin_unlock_irq(&phba->hbalock); del_timer(&phba->fcp_poll_timer); spin_lock_irq(&phba->hbalock); if (lpfc_readl(phba->HCregaddr, &creg_val)) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } phba->cfg_poll = val; spin_unlock_irq(&phba->hbalock); return strlen(buf); } /** * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the formatted support level. * * Description: * Returns the maximum number of virtual functions a physical function can * support, 0 will be returned if called on virtual function. * * Returns: size of formatted string. **/ static ssize_t lpfc_sriov_hw_max_virtfn_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; uint16_t max_nr_virtfn; max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } /** * lpfc_enable_bbcr_set: Sets an attribute value. * @phba: pointer to the adapter structure. * @val: integer attribute value. * * Description: * Validates the min and max values then sets the * adapter config field if in the valid range. prints error message * and does not set the parameter if invalid. 
* * Returns: * zero on success * -EINVAL if val is invalid */ static ssize_t lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val) { if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3068 lpfc_enable_bbcr changed from %d to " "%d\n", phba->cfg_enable_bbcr, val); phba->cfg_enable_bbcr = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 lpfc_enable_bbcr cannot set to %d, range is 0, " "1\n", val); return -EINVAL; } /* * lpfc_param_show - Return a cfg attribute value in decimal * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show. * * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in decimal. * * Returns: size of formatted string. **/ #define lpfc_param_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ return scnprintf(buf, PAGE_SIZE, "%d\n",\ phba->cfg_##attr);\ } /* * lpfc_param_hex_show - Return a cfg attribute value in hex * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show * * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in hexadecimal. * * Returns: size of formatted string. **/ #define lpfc_param_hex_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ val = phba->cfg_##attr;\ return scnprintf(buf, PAGE_SIZE, "%#x\n",\ phba->cfg_##attr);\ } /* * lpfc_param_init - Initializes a cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_init. The macro also * takes a default argument, a minimum and maximum argument. * * lpfc_##attr##_init: Initializes an attribute. * @phba: pointer to the adapter structure. * @val: integer attribute value. * * Validates the min and max values then sets the adapter config field * accordingly, or uses the default if out of range and prints an error message. * * Returns: * zero on success * -EINVAL if default used **/ #define lpfc_param_init(attr, default, minval, maxval) \ static int \ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ { \ if (lpfc_rangecheck(val, minval, maxval)) {\ phba->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "0449 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ phba->cfg_##attr = default;\ return -EINVAL;\ } /* * lpfc_param_set - Set a cfg attribute value * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_set * * lpfc_##attr##_set: Sets an attribute value. * @phba: pointer to the adapter structure. * @val: integer attribute value. 
* * Description: * Validates the min and max values then sets the * adapter config field if in the valid range. prints error message * and does not set the parameter if invalid. * * Returns: * zero on success * -EINVAL if val is invalid **/ #define lpfc_param_set(attr, default, minval, maxval) \ static int \ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ { \ if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "3052 lpfc_" #attr " changed from %d to %d\n", \ phba->cfg_##attr, val); \ phba->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "0450 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ return -EINVAL;\ } /* * lpfc_param_store - Set a vport attribute value * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_store. * * lpfc_##attr##_store: Set an sttribute value. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: contains the attribute value in ascii. * @count: not used. * * Description: * Convert the ascii text number to an integer, then * use the lpfc_##attr##_set function to set the value. * * Returns: * -EINVAL if val is invalid or lpfc_##attr##_set() fails * length of buffer upon success. **/ #define lpfc_param_store(attr) \ static ssize_t \ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ if (!isdigit(buf[0]))\ return -EINVAL;\ if (sscanf(buf, "%i", &val) != 1)\ return -EINVAL;\ if (lpfc_##attr##_set(phba, val) == 0) \ return strlen(buf);\ else \ return -EINVAL;\ } /* * lpfc_vport_param_show - Return decimal formatted cfg attribute value * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_show * * lpfc_##attr##_show: prints the attribute value in decimal. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in decimal. * * Returns: length of formatted string. **/ #define lpfc_vport_param_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ } /* * lpfc_vport_param_hex_show - Return hex formatted attribute value * * Description: * Macro that given an attr e.g. * hba_queue_depth expands into a function with the name * lpfc_hba_queue_depth_show * * lpfc_##attr##_show: prints the attribute value in hexadecimal. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: on return contains the attribute value in hexadecimal. * * Returns: length of formatted string. 
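 *
 * Illustrative expansion (the attribute name is hypothetical):
 * lpfc_vport_param_hex_show(foo) would generate lpfc_foo_show(), which
 * prints vport->cfg_foo using the "%#x" format.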
**/ #define lpfc_vport_param_hex_show(attr) \ static ssize_t \ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ } /* * lpfc_vport_param_init - Initialize a vport cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_init. The macro also * takes a default argument, a minimum and maximum argument. * * lpfc_##attr##_init: validates the min and max values then sets the * adapter config field accordingly, or uses the default if out of range * and prints an error message. * @phba: pointer to the adapter structure. * @val: integer attribute value. * * Returns: * zero on success * -EINVAL if default used **/ #define lpfc_vport_param_init(attr, default, minval, maxval) \ static int \ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ { \ if (lpfc_rangecheck(val, minval, maxval)) {\ vport->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "0423 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ vport->cfg_##attr = default;\ return -EINVAL;\ } /* * lpfc_vport_param_set - Set a vport cfg attribute * * Description: * Macro that given an attr e.g. hba_queue_depth expands * into a function with the name lpfc_hba_queue_depth_set * * lpfc_##attr##_set: validates the min and max values then sets the * adapter config field if in the valid range. prints error message * and does not set the parameter if invalid. * @phba: pointer to the adapter structure. * @val: integer attribute value. * * Returns: * zero on success * -EINVAL if val is invalid **/ #define lpfc_vport_param_set(attr, default, minval, maxval) \ static int \ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "3053 lpfc_" #attr \ " changed from %d (x%x) to %d (x%x)\n", \ vport->cfg_##attr, vport->cfg_##attr, \ val, val); \ vport->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "0424 lpfc_"#attr" attribute cannot be set to %d, "\ "allowed range is ["#minval", "#maxval"]\n", val); \ return -EINVAL;\ } /* * lpfc_vport_param_store - Set a vport attribute * * Description: * Macro that given an attr e.g. hba_queue_depth * expands into a function with the name lpfc_hba_queue_depth_store * * lpfc_##attr##_store: convert the ascii text number to an integer, then * use the lpfc_##attr##_set function to set the value. * @cdev: class device that is converted into a Scsi_host. * @buf: contains the attribute value in decimal. * @count: not used. * * Returns: * -EINVAL if val is invalid or lpfc_##attr##_set() fails * length of buffer upon success. 
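 *
 * Illustrative expansion (the attribute name is hypothetical):
 * lpfc_vport_param_store(foo) would generate lpfc_foo_store(), which
 * parses the buffer with sscanf("%i") and passes the value to
 * lpfc_foo_set(vport, val) for range checking.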
**/ #define lpfc_vport_param_store(attr) \ static ssize_t \ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ uint val = 0;\ if (!isdigit(buf[0]))\ return -EINVAL;\ if (sscanf(buf, "%i", &val) != 1)\ return -EINVAL;\ if (lpfc_##attr##_set(vport, val) == 0) \ return strlen(buf);\ else \ return -EINVAL;\ } static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL); static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show, lpfc_link_state_store); static DEVICE_ATTR(option_rom_version, S_IRUGO, lpfc_option_rom_version_show, NULL); static DEVICE_ATTR(num_discovered_ports, S_IRUGO, lpfc_num_discovered_ports_show, NULL); static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); static DEVICE_ATTR_RO(lpfc_drvr_version); static DEVICE_ATTR_RO(lpfc_enable_fip); static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); static DEVICE_ATTR_RO(lpfc_xcvr_data); static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR_RO(lpfc_temp_sensor); static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, NULL); static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL); #define WWN_SZ 8 /** * lpfc_wwn_set - Convert string to the 8 byte WWN value. * @buf: WWN string. * @cnt: Length of string. * @wwn: Array to receive converted wwn value. 
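 *
 * The string must be 16 hex digits with an optional "x" or "0x" prefix
 * and an optional trailing newline; for example (value is hypothetical)
 * "10000090fa942779", "x10000090fa942779" and "0x10000090fa942779" all
 * store the same 8-byte WWN.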
* * Returns: * -EINVAL if the buffer does not contain a valid wwn * 0 success **/ static size_t lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) { unsigned int i, j; /* Count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) return -EINVAL; memset(wwn, 0, WWN_SZ); /* Validate and store the new name */ for (i = 0, j = 0; i < 16; i++) { if ((*buf >= 'a') && (*buf <= 'f')) j = ((j << 4) | ((*buf++ - 'a') + 10)); else if ((*buf >= 'A') && (*buf <= 'F')) j = ((j << 4) | ((*buf++ - 'A') + 10)); else if ((*buf >= '0') && (*buf <= '9')) j = ((j << 4) | (*buf++ - '0')); else return -EINVAL; if (i % 2) { wwn[i/2] = j & 0xff; j = 0; } } return 0; } /** * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for * Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * * Returns: * value of count **/ static ssize_t lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "0x%llx\n", wwn_to_u64(phba->cfg_oas_tgt_wwpn)); } /** * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for * Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * @count: Size of the data buffer. * * Returns: * -EINVAL count is invalid, invalid wwpn byte invalid * -EPERM oas is not supported by hba * value of count on success **/ static ssize_t lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; unsigned int cnt = count; uint8_t wwpn[WWN_SZ]; int rc; if (!phba->cfg_fof) return -EPERM; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; rc = lpfc_wwn_set(buf, cnt, wwpn); if (rc) return rc; memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); if (wwn_to_u64(wwpn) == 0) phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET; else phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET; phba->cfg_oas_flags &= ~OAS_LUN_VALID; phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; return count; } static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, lpfc_oas_tgt_show, lpfc_oas_tgt_store); /** * lpfc_oas_priority_show - Return wwpn of target whose luns maybe enabled for * Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * * Returns: * value of count **/ static ssize_t lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); } /** * lpfc_oas_priority_store - Store wwpn of target whose luns maybe enabled for * Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. 
* @buf: buffer for passing information. * @count: Size of the data buffer. * * Returns: * -EINVAL count is invalid, invalid wwpn byte invalid * -EPERM oas is not supported by hba * value of count on success **/ static ssize_t lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; unsigned int cnt = count; unsigned long val; int ret; if (!phba->cfg_fof) return -EPERM; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; ret = kstrtoul(buf, 0, &val); if (ret || (val > 0x7f)) return -EINVAL; if (val) phba->cfg_oas_priority = (uint8_t)val; else phba->cfg_oas_priority = phba->cfg_XLanePriority; return count; } static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, lpfc_oas_priority_show, lpfc_oas_priority_store); /** * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled * for Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * * Returns: * value of count on success **/ static ssize_t lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "0x%llx\n", wwn_to_u64(phba->cfg_oas_vpt_wwpn)); } /** * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled * for Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * @count: Size of the data buffer. * * Returns: * -EINVAL count is invalid, invalid wwpn byte invalid * -EPERM oas is not supported by hba * value of count on success **/ static ssize_t lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; unsigned int cnt = count; uint8_t wwpn[WWN_SZ]; int rc; if (!phba->cfg_fof) return -EPERM; /* count may include a LF at end of string */ if (buf[cnt-1] == '\n') cnt--; rc = lpfc_wwn_set(buf, cnt, wwpn); if (rc) return rc; memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); if (wwn_to_u64(wwpn) == 0) phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; else phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; phba->cfg_oas_flags &= ~OAS_LUN_VALID; if (phba->cfg_oas_priority == 0) phba->cfg_oas_priority = phba->cfg_XLanePriority; phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; return count; } static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, lpfc_oas_vpt_show, lpfc_oas_vpt_store); /** * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) * of whether luns will be enabled or disabled * for Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * * Returns: * size of formatted string. 
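 *
 * Illustrative values: reads return the currently configured state, 1
 * meaning LUNs selected afterwards are OAS enabled and 0 meaning they are
 * disabled; only 0 and 1 are accepted by the corresponding store routine
 * below.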
**/ static ssize_t lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); } /** * lpfc_oas_lun_state_store - Store the state (enabled or disabled) * of whether luns will be enabled or disabled * for Optimized Access Storage (OAS) operations. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * @count: Size of the data buffer. * * Returns: * -EINVAL count is invalid, invalid wwpn byte invalid * -EPERM oas is not supported by hba * value of count on success **/ static ssize_t lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; int val = 0; if (!phba->cfg_fof) return -EPERM; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if ((val != 0) && (val != 1)) return -EINVAL; phba->cfg_oas_lun_state = val; return strlen(buf); } static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); /** * lpfc_oas_lun_status_show - Return the status of the Optimized Access * Storage (OAS) lun returned by the * lpfc_oas_lun_show function. * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * * Returns: * size of formatted string. **/ static ssize_t lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) return -EFAULT; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); } static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, lpfc_oas_lun_status_show, NULL); /** * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage * (OAS) operations. * @phba: lpfc_hba pointer. * @vpt_wwpn: wwpn of the vport associated with the returned lun * @tgt_wwpn: wwpn of the target associated with the returned lun * @lun: the fc lun for setting oas state. * @oas_state: the oas state to be set to the lun. * @pri: priority * * Returns: * SUCCESS : 0 * -EPERM OAS is not enabled or not supported by this port. * */ static size_t lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state, uint8_t pri) { int rc = 0; if (!phba->cfg_fof) return -EPERM; if (oas_state) { if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, (struct lpfc_name *)tgt_wwpn, lun, pri)) rc = -ENOMEM; } else { lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, (struct lpfc_name *)tgt_wwpn, lun, pri); } return rc; } /** * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized * Access Storage (OAS) operations. * @phba: lpfc_hba pointer. * @vpt_wwpn: wwpn of the vport associated with the returned lun * @tgt_wwpn: wwpn of the target associated with the returned lun * @lun_status: status of the lun returned lun * @lun_pri: priority of the lun returned lun * * Returns the first or next lun enabled for OAS operations for the vport/target * specified. If a lun is found, its vport wwpn, target wwpn and status is * returned. 
If the lun is not found, NOT_OAS_ENABLED_LUN is returned. * * Return: * lun that is OAS enabled for the vport/target * NOT_OAS_ENABLED_LUN when no oas enabled lun found. */ static uint64_t lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], uint8_t tgt_wwpn[], uint32_t *lun_status, uint32_t *lun_pri) { uint64_t found_lun; if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) return NOT_OAS_ENABLED_LUN; if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) phba->sli4_hba.oas_next_vpt_wwpn, (struct lpfc_name *) phba->sli4_hba.oas_next_tgt_wwpn, &phba->sli4_hba.oas_next_lun, (struct lpfc_name *)vpt_wwpn, (struct lpfc_name *)tgt_wwpn, &found_lun, lun_status, lun_pri)) return found_lun; else return NOT_OAS_ENABLED_LUN; } /** * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations * @phba: lpfc_hba pointer. * @vpt_wwpn: vport wwpn by reference. * @tgt_wwpn: target wwpn by reference. * @lun: the fc lun for setting oas state. * @oas_state: the oas state to be set to the oas_lun. * @pri: priority * * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) * a lun for OAS operations. * * Return: * SUCCESS: 0 * -ENOMEM: failed to enable an lun for OAS operations * -EPERM: OAS is not enabled */ static ssize_t lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state, uint8_t pri) { int rc; rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, oas_state, pri); return rc; } /** * lpfc_oas_lun_show - Return oas enabled luns from a chosen target * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * * This routine returns a lun enabled for OAS each time the function * is called. * * Returns: * SUCCESS: size of formatted string. * -EFAULT: target or vport wwpn was not set properly. * -EPERM: oas is not enabled. **/ static ssize_t lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; uint64_t oas_lun; int len = 0; if (!phba->cfg_fof) return -EPERM; if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) return -EFAULT; if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) return -EFAULT; oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, phba->cfg_oas_tgt_wwpn, &phba->cfg_oas_lun_status, &phba->cfg_oas_priority); if (oas_lun != NOT_OAS_ENABLED_LUN) phba->cfg_oas_flags |= OAS_LUN_VALID; len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); return len; } /** * lpfc_oas_lun_store - Sets the OAS state for lun * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: buffer for passing information. * @count: size of the formatting string * * This function sets the OAS state for lun. Before this function is called, * the vport wwpn, target wwpn, and oas state need to be set. * * Returns: * SUCCESS: size of formatted string. * -EFAULT: target or vport wwpn was not set properly. * -EPERM: oas is not enabled. * size of formatted string. 
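 *
 * Illustrative sequence (the host number and WWPN values are invented for
 * the example): set the vport and target WWPNs and the desired state, then
 * write the LUN:
 *
 *   echo 0x2000000000000001 > /sys/class/scsi_host/host0/lpfc_xlane_vpt
 *   echo 0x5000000000000002 > /sys/class/scsi_host/host0/lpfc_xlane_tgt
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_xlane_lun_state
 *   echo 0x0 > /sys/class/scsi_host/host0/lpfc_xlane_lun
 *
 * which enables OAS for LUN 0 of the selected target on the selected vport
 * using the currently configured priority.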
**/ static ssize_t lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; uint64_t scsi_lun; uint32_t pri; ssize_t rc; if (!phba->cfg_fof) return -EPERM; if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) return -EFAULT; if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) return -EFAULT; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "0x%llx", &scsi_lun) != 1) return -EINVAL; pri = phba->cfg_oas_priority; if (pri == 0) pri = phba->cfg_XLanePriority; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " "priority 0x%x with oas state %d\n", wwn_to_u64(phba->cfg_oas_vpt_wwpn), wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, pri, phba->cfg_oas_lun_state); rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, phba->cfg_oas_tgt_wwpn, scsi_lun, phba->cfg_oas_lun_state, pri); if (rc) return rc; return count; } static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, lpfc_oas_lun_show, lpfc_oas_lun_store); int lpfc_enable_nvmet_cnt; unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 0 - none," " 1 - poll with interrupts enabled" " 3 - poll and disable FCP ring interrupts"); static DEVICE_ATTR_RW(lpfc_poll); int lpfc_no_hba_reset_cnt; unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444); MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset"); LPFC_ATTR(sli_mode, 3, 3, 3, "SLI mode selector: 3 - select SLI-3"); LPFC_ATTR_R(enable_npiv, 1, 0, 1, "Enable NPIV functionality"); LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); /* * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of * aborted IO. * The range is [0,1]. Default value is 0 * 0, IO completes after ABTS issued (default). * 1, IO completes after receipt of ABTS response or timeout. */ LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion"); /* # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures # 0x0 = disabled, XRI/OXID use not tracked. # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. */ LPFC_ATTR_R(enable_rrq, 2, 0, 2, "Enable RRQ functionality"); /* # lpfc_suppress_link_up: Bring link up at initialization # 0x0 = bring link up (issue MBX_INIT_LINK) # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK) # 0x2 = never bring up link # Default value is 0. 
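#
# Illustrative module load (the value is only an example; the parameter is
# created by the LPFC_ATTR_R() definition below):
#
#   modprobe lpfc lpfc_suppress_link_up=1
#
# loads the driver without issuing MBX_INIT_LINK, so the link is not
# brought up during initialization.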
*/ LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, LPFC_DELAY_INIT_LINK_INDEFINITELY, "Suppress Link Up at initialization"); static ssize_t lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->sli4_hba.pc_sli4_params.pls); } static DEVICE_ATTR(pls, 0444, lpfc_pls_show, NULL); static ssize_t lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); } static DEVICE_ATTR(pt, 0444, lpfc_pt_show, NULL); /* # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS # 1 - (1024) # 2 - (2048) # 3 - (3072) # 4 - (4096) # 5 - (5120) */ static ssize_t lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); } static DEVICE_ATTR(iocb_hw, S_IRUGO, lpfc_iocb_hw_show, NULL); static ssize_t lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); return scnprintf(buf, PAGE_SIZE, "%d\n", pring ? pring->txq_max : 0); } static DEVICE_ATTR(txq_hw, S_IRUGO, lpfc_txq_hw_show, NULL); static ssize_t lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); return scnprintf(buf, PAGE_SIZE, "%d\n", pring ? pring->txcmplq_max : 0); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear # until the timer expires. Value range is [0,255]. Default value is 30. */ static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; module_param(lpfc_nodev_tmo, int, 0); MODULE_PARM_DESC(lpfc_nodev_tmo, "Seconds driver will hold I/O waiting " "for a device to come back"); /** * lpfc_nodev_tmo_show - Return the hba dev loss timeout value * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the dev loss timeout in decimal. * * Returns: size of formatted string. **/ static ssize_t lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); } /** * lpfc_nodev_tmo_init - Set the hba nodev timeout value * @vport: lpfc vport structure pointer. * @val: contains the nodev timeout value. * * Description: * If the devloss tmo is already set then nodev tmo is set to devloss tmo, * a kernel error message is printed and zero is returned. * Else if val is in range then nodev tmo and devloss tmo are set to val. * Otherwise nodev tmo is set to the default value. 
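 *
 * For example (values are illustrative): if the vport's devloss tmo has
 * already been set to 60 and the module was loaded with lpfc_nodev_tmo=45,
 * both cfg_nodev_tmo and cfg_devloss_tmo end up at 60 and message 0407 is
 * logged noting that the nodev value was ignored.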
* * Returns: * zero if already set or if val is in range * -EINVAL val out of range **/ static int lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) { if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; if (val != LPFC_DEF_DEVLOSS_TMO) lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0407 Ignoring lpfc_nodev_tmo module " "parameter because lpfc_devloss_tmo " "is set.\n"); return 0; } if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0400 lpfc_nodev_tmo attribute cannot be set to" " %d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; return -EINVAL; } /** * lpfc_update_rport_devloss_tmo - Update dev loss tmo value * @vport: lpfc vport structure pointer. * * Description: * Update all the ndlp's dev loss tmo with the vport devloss tmo value. **/ static void lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) { struct Scsi_Host *shost; struct lpfc_nodelist *ndlp; #if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_nvme_rport *rport; struct nvme_fc_remote_port *remoteport = NULL; #endif shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->rport) ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; #if (IS_ENABLED(CONFIG_NVME_FC)) spin_lock(&ndlp->lock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) remoteport = rport->remoteport; spin_unlock(&ndlp->lock); if (rport && remoteport) nvme_fc_set_remoteport_devloss(remoteport, vport->cfg_devloss_tmo); #endif } spin_unlock_irq(shost->host_lock); } /** * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values * @vport: lpfc vport structure pointer. * @val: contains the tmo value. * * Description: * If the devloss tmo is already set or the vport dev loss tmo has changed * then a kernel error message is printed and zero is returned. * Else if val is in range then nodev tmo and devloss tmo are set to val. * Otherwise nodev tmo is set to the default value. * * Returns: * zero if already set or if val is in range * -EINVAL val out of range **/ static int lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) { if (vport->dev_loss_tmo_changed || (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0401 Ignoring change to lpfc_nodev_tmo " "because lpfc_devloss_tmo is set.\n"); return 0; } if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; /* * For compat: set the fc_host dev loss so new rports * will get the value. */ fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; lpfc_update_rport_devloss_tmo(vport); return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0403 lpfc_nodev_tmo attribute cannot be set to " "%d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); return -EINVAL; } lpfc_vport_param_store(nodev_tmo) static DEVICE_ATTR_RW(lpfc_nodev_tmo); /* # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that # disappear until the timer expires. Value range is [0,255]. Default # value is 30. 
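#
# Illustrative runtime change (the host number is an example only):
#
#   echo 45 > /sys/class/scsi_host/host0/lpfc_devloss_tmo
#
# updates cfg_nodev_tmo and cfg_devloss_tmo for that vport and propagates
# the new timeout to registered FC rports and NVMe remote ports via
# lpfc_devloss_tmo_set() and lpfc_update_rport_devloss_tmo().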
*/ module_param(lpfc_devloss_tmo, int, S_IRUGO); MODULE_PARM_DESC(lpfc_devloss_tmo, "Seconds driver will hold I/O waiting " "for a device to come back"); lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) lpfc_vport_param_show(devloss_tmo) /** * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit * @vport: lpfc vport structure pointer. * @val: contains the tmo value. * * Description: * If val is in a valid range then set the vport nodev tmo, * devloss tmo, also set the vport dev loss tmo changed flag. * Else a kernel error message is printed. * * Returns: * zero if val is in range * -EINVAL val out of range **/ static int lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) { if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; vport->dev_loss_tmo_changed = 1; fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; lpfc_update_rport_devloss_tmo(vport); return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0404 lpfc_devloss_tmo attribute cannot be set to " "%d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); return -EINVAL; } lpfc_vport_param_store(devloss_tmo) static DEVICE_ATTR_RW(lpfc_devloss_tmo); /* * lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it * lpfc_suppress_rsp = 0 Disable * lpfc_suppress_rsp = 1 Enable (default) * */ LPFC_ATTR_R(suppress_rsp, 1, 0, 1, "Enable suppress rsp feature is firmware supports it"); /* * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds * lpfc_nvmet_mrq = 0 driver will calcualte optimal number of RQ pairs * lpfc_nvmet_mrq = 1 use a single RQ pair * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ * */ LPFC_ATTR_R(nvmet_mrq, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX, "Specify number of RQ pairs for processing NVMET cmds"); /* * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post * to each NVMET RQ. Range 64 to 2048, default is 512. */ LPFC_ATTR_R(nvmet_mrq_post, LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST, LPFC_NVMET_RQE_DEF_COUNT, "Specify number of RQ buffers to initially post"); /* * lpfc_enable_fc4_type: Defines what FC4 types are supported. * Supported Values: 1 - register just FCP * 3 - register both FCP and NVME * Supported values are [1,3]. Default value is 3 */ LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE, LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE, "Enable FC4 Protocol support - FCP / NVME"); /* # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. # You can set a bit mask to record specific types of verbose messages: # See lpfc_logmsh.h for definitions. */ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, "Verbose logging bit-mask"); /* # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters # objects that have been registered with the nameserver after login. */ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, "Deregister nameserver objects before LOGO"); /* # lun_queue_depth: This parameter is used to limit the number of outstanding # commands per FCP LUN. */ LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512, "Max number of FCP commands we can queue to a specific LUN"); /* # tgt_queue_depth: This parameter is used to limit the number of outstanding # commands per target port. Value range is [10,65535]. Default value is 65535. 
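#
# Illustrative runtime change (the host number is an example only):
#
#   echo 2048 > /sys/class/scsi_host/host0/lpfc_tgt_queue_depth
#
# lpfc_tgt_queue_depth_set() then walks the vport node list and applies the
# new cmd_qdepth to each discovered target node.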
*/ static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH; module_param(lpfc_tgt_queue_depth, uint, 0444); MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth"); lpfc_vport_param_show(tgt_queue_depth); lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH); /** * lpfc_tgt_queue_depth_set: Sets an attribute value. * @vport: lpfc vport structure pointer. * @val: integer attribute value. * * Description: Sets the parameter to the new value. * * Returns: * zero on success * -EINVAL if val is invalid */ static int lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH)) return -EINVAL; if (val == vport->cfg_tgt_queue_depth) return 0; spin_lock_irq(shost->host_lock); vport->cfg_tgt_queue_depth = val; /* Next loop thru nodelist and change cmd_qdepth */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; spin_unlock_irq(shost->host_lock); return 0; } lpfc_vport_param_store(tgt_queue_depth); static DEVICE_ATTR_RW(lpfc_tgt_queue_depth); /* # hba_queue_depth: This parameter is used to limit the number of outstanding # commands per lpfc HBA. Value range is [32,8192]. If this parameter # value is greater than the maximum number of exchanges supported by the HBA, # then maximum number of exchanges supported by the HBA is used to determine # the hba_queue_depth. */ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, "Max number of FCP commands we can queue to a lpfc HBA"); /* # peer_port_login: This parameter allows/prevents logins # between peer ports hosted on the same physical port. # When this parameter is set 0 peer ports of same physical port # are not allowed to login to each other. # When this parameter is set 1 peer ports of same physical port # are allowed to login to each other. # Default value of this parameter is 0. */ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, "Allow peer ports on the same physical port to login to each " "other."); /* # restrict_login: This parameter allows/prevents logins # between Virtual Ports and remote initiators. # When this parameter is not set (0) Virtual Ports will accept PLOGIs from # other initiators and will attempt to PLOGI all remote ports. # When this parameter is set (1) Virtual Ports will reject PLOGIs from # remote ports and will not attempt to PLOGI to other initiators. # This parameter does not restrict to the physical port. # This parameter does not restrict logins to Fabric resident remote ports. # Default value of this parameter is 1. */ static int lpfc_restrict_login = 1; module_param(lpfc_restrict_login, int, S_IRUGO); MODULE_PARM_DESC(lpfc_restrict_login, "Restrict virtual ports login to remote initiators."); lpfc_vport_param_show(restrict_login); /** * lpfc_restrict_login_init - Set the vport restrict login flag * @vport: lpfc vport structure pointer. * @val: contains the restrict login value. * * Description: * If val is not in a valid range then log a kernel error message and set * the vport restrict login to one. * If the port type is physical clear the restrict login flag and return. * Else set the restrict login flag to val. 
* * Returns: * zero if val is in range * -EINVAL val out of range **/ static int lpfc_restrict_login_init(struct lpfc_vport *vport, int val) { if (val < 0 || val > 1) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0422 lpfc_restrict_login attribute cannot " "be set to %d, allowed range is [0, 1]\n", val); vport->cfg_restrict_login = 1; return -EINVAL; } if (vport->port_type == LPFC_PHYSICAL_PORT) { vport->cfg_restrict_login = 0; return 0; } vport->cfg_restrict_login = val; return 0; } /** * lpfc_restrict_login_set - Set the vport restrict login flag * @vport: lpfc vport structure pointer. * @val: contains the restrict login value. * * Description: * If val is not in a valid range then log a kernel error message and set * the vport restrict login to one. * If the port type is physical and the val is not zero log a kernel * error message, clear the restrict login flag and return zero. * Else set the restrict login flag to val. * * Returns: * zero if val is in range * -EINVAL val out of range **/ static int lpfc_restrict_login_set(struct lpfc_vport *vport, int val) { if (val < 0 || val > 1) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0425 lpfc_restrict_login attribute cannot " "be set to %d, allowed range is [0, 1]\n", val); vport->cfg_restrict_login = 1; return -EINVAL; } if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0468 lpfc_restrict_login must be 0 for " "Physical ports.\n"); vport->cfg_restrict_login = 0; return 0; } vport->cfg_restrict_login = val; return 0; } lpfc_vport_param_store(restrict_login); static DEVICE_ATTR_RW(lpfc_restrict_login); /* # Some disk devices have a "select ID" or "select Target" capability. # From a protocol standpoint "select ID" usually means select the # Fibre channel "ALPA". In the FC-AL Profile there is an "informative # annex" which contains a table that maps a "select ID" (a number # between 0 and 7F) to an ALPA. By default, for compatibility with # older drivers, the lpfc driver scans this table from low ALPA to high # ALPA. # # Turning on the scan-down variable (on = 1, off = 0) will # cause the lpfc driver to use an inverted table, effectively # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1. # # (Note: This "select ID" functionality is a LOOP ONLY characteristic # and will not work across a fabric. Also this parameter will take # effect only in the case when ALPA map is not available.) */ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1, "Start scanning for devices from highest ALPA to lowest"); /* # lpfc_topology: link topology for init link # 0x0 = attempt loop mode then point-to-point # 0x01 = internal loopback mode # 0x02 = attempt point-to-point mode only # 0x04 = attempt loop mode only # 0x06 = attempt point-to-point mode then loop # Set point-to-point mode if you want to run as an N_Port. # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6]. # Default value is 0. */ LPFC_ATTR(topology, 0, 0, 6, "Select Fibre Channel topology"); /** * lpfc_topology_store - Set the adapters topology field * @dev: class device that is converted into a scsi_host. * @attr:device attribute, not used. * @buf: buffer for passing information. * @count: size of the data buffer. * * Description: * If val is in a valid range then set the adapter's topology field and * issue a lip; if the lip fails reset the topology to the old value. * * If the value is not in range log a kernel error message and return an error. 
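 *
 * Usage sketch (host number and topology value are illustrative): writing
 * plain digits changes the topology and issues a LIP, while a "nolip "
 * prefix stores the value without bouncing the link:
 *
 *   echo 2 > /sys/class/scsi_host/host0/lpfc_topology
 *   echo "nolip 2" > /sys/class/scsi_host/host0/lpfc_topology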
* * Returns: * zero if val is in range and lip okay * non-zero return value from lpfc_issue_lip() * -EINVAL val out of range **/ static ssize_t lpfc_topology_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int val = 0; int nolip = 0; const char *val_buf = buf; int err; uint32_t prev_val; u8 sli_family, if_type; if (!strncmp(buf, "nolip ", strlen("nolip "))) { nolip = 1; val_buf = &buf[strlen("nolip ")]; } if (!isdigit(val_buf[0])) return -EINVAL; if (sscanf(val_buf, "%i", &val) != 1) return -EINVAL; if (val >= 0 && val <= 6) { prev_val = phba->cfg_topology; if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G && val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3113 Loop mode not supported at speed %d\n", val); return -EINVAL; } /* * The 'topology' is not a configurable parameter if : * - persistent topology enabled * - ASIC_GEN_NUM >= 0xC, with no private loop support */ sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if ((phba->hba_flag & HBA_PERSISTENT_TOPO || (!phba->sli4_hba.pc_sli4_params.pls && (sli_family == LPFC_SLI_INTF_FAMILY_G6 || if_type == LPFC_SLI_INTF_IF_TYPE_6))) && val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3114 Loop mode not supported\n"); return -EINVAL; } phba->cfg_topology = val; if (nolip) return strlen(buf); lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3054 lpfc_topology changed from %d to %d\n", prev_val, val); if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) phba->fc_topology_changed = 1; err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; return -EINVAL; } else return strlen(buf); } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0467 lpfc_topology attribute cannot be set to %d, " "allowed range is [0, 6]\n", phba->brd_no, val); return -EINVAL; } lpfc_param_show(topology) static DEVICE_ATTR_RW(lpfc_topology); /** * lpfc_static_vport_show: Read callback function for * lpfc_static_vport sysfs file. * @dev: Pointer to class device object. * @attr: device attribute structure. * @buf: Data buffer. * * This function is the read call back function for * lpfc_static_vport sysfs file. The lpfc_static_vport * sysfs file report the mageability of the vport. **/ static ssize_t lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; if (vport->vport_flag & STATIC_VPORT) sprintf(buf, "1\n"); else sprintf(buf, "0\n"); return strlen(buf); } /* * Sysfs attribute to control the statistical data collection. */ static DEVICE_ATTR_RO(lpfc_static_vport); /* # lpfc_link_speed: Link speed selection for initializing the Fibre Channel # connection. # Value range is [0,16]. Default value is 0. */ /** * lpfc_link_speed_store - Set the adapters link speed * @dev: Pointer to class device. * @attr: Unused. * @buf: Data buffer. * @count: Size of the data buffer. * * Description: * If val is in a valid range then set the adapter's link speed field and * issue a lip; if the lip fails reset the link speed to the old value. * * Notes: * If the value is not in range log a kernel error message and return an error. * * Returns: * zero if val is in range and lip okay. 
* non-zero return value from lpfc_issue_lip() * -EINVAL val out of range **/ static ssize_t lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int val = LPFC_USER_LINK_SPEED_AUTO; int nolip = 0; const char *val_buf = buf; int err; uint32_t prev_val, if_type; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && phba->hba_flag & HBA_FORCED_LINK_SPEED) return -EPERM; if (!strncmp(buf, "nolip ", strlen("nolip "))) { nolip = 1; val_buf = &buf[strlen("nolip ")]; } if (!isdigit(val_buf[0])) return -EINVAL; if (sscanf(val_buf, "%i", &val) != 1) return -EINVAL; lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3055 lpfc_link_speed changed from %d to %d %s\n", phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)"); if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2879 lpfc_link_speed attribute cannot be set " "to %d. Speed is not supported by this port.\n", val); return -EINVAL; } if (val >= LPFC_USER_LINK_SPEED_16G && phba->fc_topology == LPFC_TOPOLOGY_LOOP) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3112 lpfc_link_speed attribute cannot be set " "to %d. Speed is not supported in loop mode.\n", val); return -EINVAL; } switch (val) { case LPFC_USER_LINK_SPEED_AUTO: case LPFC_USER_LINK_SPEED_1G: case LPFC_USER_LINK_SPEED_2G: case LPFC_USER_LINK_SPEED_4G: case LPFC_USER_LINK_SPEED_8G: case LPFC_USER_LINK_SPEED_16G: case LPFC_USER_LINK_SPEED_32G: case LPFC_USER_LINK_SPEED_64G: prev_val = phba->cfg_link_speed; phba->cfg_link_speed = val; if (nolip) return strlen(buf); err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_link_speed = prev_val; return -EINVAL; } return strlen(buf); default: break; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0469 lpfc_link_speed attribute cannot be set to %d, " "allowed values are [%s]\n", val, LPFC_LINK_SPEED_STRING); return -EINVAL; } static int lpfc_link_speed = 0; module_param(lpfc_link_speed, int, S_IRUGO); MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); lpfc_param_show(link_speed) /** * lpfc_link_speed_init - Set the adapters link speed * @phba: lpfc_hba pointer. * @val: link speed value. * * Description: * If val is in a valid range then set the adapter's link speed field. * * Notes: * If the value is not in range log a kernel error message, clear the link * speed and return an error. * * Returns: * zero if val saved. 
* -EINVAL val out of range **/ static int lpfc_link_speed_init(struct lpfc_hba *phba, int val) { if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3111 lpfc_link_speed of %d cannot " "support loop mode, setting topology to default.\n", val); phba->cfg_topology = 0; } switch (val) { case LPFC_USER_LINK_SPEED_AUTO: case LPFC_USER_LINK_SPEED_1G: case LPFC_USER_LINK_SPEED_2G: case LPFC_USER_LINK_SPEED_4G: case LPFC_USER_LINK_SPEED_8G: case LPFC_USER_LINK_SPEED_16G: case LPFC_USER_LINK_SPEED_32G: case LPFC_USER_LINK_SPEED_64G: phba->cfg_link_speed = val; return 0; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0405 lpfc_link_speed attribute cannot " "be set to %d, allowed values are " "["LPFC_LINK_SPEED_STRING"]\n", val); phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; return -EINVAL; } } static DEVICE_ATTR_RW(lpfc_link_speed); /* # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) # 1 = aer supported and enabled (default) # PCIe error reporting is always enabled by the PCI core, so this always # shows 1. # # N.B. Parts of LPFC_ATTR open-coded since some of the underlying # infrastructure (phba->cfg_aer_support) is gone. */ static uint lpfc_aer_support = 1; module_param(lpfc_aer_support, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); static ssize_t lpfc_aer_support_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", lpfc_aer_support); } /** * lpfc_aer_support_store - Set the adapter for aer support * * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing enable or disable aer flag. * @count: unused variable. * * Description: * PCIe error reporting is enabled by the PCI core, so drivers don't need * to do anything. Retain this interface for backwards compatibility, * but do nothing. * * Returns: * length of the buf on success * -EINVAL if val out of range **/ static ssize_t lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int val = 0; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; dev_info_once(dev, "PCIe error reporting automatically enabled by the PCI core; sysfs write ignored\n"); return strlen(buf); } static DEVICE_ATTR_RW(lpfc_aer_support); /** * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing flag 1 for aer cleanup state. * @count: unused variable. * * Description: * If the @buf contains 1, invokes the kernel AER helper routine * pci_aer_clear_nonfatal_status() to clean up the uncorrectable * error status register. 
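 *
 * Illustrative use (the host number is an example): after a recovered
 * non-fatal PCIe error the status bits can be cleared with
 *
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_state_cleanup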
 *
 * Notes:
 *
 * Returns:
 * -EINVAL if the buf does not contain 1
 * -EPERM if the OS cannot clear AER error status, i.e., when platform
 * firmware owns the AER Capability
 **/
static ssize_t
lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int val, rc = -1;

	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;
	if (val != 1)
		return -EINVAL;

	rc = pci_aer_clear_nonfatal_status(phba->pcidev);

	if (rc == 0)
		return strlen(buf);
	else
		return -EPERM;
}

static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
		   lpfc_aer_cleanup_state);

/**
 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
 *
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing the string of the number of VFs to be enabled.
 * @count: unused variable.
 *
 * Description:
 * When this API is called through user sysfs, the driver shall try to
 * enable or disable SR-IOV virtual functions according to the following:
 *
 * If no virtual functions have been enabled on the physical function,
 * the driver shall invoke the PCI enable virtual function API to try
 * to enable the virtual functions. If the nr_vfn provided is greater
 * than the maximum supported, -EINVAL is returned and nothing is changed;
 * otherwise, the nr_vfn provided shall be used for invoking the API.
 * If the API call returns success, the actual number of virtual functions
 * enabled will be set in the driver's cfg_sriov_nr_virtfn; otherwise,
 * -EPERM shall be returned and the driver's cfg_sriov_nr_virtfn remains
 * zero.
 *
 * If a non-zero number of virtual functions has already been enabled on
 * the physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
 * -EEXIST will be returned and the driver does nothing.
 *
 * If the nr_vfn provided is zero and a non-zero number of virtual
 * functions has been enabled, as indicated by the driver's
 * cfg_sriov_nr_virtfn, the disabling virtual function API shall be
 * invoked to disable all the virtual functions and the driver's
 * cfg_sriov_nr_virtfn shall be set to zero. Otherwise, if no virtual
 * functions have been enabled, do nothing.
 *
 * Returns:
 * length of the buf on success if val is in range and the intended mode
 * is supported.
 * -EINVAL if val out of range or intended mode is not supported.
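 *
 * Example (the host number and VF count are illustrative): enable four
 * virtual functions with
 *
 *   echo 4 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
 *
 * and disable them again by writing 0. Writing a non-zero value while VFs
 * are already enabled returns -EEXIST.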
**/ static ssize_t lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; struct pci_dev *pdev = phba->pcidev; int val = 0, rc = -EINVAL; /* Sanity check on user data */ if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if (val < 0) return -EINVAL; /* Request disabling virtual functions */ if (val == 0) { if (phba->cfg_sriov_nr_virtfn > 0) { pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } return strlen(buf); } /* Request enabling virtual functions */ if (phba->cfg_sriov_nr_virtfn > 0) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3018 There are %d virtual functions " "enabled on physical function.\n", phba->cfg_sriov_nr_virtfn); return -EEXIST; } if (val <= LPFC_MAX_VFN_PER_PFN) phba->cfg_sriov_nr_virtfn = val; else { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3019 Enabling %d virtual functions is not " "allowed.\n", val); return -EINVAL; } rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn); if (rc) { phba->cfg_sriov_nr_virtfn = 0; rc = -EPERM; } else rc = strlen(buf); return rc; } LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN, "Enable PCIe device SR-IOV virtual fn"); lpfc_param_show(sriov_nr_virtfn) static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn); /** * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade * * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: containing the string the number of vfs to be enabled. * @count: unused variable. * * Description: * * Returns: * length of the buf on success if val is in range the intended mode * is supported. * -EINVAL if val out of range or intended mode is not supported. **/ static ssize_t lpfc_request_firmware_upgrade_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; int val = 0, rc; /* Sanity check on user data */ if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; if (val != 1) return -EINVAL; rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE); if (rc) rc = -EPERM; else rc = strlen(buf); return rc; } static int lpfc_req_fw_upgrade; module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade"); lpfc_param_show(request_firmware_upgrade) /** * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade * @phba: lpfc_hba pointer. * @val: 0 or 1. * * Description: * Set the initial Linux generic firmware upgrade enable or disable flag. * * Returns: * zero if val saved. * -EINVAL val out of range **/ static int lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val) { if (val >= 0 && val <= 1) { phba->cfg_request_firmware_upgrade = val; return 0; } return -EINVAL; } static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR, lpfc_request_firmware_upgrade_show, lpfc_request_firmware_upgrade_store); /** * lpfc_force_rscn_store * * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: unused string * @count: unused variable. 
* * Description: * Force the switch to send a RSCN to all other NPorts in our zone * If we are direct connect pt2pt, build the RSCN command ourself * and send to the other NPort. Not supported for private loop. * * Returns: * 0 - on success * -EIO - if command is not sent **/ static ssize_t lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; int i; i = lpfc_issue_els_rscn(vport, 0); if (i) return -EIO; return strlen(buf); } /* * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts * connected to the HBA. * * Value range is any ascii value */ static int lpfc_force_rscn; module_param(lpfc_force_rscn, int, 0644); MODULE_PARM_DESC(lpfc_force_rscn, "Force an RSCN to be sent to all remote NPorts"); lpfc_param_show(force_rscn) /** * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts * @phba: lpfc_hba pointer. * @val: unused value. * * Returns: * zero if val saved. **/ static int lpfc_force_rscn_init(struct lpfc_hba *phba, int val) { return 0; } static DEVICE_ATTR_RW(lpfc_force_rscn); /** * lpfc_fcp_imax_store * * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: string with the number of fast-path FCP interrupts per second. * @count: unused variable. * * Description: * If val is in a valid range [636,651042], then set the adapter's * maximum number of fast-path FCP interrupts per second. * * Returns: * length of the buf on success if val is in range the intended mode * is supported. * -EINVAL if val out of range or intended mode is not supported. **/ static ssize_t lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_eq_intr_info *eqi; uint32_t usdelay; int val = 0, i; /* fcp_imax is only valid for SLI4 */ if (phba->sli_rev != LPFC_SLI_REV4) return -EINVAL; /* Sanity check on user data */ if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &val) != 1) return -EINVAL; /* * Value range for the HBA is [5000,5000000] * The value for each EQ depends on how many EQs are configured. * Allow value == 0 */ if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)) return -EINVAL; phba->cfg_auto_imax = (val) ? 0 : 1; if (phba->cfg_fcp_imax && !val) { queue_delayed_work(phba->wq, &phba->eq_delay_work, msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); for_each_present_cpu(i) { eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); eqi->icnt = 0; } } phba->cfg_fcp_imax = (uint32_t)val; if (phba->cfg_fcp_imax) usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; else usdelay = 0; for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT) lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT, usdelay); return strlen(buf); } /* # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second # for the HBA. # # Value range is [5,000 to 5,000,000]. Default value is 50,000. */ static int lpfc_fcp_imax = LPFC_DEF_IMAX; module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(lpfc_fcp_imax, "Set the maximum number of FCP interrupts per second per HBA"); lpfc_param_show(fcp_imax) /** * lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable * @phba: lpfc_hba pointer. * @val: link speed value. 
* * Description: * If val is in a valid range [636,651042], then initialize the adapter's * maximum number of fast-path FCP interrupts per second. * * Returns: * zero if val saved. * -EINVAL val out of range **/ static int lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) { if (phba->sli_rev != LPFC_SLI_REV4) { phba->cfg_fcp_imax = 0; return 0; } if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) || (val == 0)) { phba->cfg_fcp_imax = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3016 lpfc_fcp_imax: %d out of range, using default\n", val); phba->cfg_fcp_imax = LPFC_DEF_IMAX; return 0; } static DEVICE_ATTR_RW(lpfc_fcp_imax); /** * lpfc_cq_max_proc_limit_store * * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. * @buf: string with the cq max processing limit of cqes * @count: unused variable. * * Description: * If val is in a valid range, then set value on each cq * * Returns: * The length of the buf: if successful * -ERANGE: if val is not in the valid range * -EINVAL: if bad value format or intended mode is not supported. **/ static ssize_t lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_queue *eq, *cq; unsigned long val; int i; /* cq_max_proc_limit is only valid for SLI4 */ if (phba->sli_rev != LPFC_SLI_REV4) return -EINVAL; /* Sanity check on user data */ if (!isdigit(buf[0])) return -EINVAL; if (kstrtoul(buf, 0, &val)) return -EINVAL; if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT) return -ERANGE; phba->cfg_cq_max_proc_limit = (uint32_t)val; /* set the values on the cq's */ for (i = 0; i < phba->cfg_irq_chann; i++) { /* Get the EQ corresponding to the IRQ vector */ eq = phba->sli4_hba.hba_eq_hdl[i].eq; if (!eq) continue; list_for_each_entry(cq, &eq->child_list, list) cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); } return strlen(buf); } /* * lpfc_cq_max_proc_limit: The maximum number CQE entries processed in an * itteration of CQ processing. */ static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; module_param(lpfc_cq_max_proc_limit, int, 0644); MODULE_PARM_DESC(lpfc_cq_max_proc_limit, "Set the maximum number CQEs processed in an iteration of " "CQ processing"); lpfc_param_show(cq_max_proc_limit) /* * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a * single handler call which should request a polled completion rather * than re-enabling interrupts. */ LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, LPFC_CQ_MIN_THRESHOLD_TO_POLL, LPFC_CQ_MAX_THRESHOLD_TO_POLL, "CQE Processing Threshold to enable Polling"); /** * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit * @phba: lpfc_hba pointer. * @val: entry limit * * Description: * If val is in a valid range, then initialize the adapter's maximum * value. * * Returns: * Always returns 0 for success, even if value not always set to * requested value. If value out of range or not supported, will fall * back to default. 
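 *
 * Illustrative runtime change (the host number is an example; 128 is
 * assumed to lie within [LPFC_CQ_MIN_PROC_LIMIT, LPFC_CQ_MAX_PROC_LIMIT]):
 *
 *   echo 128 > /sys/class/scsi_host/host0/lpfc_cq_max_proc_limit
 *
 * caps each child CQ's max_proc_limit at min(128, cq->entry_count), as
 * done in lpfc_cq_max_proc_limit_store() above.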
**/ static int lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) { phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; if (phba->sli_rev != LPFC_SLI_REV4) return 0; if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) { phba->cfg_cq_max_proc_limit = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0371 lpfc_cq_max_proc_limit: %d out of range, using " "default\n", phba->cfg_cq_max_proc_limit); return 0; } static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); /** * lpfc_fcp_cpu_map_show - Display current driver CPU affinity * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains text describing the state of the link. * * Returns: size of formatted string. **/ static ssize_t lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_vector_map_info *cpup; int len = 0; if ((phba->sli_rev != LPFC_SLI_REV4) || (phba->intr_type != MSIX)) return len; switch (phba->cfg_fcp_cpu_map) { case 0: len += scnprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: No mapping (%d)\n", phba->cfg_fcp_cpu_map); return len; case 1: len += scnprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: HBA centric mapping (%d): " "%d of %d CPUs online from %d possible CPUs\n", phba->cfg_fcp_cpu_map, num_online_cpus(), num_present_cpus(), phba->sli4_hba.num_possible_cpu); break; } while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_possible_cpu) { cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) len += scnprintf(buf + len, PAGE_SIZE - len, "CPU %02d not present\n", phba->sli4_hba.curr_disp_cpu); else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) len += scnprintf( buf + len, PAGE_SIZE - len, "CPU %02d hdwq None " "physid %d coreid %d ht %d ua %d\n", phba->sli4_hba.curr_disp_cpu, cpup->phys_id, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); else len += scnprintf( buf + len, PAGE_SIZE - len, "CPU %02d EQ None hdwq %04d " "physid %d coreid %d ht %d ua %d\n", phba->sli4_hba.curr_disp_cpu, cpup->hdwq, cpup->phys_id, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); } else { if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) len += scnprintf( buf + len, PAGE_SIZE - len, "CPU %02d hdwq None " "physid %d coreid %d ht %d ua %d IRQ %d\n", phba->sli4_hba.curr_disp_cpu, cpup->phys_id, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN), lpfc_get_irq(cpup->eq)); else len += scnprintf( buf + len, PAGE_SIZE - len, "CPU %02d EQ %04d hdwq %04d " "physid %d coreid %d ht %d ua %d IRQ %d\n", phba->sli4_hba.curr_disp_cpu, cpup->eq, cpup->hdwq, cpup->phys_id, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN), lpfc_get_irq(cpup->eq)); } phba->sli4_hba.curr_disp_cpu++; /* display max number of CPUs keeping some margin */ if (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_possible_cpu && (len >= (PAGE_SIZE - 64))) { len += scnprintf(buf + len, PAGE_SIZE - len, "more...\n"); break; } } if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu) phba->sli4_hba.curr_disp_cpu = 0; return len; } /** * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors * @dev: class device that is converted into a Scsi_host. 
* @attr: device attribute, not used. * @buf: one or more lpfc_polling_flags values. * @count: not used. * * Returns: * -EINVAL - Not implemented yet. **/ static ssize_t lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return -EINVAL; } /* # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors # for the HBA. # # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1). # 0 - Do not affinitze IRQ vectors # 1 - Affintize HBA vectors with respect to each HBA # (start with CPU0 for each HBA) # This also defines how Hardware Queues are mapped to specific CPUs. */ static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP; module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(lpfc_fcp_cpu_map, "Defines how to map CPUs to IRQ vectors per HBA"); /** * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable * @phba: lpfc_hba pointer. * @val: link speed value. * * Description: * If val is in a valid range [0-2], then affinitze the adapter's * MSIX vectors. * * Returns: * zero if val saved. * -EINVAL val out of range **/ static int lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) { if (phba->sli_rev != LPFC_SLI_REV4) { phba->cfg_fcp_cpu_map = 0; return 0; } if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { phba->cfg_fcp_cpu_map = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3326 lpfc_fcp_cpu_map: %d out of range, using " "default\n", val); phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP; return 0; } static DEVICE_ATTR_RW(lpfc_fcp_cpu_map); /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. */ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3, "Select Fibre Channel class of service for FCP sequences"); /* # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range # is [0,1]. Default value is 1. */ LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1, "Use ADISC on rediscovery to authenticate FCP devices"); /* # lpfc_first_burst_size: First burst size to use on the NPorts # that support first burst. # Value range is [0,65536]. Default value is 0. */ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, "First burst size for Targets that support first burst"); /* * lpfc_nvmet_fb_size: NVME Target mode supported first burst size. * When the driver is configured as an NVME target, this value is * communicated to the NVME initiator in the PRLI response. It is * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support * parameters are set and the target is sending the PRLI RSP. * Parameter supported on physical port only - no NPIV support. * Value range is [0,65536]. Default value is 0. */ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, "NVME Target mode first burst size in 512B increments."); /* * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. * For the Initiator (I), enabling this parameter means that an NVMET * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be * processed by the initiator for subsequent NVME FCP IO. * Currently, this feature is not supported on the NVME target * Value range is [0,1]. Default value is 0 (disabled). */ LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, "Enable First Burst feature for NVME Initiator."); /* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue # depth. Default value is 0. When the value of this parameter is zero the # SCSI command completion time is not used for controlling I/O queue depth. 
When # the parameter is set to a non-zero value, the I/O queue depth is controlled # to limit the I/O completion time to the parameter value. # The value is set in milliseconds. */ LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000, "Use command completion time to control queue depth"); lpfc_vport_param_show(max_scsicmpl_time); static int lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; if (val == vport->cfg_max_scsicmpl_time) return 0; if ((val < 0) || (val > 60000)) return -EINVAL; vport->cfg_max_scsicmpl_time = val; spin_lock_irq(shost->host_lock); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; } spin_unlock_irq(shost->host_lock); return 0; } lpfc_vport_param_store(max_scsicmpl_time); static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time); /* # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value # range is [0,1]. Default value is 0. */ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); /* # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature # range is [0,1]. Default value is 1. */ LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing"); /* * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds * range is [0,1]. Default value is 0. * For [0], FCP commands are issued to Work Queues based on upper layer * hardware queue index. * For [1], FCP commands are issued to a Work Queue associated with the * current CPU. * * LPFC_FCP_SCHED_BY_HDWQ == 0 * LPFC_FCP_SCHED_BY_CPU == 1 * * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu * affinity for FCP/NVME I/Os through Work Queues associated with the current * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os * through WQs will be used. */ LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU, LPFC_FCP_SCHED_BY_HDWQ, LPFC_FCP_SCHED_BY_CPU, "Determine scheduling algorithm for " "issuing commands [0] - Hardware Queue, [1] - Current CPU"); /* * lpfc_ns_query: Determine algrithmn for NameServer queries after RSCN * range is [0,1]. Default value is 0. * For [0], GID_FT is used for NameServer queries after RSCN (default) * For [1], GID_PT is used for NameServer queries after RSCN * */ LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT, "Determine algorithm NameServer queries after RSCN " "[0] - GID_FT, [1] - GID_PT"); /* # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior # range is [0,1]. Default value is 0. # For [0], bus reset issues target reset to ALL devices # For [1], bus reset issues target reset to non-FCP2 devices */ LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for " "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset"); /* # lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing # cr_delay (msec) or cr_count outstanding commands. cr_delay can take # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay # is 0. Default value of cr_count is 1. The cr_count feature is disabled if # cr_delay is set to 0. 
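#
# Worked example (values are illustrative and the exact firmware behavior
# is not specified here): with cr_delay=10 and cr_count=16, an interrupt
# response is generated after roughly 16 completed I/Os or 10 milliseconds
# of coalescing; leaving cr_delay at 0 disables the feature entirely.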
*/ LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an " "interrupt response is generated"); LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an " "interrupt response is generated"); /* # lpfc_multi_ring_support: Determines how many rings to spread available # cmd/rsp IOCB entries across. # Value range is [1,2]. Default value is 1. */ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary " "SLI rings to spread IOCB entries across"); /* # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this # identifies what rctl value to configure the additional ring for. # Value range is [1,0xff]. Default value is 4 (Unsolicated Data). */ LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1, 255, "Identifies RCTL for additional ring configuration"); /* # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this # identifies what type value to configure the additional ring for. # Value range is [1,0xff]. Default value is 5 (LLC/SNAP). */ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, 255, "Identifies TYPE for additional ring configuration"); /* # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN # 0 = SmartSAN functionality disabled (default) # 1 = SmartSAN functionality enabled # This parameter will override the value of lpfc_fdmi_on module parameter. # Value range is [0,1]. Default value is 0. */ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); /* # lpfc_fdmi_on: Controls FDMI support. # 0 No FDMI support # 1 Traditional FDMI support (default) # Traditional FDMI support means the driver will assume FDMI-2 support; # however, if that fails, it will fallback to FDMI-1. # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of # lpfc_fdmi_on. # Value range [0,1]. Default value is 1. */ LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for # discovery). Value range is [1,64]. Default value = 32. */ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " "during discovery"); /* # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that # will be scanned by the SCSI midlayer when sequential scanning is # used; and is also the highest LUN ID allowed when the SCSI midlayer # parses REPORT_LUN responses. The lpfc driver has no LUN count or # LUN ID limit, but the SCSI midlayer requires this field for the uses # above. The lpfc driver limits the default value to 255 for two reasons. # As it bounds the sequential scan loop, scanning for thousands of luns # on a target can take minutes of wall clock time. Additionally, # there are FC targets, such as JBODs, that only recognize 8-bits of # LUN ID. When they receive a value greater than 8 bits, they chop off # the high order bits. In other words, they see LUN IDs 0, 256, 512, # and so on all as LUN ID 0. This causes the linux kernel, which sees # valid responses at each of the LUN IDs, to believe there are multiple # devices present, when in fact, there is only 1. # A customer that is aware of their target behaviors, and the results as # indicated above, is welcome to increase the lpfc_max_luns value. # As mentioned, this value is not used by the lpfc driver, only the # SCSI midlayer. # Value range is [0,65535]. Default value is 255. # NOTE: The SCSI layer might probe all allowed LUN on some old targets. 
*/ LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID"); /* # lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring. # Value range is [1,255], default value is 10. */ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "Milliseconds driver will wait between polling FCP ring"); /* # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands # to complete in seconds. Value range is [5,180], default value is 60. */ LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, "Maximum time to wait for task management commands to complete"); /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature # 0 = MSI disabled # 1 = MSI enabled # 2 = MSI-X enabled (default) # Value range is [0,2]. Default value is 2. */ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs * * 0 = NVME OAS disabled * 1 = NVME OAS enabled * * Value range is [0,1]. Default value is 0. */ LPFC_ATTR_RW(nvme_oas, 0, 0, 1, "Use OAS bit on NVME IOs"); /* * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE when sending * NVME/NVMET IOs * * 0 = Put NVME Command in SGL * 1 = Embed NVME Command in WQE (unless G7) * 2 = Embed NVME Command in WQE (force) * * Value range is [0,2]. Default value is 1. */ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE"); /* * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues * the driver will advertise it supports to the SCSI layer. * * 0 = Set nr_hw_queues by the number of CPUs or HW queues. * 1,256 = Manually specify nr_hw_queue value to be advertised. * * Value range is [0,256]. Default value is 8. */ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, "Set the number of SCSI Queues advertised"); /* * lpfc_hdw_queue: Set the number of Hardware Queues the driver * will advertise it supports to the NVME and SCSI layers. This also * will map to the number of CQ/WQ pairs the driver will create. * * The NVME Layer will try to create this many, plus 1 administrative * hardware queue. The administrative queue will always map to WQ 0. * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. * * 0 = Configure the number of hdw queues to the number of active CPUs. * 1,256 = Manually specify how many hdw queues to use. * * Value range is [0,256]. Default value is 0. */ LPFC_ATTR_R(hdw_queue, LPFC_HBA_HDWQ_DEF, LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, "Set the number of I/O Hardware Queues"); #if IS_ENABLED(CONFIG_X86) /** * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on * irq_chann_mode * @phba: Pointer to HBA context object.
**/ static void lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba) { unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE; const struct cpumask *sibling_mask; struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask; cpumask_clear(aff_mask); if (phba->irq_chann_mode == NUMA_MODE) { /* Check if we're a NUMA architecture */ numa_node = dev_to_node(&phba->pcidev->dev); if (numa_node == NUMA_NO_NODE) { phba->irq_chann_mode = NORMAL_MODE; return; } } for_each_possible_cpu(cpu) { switch (phba->irq_chann_mode) { case NUMA_MODE: if (cpu_to_node(cpu) == numa_node) cpumask_set_cpu(cpu, aff_mask); break; case NHT_MODE: sibling_mask = topology_sibling_cpumask(cpu); first_cpu = cpumask_first(sibling_mask); if (first_cpu < nr_cpu_ids) cpumask_set_cpu(first_cpu, aff_mask); break; default: break; } } } #endif static void lpfc_assign_default_irq_chann(struct lpfc_hba *phba) { #if IS_ENABLED(CONFIG_X86) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: /* If AMD architecture, then default is NUMA_MODE */ phba->irq_chann_mode = NUMA_MODE; break; case X86_VENDOR_INTEL: /* If Intel architecture, then default is no hyperthread mode */ phba->irq_chann_mode = NHT_MODE; break; default: phba->irq_chann_mode = NORMAL_MODE; break; } lpfc_cpumask_irq_mode_init(phba); #else phba->irq_chann_mode = NORMAL_MODE; #endif } /* * lpfc_irq_chann: Set the number of IRQ vectors that are available * for Hardware Queues to utilize. This also will map to the number * of EQ / MSI-X vectors the driver will create. This should never be * more than the number of Hardware Queues * * 0 = Configure number of IRQ Channels to: * if AMD architecture, number of CPUs on HBA's NUMA node * if Intel architecture, number of physical CPUs. * otherwise, number of active CPUs. * [1,256] = Manually specify how many IRQ Channels to use. * * Value range is [0,256]. Default value is [0]. */ static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; module_param(lpfc_irq_chann, uint, 0444); MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); /* lpfc_irq_chann_init - Set the hba irq_chann initial value * @phba: lpfc_hba pointer. * @val: contains the initial value * * Description: * Validates the initial value is within range and assigns it to the * adapter. If not in range, an error message is posted and the * default value is assigned. 
* * Returns: * zero if value is in range and is set * -EINVAL if value was out of range **/ static int lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) { const struct cpumask *aff_mask; if (phba->cfg_use_msi != 2) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8532 use_msi = %u ignoring cfg_irq_numa\n", phba->cfg_use_msi); phba->irq_chann_mode = NORMAL_MODE; phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; return 0; } /* Check if default setting was passed */ if (val == LPFC_IRQ_CHANN_DEF && phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF && phba->sli_rev == LPFC_SLI_REV4) lpfc_assign_default_irq_chann(phba); if (phba->irq_chann_mode != NORMAL_MODE) { aff_mask = &phba->sli4_hba.irq_aff_mask; if (cpumask_empty(aff_mask)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8533 Could not identify CPUS for " "mode %d, ignoring\n", phba->irq_chann_mode); phba->irq_chann_mode = NORMAL_MODE; phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; } else { phba->cfg_irq_chann = cpumask_weight(aff_mask); /* If no hyperthread mode, then set hdwq count to * aff_mask weight as well */ if (phba->irq_chann_mode == NHT_MODE) phba->cfg_hdw_queue = phba->cfg_irq_chann; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8543 lpfc_irq_chann set to %u " "(mode: %d)\n", phba->cfg_irq_chann, phba->irq_chann_mode); } } else { if (val > LPFC_IRQ_CHANN_MAX) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8545 lpfc_irq_chann attribute cannot " "be set to %u, allowed range is " "[%u,%u]\n", val, LPFC_IRQ_CHANN_MIN, LPFC_IRQ_CHANN_MAX); phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; return -EINVAL; } if (phba->sli_rev == LPFC_SLI_REV4) { phba->cfg_irq_chann = val; } else { phba->cfg_irq_chann = 2; phba->cfg_hdw_queue = 1; } } return 0; } /** * lpfc_irq_chann_show - Display value of irq_chann * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains the configured IRQ vector (channel) count * * Returns: size of formatted string. **/ static ssize_t lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); } static DEVICE_ATTR_RO(lpfc_irq_chann); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled # 1 = HBA resets enabled (default) # 2 = HBA reset via PCI bus reset enabled # Value range is [0,2]. Default value is 1. */ LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver."); /* # lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer. # 0 = HBA Heartbeat disabled (default) # 1 = HBA Heartbeat enabled # Value range is [0,1]. Default value is 0. */ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); /* # lpfc_EnableXLane: Enable Express Lane Feature # 0x0 Express Lane Feature disabled # 0x1 Express Lane Feature enabled # Value range is [0,1]. Default value is 0. */ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); /* # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) # Value range is [0x0,0x7f]. Default value is 0. */ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); /* # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) # 0 = BlockGuard disabled (default) # 1 = BlockGuard enabled # Value range is [0,1]. Default value is 0.
*/ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); /* # lpfc_prot_mask: # - Bit mask of host protection capabilities used to register with the # SCSI mid-layer # - Only meaningful if BG is turned on (lpfc_enable_bg=1). # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all profiles. # - SHOST_DIF_TYPE1_PROTECTION 1 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection # - SHOST_DIX_TYPE0_PROTECTION 8 # HBA supports DIX Type 0: Host to HBA protection only # - SHOST_DIX_TYPE1_PROTECTION 16 # HBA supports DIX Type 1: Host to HBA Type 1 protection # */ LPFC_ATTR(prot_mask, (SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | SHOST_DIX_TYPE1_PROTECTION), 0, (SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | SHOST_DIX_TYPE1_PROTECTION), "T10-DIF host protection capabilities mask"); /* # lpfc_prot_guard: # - Bit mask of protection guard types to register with the SCSI mid-layer # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all guard types # */ LPFC_ATTR(prot_guard, SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP, "T10-DIF host protection guard type"); /* * Delay initial NPort discovery when Clean Address bit is cleared in * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. * This parameter can have value 0 or 1. * When this parameter is set to 0, no delay is added to the initial * discovery. * When this parameter is set to non-zero value, initial Nport discovery is * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC * accept and FCID/Fabric name/Fabric portname is changed. * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion * when Clean Address bit is cleared in FLOGI/FDISC * accept and FCID/Fabric name/Fabric portname is changed. * Default value is 0. */ LPFC_ATTR(delay_discovery, 0, 0, 1, "Delay NPort discovery when Clean Address bit is cleared."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count * This value can be set to values between 64 and 4096. The default value * is 64, but may be increased to allow for larger Max I/O sizes. The scsi * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE). * Because of the additional overhead involved in setting up T10-DIF, * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 * and will be limited to 512 if BlockGuard is enabled under SLI3. */ static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; module_param(lpfc_sg_seg_cnt, uint, 0444); MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count"); /** * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes * configured for the adapter * @dev: class converted to a Scsi_host structure. * @attr: device attribute, not used. * @buf: on return contains a string with the list sizes * * Returns: size of formatted string. 
**/ static ssize_t lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; int len; len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); len += scnprintf(buf + len, PAGE_SIZE - len, "Cfg: %d SCSI: %d NVME: %d\n", phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, phba->cfg_nvme_seg_cnt); return len; } static DEVICE_ATTR_RO(lpfc_sg_seg_cnt); /** * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value * @phba: lpfc_hba pointer. * @val: contains the initial value * * Description: * Validates the initial value is within range and assigns it to the * adapter. If not in range, an error message is posted and the * default value is assigned. * * Returns: * zero if value is in range and is set * -EINVAL if value was out of range **/ static int lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) { if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) { phba->cfg_sg_seg_cnt = val; return 0; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, " "allowed range is [%d, %d]\n", val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT); phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT; return -EINVAL; } /* * lpfc_enable_mds_diags: Enable MDS Diagnostics * 0 = MDS Diagnostics disabled (default) * 1 = MDS Diagnostics enabled * Value range is [0,1]. Default value is 0. */ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); /* * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size * 0 = Disable firmware logging (default) * [1-4] = Multiple of 1/4th Mb of host memory for FW logging * Value range [0..4]. Default value is 0 */ LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); lpfc_param_show(ras_fwlog_buffsize); static ssize_t lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) { int ret = 0; enum ras_state state; if (!lpfc_rangecheck(val, 0, 4)) return -EINVAL; if (phba->cfg_ras_fwlog_buffsize == val) return 0; if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) return -EINVAL; spin_lock_irq(&phba->hbalock); state = phba->ras_fwlog.state; spin_unlock_irq(&phba->hbalock); if (state == REG_INPROGRESS) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " "registration is in progress\n"); return -EBUSY; } /* For disable logging: stop the logs and free the DMA. * For ras_fwlog_buffsize size change we still need to free and * reallocate the DMA in lpfc_sli4_ras_fwlog_init. */ phba->cfg_ras_fwlog_buffsize = val; if (state == ACTIVE) { lpfc_ras_stop_fwlog(phba); lpfc_sli4_ras_dma_free(phba); } lpfc_sli4_ras_init(phba); if (phba->ras_fwlog.ras_enabled) ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, LPFC_RAS_ENABLE_LOGGING); return ret; } lpfc_param_store(ras_fwlog_buffsize); static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); /* * lpfc_ras_fwlog_level: Firmware logging verbosity level * Valid only if firmware logging is enabled * 0(Least Verbosity) 4 (most verbosity) * Value range is [0..4]. Default value is 0 */ LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level"); /* * lpfc_ras_fwlog_func: Firmware logging enabled on function number * Default function which has RAS support : 0 * Value Range is [0..7]. * FW logging is a global action and enablement is via a specific * port. 
*/ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function"); /* * lpfc_enable_bbcr: Enable BB Credit Recovery * 0 = BB Credit Recovery disabled * 1 = BB Credit Recovery enabled (default) * Value range is [0,1]. Default value is 1. */ LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); /* Signaling module parameters */ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ module_param(lpfc_fabric_cgn_frequency, int, 0444); MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq"); unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ module_param(lpfc_acqe_cgn_frequency, byte, 0444); MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq"); int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */ module_param(lpfc_use_cgn_signal, int, 0444); MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available"); /* * lpfc_enable_dpp: Enable DPP on G7 * 0 = DPP on G7 disabled * 1 = DPP on G7 enabled (default) * Value range is [0,1]. Default value is 1. */ LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); /* * lpfc_enable_mi: Enable FDMI MIB * 0 = disabled * 1 = enabled (default) * Value range is [0,1]. */ LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI"); /* * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if * either vmid_app_header or vmid_priority_tagging is enabled. * 4 - 255 = vmid support enabled for 4-255 VMs * Value range is [4,255]. */ LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID, "Maximum number of VMs supported"); /* * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours * 0 = Timeout is disabled * Value range is [0,24]. */ LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24, "Inactivity timeout in hours"); /* * lpfc_vmid_app_header: Enable App Header VMID support * 0 = Support is disabled (default) * 1 = Support is enabled * Value range is [0,1]. */ LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE, "Enable App Header VMID support"); /* * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support * 0 = Support is disabled (default) * 1 = Allow supported targets only * 2 = Allow all targets * Value range is [0,2]. 
*/ LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE, LPFC_VMID_PRIO_TAG_DISABLE, LPFC_VMID_PRIO_TAG_ALL_TARGETS, "Enable Priority Tagging VMID support"); static struct attribute *lpfc_hba_attrs[] = { &dev_attr_nvme_info.attr, &dev_attr_scsi_stat.attr, &dev_attr_bg_info.attr, &dev_attr_bg_guard_err.attr, &dev_attr_bg_apptag_err.attr, &dev_attr_bg_reftag_err.attr, &dev_attr_info.attr, &dev_attr_serialnum.attr, &dev_attr_modeldesc.attr, &dev_attr_modelname.attr, &dev_attr_programtype.attr, &dev_attr_portnum.attr, &dev_attr_fwrev.attr, &dev_attr_hdw.attr, &dev_attr_option_rom_version.attr, &dev_attr_link_state.attr, &dev_attr_num_discovered_ports.attr, &dev_attr_lpfc_drvr_version.attr, &dev_attr_lpfc_enable_fip.attr, &dev_attr_lpfc_temp_sensor.attr, &dev_attr_lpfc_log_verbose.attr, &dev_attr_lpfc_lun_queue_depth.attr, &dev_attr_lpfc_tgt_queue_depth.attr, &dev_attr_lpfc_hba_queue_depth.attr, &dev_attr_lpfc_peer_port_login.attr, &dev_attr_lpfc_nodev_tmo.attr, &dev_attr_lpfc_devloss_tmo.attr, &dev_attr_lpfc_enable_fc4_type.attr, &dev_attr_lpfc_fcp_class.attr, &dev_attr_lpfc_use_adisc.attr, &dev_attr_lpfc_first_burst_size.attr, &dev_attr_lpfc_ack0.attr, &dev_attr_lpfc_xri_rebalancing.attr, &dev_attr_lpfc_topology.attr, &dev_attr_lpfc_scan_down.attr, &dev_attr_lpfc_link_speed.attr, &dev_attr_lpfc_fcp_io_sched.attr, &dev_attr_lpfc_ns_query.attr, &dev_attr_lpfc_fcp2_no_tgt_reset.attr, &dev_attr_lpfc_cr_delay.attr, &dev_attr_lpfc_cr_count.attr, &dev_attr_lpfc_multi_ring_support.attr, &dev_attr_lpfc_multi_ring_rctl.attr, &dev_attr_lpfc_multi_ring_type.attr, &dev_attr_lpfc_fdmi_on.attr, &dev_attr_lpfc_enable_SmartSAN.attr, &dev_attr_lpfc_max_luns.attr, &dev_attr_lpfc_enable_npiv.attr, &dev_attr_lpfc_fcf_failover_policy.attr, &dev_attr_lpfc_enable_rrq.attr, &dev_attr_lpfc_fcp_wait_abts_rsp.attr, &dev_attr_nport_evt_cnt.attr, &dev_attr_board_mode.attr, &dev_attr_lpfc_xcvr_data.attr, &dev_attr_max_vpi.attr, &dev_attr_used_vpi.attr, &dev_attr_max_rpi.attr, &dev_attr_used_rpi.attr, &dev_attr_max_xri.attr, &dev_attr_used_xri.attr, &dev_attr_npiv_info.attr, &dev_attr_issue_reset.attr, &dev_attr_lpfc_poll.attr, &dev_attr_lpfc_poll_tmo.attr, &dev_attr_lpfc_task_mgmt_tmo.attr, &dev_attr_lpfc_use_msi.attr, &dev_attr_lpfc_nvme_oas.attr, &dev_attr_lpfc_nvme_embed_cmd.attr, &dev_attr_lpfc_fcp_imax.attr, &dev_attr_lpfc_force_rscn.attr, &dev_attr_lpfc_cq_poll_threshold.attr, &dev_attr_lpfc_cq_max_proc_limit.attr, &dev_attr_lpfc_fcp_cpu_map.attr, &dev_attr_lpfc_fcp_mq_threshold.attr, &dev_attr_lpfc_hdw_queue.attr, &dev_attr_lpfc_irq_chann.attr, &dev_attr_lpfc_suppress_rsp.attr, &dev_attr_lpfc_nvmet_mrq.attr, &dev_attr_lpfc_nvmet_mrq_post.attr, &dev_attr_lpfc_nvme_enable_fb.attr, &dev_attr_lpfc_nvmet_fb_size.attr, &dev_attr_lpfc_enable_bg.attr, &dev_attr_lpfc_enable_hba_reset.attr, &dev_attr_lpfc_enable_hba_heartbeat.attr, &dev_attr_lpfc_EnableXLane.attr, &dev_attr_lpfc_XLanePriority.attr, &dev_attr_lpfc_xlane_lun.attr, &dev_attr_lpfc_xlane_tgt.attr, &dev_attr_lpfc_xlane_vpt.attr, &dev_attr_lpfc_xlane_lun_state.attr, &dev_attr_lpfc_xlane_lun_status.attr, &dev_attr_lpfc_xlane_priority.attr, &dev_attr_lpfc_sg_seg_cnt.attr, &dev_attr_lpfc_max_scsicmpl_time.attr, &dev_attr_lpfc_aer_support.attr, &dev_attr_lpfc_aer_state_cleanup.attr, &dev_attr_lpfc_sriov_nr_virtfn.attr, &dev_attr_lpfc_req_fw_upgrade.attr, &dev_attr_lpfc_suppress_link_up.attr, &dev_attr_iocb_hw.attr, &dev_attr_pls.attr, &dev_attr_pt.attr, &dev_attr_txq_hw.attr, &dev_attr_txcmplq_hw.attr, &dev_attr_lpfc_sriov_hw_max_virtfn.attr, 
&dev_attr_protocol.attr, &dev_attr_lpfc_xlane_supported.attr, &dev_attr_lpfc_enable_mds_diags.attr, &dev_attr_lpfc_ras_fwlog_buffsize.attr, &dev_attr_lpfc_ras_fwlog_level.attr, &dev_attr_lpfc_ras_fwlog_func.attr, &dev_attr_lpfc_enable_bbcr.attr, &dev_attr_lpfc_enable_dpp.attr, &dev_attr_lpfc_enable_mi.attr, &dev_attr_cmf_info.attr, &dev_attr_lpfc_max_vmid.attr, &dev_attr_lpfc_vmid_inactivity_timeout.attr, &dev_attr_lpfc_vmid_app_header.attr, &dev_attr_lpfc_vmid_priority_tagging.attr, NULL, }; static const struct attribute_group lpfc_hba_attr_group = { .attrs = lpfc_hba_attrs }; const struct attribute_group *lpfc_hba_groups[] = { &lpfc_hba_attr_group, NULL }; static struct attribute *lpfc_vport_attrs[] = { &dev_attr_info.attr, &dev_attr_link_state.attr, &dev_attr_num_discovered_ports.attr, &dev_attr_lpfc_drvr_version.attr, &dev_attr_lpfc_log_verbose.attr, &dev_attr_lpfc_lun_queue_depth.attr, &dev_attr_lpfc_tgt_queue_depth.attr, &dev_attr_lpfc_nodev_tmo.attr, &dev_attr_lpfc_devloss_tmo.attr, &dev_attr_lpfc_hba_queue_depth.attr, &dev_attr_lpfc_peer_port_login.attr, &dev_attr_lpfc_restrict_login.attr, &dev_attr_lpfc_fcp_class.attr, &dev_attr_lpfc_use_adisc.attr, &dev_attr_lpfc_first_burst_size.attr, &dev_attr_lpfc_max_luns.attr, &dev_attr_nport_evt_cnt.attr, &dev_attr_npiv_info.attr, &dev_attr_lpfc_enable_da_id.attr, &dev_attr_lpfc_max_scsicmpl_time.attr, &dev_attr_lpfc_static_vport.attr, &dev_attr_cmf_info.attr, NULL, }; static const struct attribute_group lpfc_vport_attr_group = { .attrs = lpfc_vport_attrs }; const struct attribute_group *lpfc_vport_groups[] = { &lpfc_vport_attr_group, NULL }; /** * sysfs_ctlreg_write - Write method for writing to ctlreg * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be written to the adapter IOREG space. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. * Uses the adapter io control registers to send buf contents to the adapter. * * Returns: * -ERANGE off and count combo out of range * -EINVAL off, count or buff address invalid * -EPERM adapter is offline * value of count, buf contents written **/ static ssize_t sysfs_ctlreg_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev >= LPFC_SLI_REV4) return -EPERM; if ((off + count) > FF_REG_AREA_SIZE) return -ERANGE; if (count <= LPFC_REG_WRITE_KEY_SIZE) return 0; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; /* This is to protect HBA registers from accidental writes. */ if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) return -EINVAL; if (!(vport->fc_flag & FC_OFFLINE_MODE)) return -EPERM; spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; buf_off += sizeof(uint32_t)) writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), phba->ctrl_regs_memmap_p + off + buf_off); spin_unlock_irq(&phba->hbalock); return count; } /** * sysfs_ctlreg_read - Read method for reading from ctlreg * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. 
* @buf: if successful contains the data from the adapter IOREG space. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Accessed via /sys/class/scsi_host/hostxxx/ctlreg. * Uses the adapter io control registers to read data into buf. * * Returns: * -ERANGE off and count combo out of range * -EINVAL off, count or buff address invalid * value of count, buf contents read **/ static ssize_t sysfs_ctlreg_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; uint32_t * tmp_ptr; struct device *dev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; if (phba->sli_rev >= LPFC_SLI_REV4) return -EPERM; if (off > FF_REG_AREA_SIZE) return -ERANGE; if ((off + count) > FF_REG_AREA_SIZE) count = FF_REG_AREA_SIZE - off; if (count == 0) return 0; if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { tmp_ptr = (uint32_t *)(buf + buf_off); *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); } spin_unlock_irq(&phba->hbalock); return count; } static struct bin_attribute sysfs_ctlreg_attr = { .attr = { .name = "ctlreg", .mode = S_IRUSR | S_IWUSR, }, .size = 256, .read = sysfs_ctlreg_read, .write = sysfs_ctlreg_write, }; /** * sysfs_mbox_write - Write method for writing information via mbox * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be written to sysfs mbox. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Deprecated function. All mailbox access from user space is performed via the * bsg interface. * * Returns: * -EPERM operation not permitted **/ static ssize_t sysfs_mbox_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return -EPERM; } /** * sysfs_mbox_read - Read method for reading information via mbox * @filp: open sysfs file * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. * @buf: contains the data to be read from sysfs mbox. * @off: offset into buffer to beginning of data. * @count: bytes to transfer. * * Description: * Deprecated function. All mailbox access from user space is performed via the * bsg interface. * * Returns: * -EPERM operation not permitted **/ static ssize_t sysfs_mbox_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return -EPERM; } static struct bin_attribute sysfs_mbox_attr = { .attr = { .name = "mbox", .mode = S_IRUSR | S_IWUSR, }, .size = MAILBOX_SYSFS_MAX, .read = sysfs_mbox_read, .write = sysfs_mbox_write, }; /** * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries * @vport: address of lpfc vport structure. 
* * Return codes: * zero on success * error return code from sysfs_create_bin_file() **/ int lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); int error; /* Virtual ports do not need ctrl_reg and mbox */ if (vport->port_type == LPFC_NPIV_PORT) return 0; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); if (error) goto out; error = sysfs_create_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); if (error) goto out_remove_ctlreg_attr; return 0; out_remove_ctlreg_attr: sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); out: return error; } /** * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries * @vport: address of lpfc vport structure. **/ void lpfc_free_sysfs_attr(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* Virtual ports do not need ctrl_reg and mbox */ if (vport->port_type == LPFC_NPIV_PORT) return; sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); } /* * Dynamic FC Host Attributes Support */ /** * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_symbolic_name(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), sizeof fc_host_symbolic_name(shost)); } /** * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_port_id(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; /* note: fc_myDID already in cpu endianness */ fc_host_port_id(shost) = vport->fc_myDID; } /** * lpfc_get_host_port_type - Set the value of the scsi host port type * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_port_type(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (vport->port_type == LPFC_NPIV_PORT) { fc_host_port_type(shost) = FC_PORTTYPE_NPIV; } else if (lpfc_is_link_up(phba)) { if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; else fc_host_port_type(shost) = FC_PORTTYPE_LPORT; } else { if (vport->fc_flag & FC_FABRIC) fc_host_port_type(shost) = FC_PORTTYPE_NPORT; else fc_host_port_type(shost) = FC_PORTTYPE_PTP; } } else fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_port_state - Set the value of the scsi host port state * @shost: kernel scsi host pointer. 
**/ static void lpfc_get_host_port_state(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_OFFLINE_MODE) fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; else { switch (phba->link_state) { case LPFC_LINK_UNKNOWN: case LPFC_LINK_DOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: /* Links up, reports port state accordingly */ if (vport->port_state < LPFC_VPORT_READY) fc_host_port_state(shost) = FC_PORTSTATE_BYPASSED; else fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case LPFC_HBA_ERROR: fc_host_port_state(shost) = FC_PORTSTATE_ERROR; break; default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_speed - Set the value of the scsi host speed * @shost: kernel scsi host pointer. **/ static void lpfc_get_host_speed(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { switch(phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; case LPFC_LINK_SPEED_2GHZ: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case LPFC_LINK_SPEED_4GHZ: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case LPFC_LINK_SPEED_8GHZ: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case LPFC_LINK_SPEED_10GHZ: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case LPFC_LINK_SPEED_16GHZ: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; case LPFC_LINK_SPEED_32GHZ: fc_host_speed(shost) = FC_PORTSPEED_32GBIT; break; case LPFC_LINK_SPEED_64GHZ: fc_host_speed(shost) = FC_PORTSPEED_64GBIT; break; case LPFC_LINK_SPEED_128GHZ: fc_host_speed(shost) = FC_PORTSPEED_128GBIT; break; case LPFC_LINK_SPEED_256GHZ: fc_host_speed(shost) = FC_PORTSPEED_256GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { case LPFC_ASYNC_LINK_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; case LPFC_ASYNC_LINK_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case LPFC_ASYNC_LINK_SPEED_20GBPS: fc_host_speed(shost) = FC_PORTSPEED_20GBIT; break; case LPFC_ASYNC_LINK_SPEED_25GBPS: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; case LPFC_ASYNC_LINK_SPEED_40GBPS: fc_host_speed(shost) = FC_PORTSPEED_40GBIT; break; case LPFC_ASYNC_LINK_SPEED_100GBPS: fc_host_speed(shost) = FC_PORTSPEED_100GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } else fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; spin_unlock_irq(shost->host_lock); } /** * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name * @shost: kernel scsi host pointer. 
**/ static void lpfc_get_host_fabric_name (struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; u64 node_name; spin_lock_irq(shost->host_lock); if ((vport->port_state > LPFC_FLOGI) && ((vport->fc_flag & FC_FABRIC) || ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && (vport->fc_flag & FC_PUBLIC_LOOP)))) node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ node_name = 0; spin_unlock_irq(shost->host_lock); fc_host_fabric_name(shost) = node_name; } /** * lpfc_get_stats - Return statistical information about the adapter * @shost: kernel scsi host pointer. * * Notes: * NULL on error for link down, no mbox pool, sli2 active, * management not allowed, memory allocation error, or mbox error. * * Returns: * NULL for error * address of the adapter host statistics **/ static struct fc_host_statistics * lpfc_get_stats(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; /* * prevent udev from issuing mailbox commands until the port is * configured. */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return NULL; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return NULL; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return NULL; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmboxq->ctx_buf = NULL; pmboxq->vport = vport; if (vport->fc_flag & FC_OFFLINE_MODE) { rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); if (rc != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } } else { rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } } memset(hs, 0, sizeof (struct fc_host_statistics)); hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; /* * The MBX_READ_STATUS returns tx_k_bytes which has to be * converted to words. * * Check if extended byte flag is set, to know when to collect upper * bits of 64 bit wide statistics counter. 
*/ if (pmb->un.varRdStatus.xkb & RD_ST_XKB) { hs->tx_words = (u64) ((((u64)(pmb->un.varRdStatus.xmit_xkb & RD_ST_XMIT_XKB_MASK) << 32) | (u64)pmb->un.varRdStatus.xmitByteCnt) * (u64)256); hs->rx_words = (u64) ((((u64)(pmb->un.varRdStatus.rcv_xkb & RD_ST_RCV_XKB_MASK) << 32) | (u64)pmb->un.varRdStatus.rcvByteCnt) * (u64)256); } else { hs->tx_words = (uint64_t) ((uint64_t)pmb->un.varRdStatus.xmitByteCnt * (uint64_t)256); hs->rx_words = (uint64_t) ((uint64_t)pmb->un.varRdStatus.rcvByteCnt * (uint64_t)256); } memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb->mbxCommand = MBX_READ_LNK_STAT; pmb->mbxOwner = OWN_HOST; pmboxq->ctx_buf = NULL; pmboxq->vport = vport; if (vport->fc_flag & FC_OFFLINE_MODE) { rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); if (rc != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } } else { rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } } hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn); hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm); hs->link_failure_count -= lso->link_failure_count; hs->loss_of_sync_count -= lso->loss_of_sync_count; hs->loss_of_signal_count -= lso->loss_of_signal_count; hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; hs->invalid_tx_word_count -= lso->invalid_tx_word_count; hs->invalid_crc_count -= lso->invalid_crc_count; hs->error_frames -= lso->error_frames; if (phba->hba_flag & HBA_FCOE_MODE) { hs->lip_count = -1; hs->nos_count = (phba->link_events >> 1); hs->nos_count -= lso->link_events; } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; mempool_free(pmboxq, phba->mbox_mem_pool); return hs; } /** * lpfc_reset_stats - Copy the adapter link stats information * @shost: kernel scsi host pointer. 
**/ static void lpfc_reset_stats(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return; memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmb->un.varWords[0] = 0x1; /* reset request */ pmboxq->ctx_buf = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); if (rc != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } } else { rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return; } } memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb->mbxCommand = MBX_READ_LNK_STAT; pmb->mbxOwner = OWN_HOST; pmboxq->ctx_buf = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); if (rc != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } } else { rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return; } } lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; lso->error_frames = pmb->un.varRdLnk.crcCnt; if (phba->hba_flag & HBA_FCOE_MODE) lso->link_events = (phba->link_events >> 1); else lso->link_events = (phba->fc_eventTag >> 1); atomic64_set(&phba->cgn_acqe_stat.warn, 0); atomic64_set(&phba->cgn_acqe_stat.alarm, 0); memset(&shost_to_fc_host(shost)->fpin_stats, 0, sizeof(shost_to_fc_host(shost)->fpin_stats)); psli->stats_start = ktime_get_seconds(); mempool_free(pmboxq, phba->mbox_mem_pool); return; } /* * The LPFC driver treats linkdown handling as target loss events so there * are no sysfs handlers for link_down_tmo. */ /** * lpfc_get_node_by_target - Return the nodelist for a target * @starget: kernel scsi target pointer. * * Returns: * address of the node list if found * NULL target not found **/ static struct lpfc_nodelist * lpfc_get_node_by_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_nodelist *ndlp; spin_lock_irq(shost->host_lock); /* Search for this, mapped, target ID */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && starget->id == ndlp->nlp_sid) { spin_unlock_irq(shost->host_lock); return ndlp; } } spin_unlock_irq(shost->host_lock); return NULL; } /** * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1 * @starget: kernel scsi target pointer. **/ static void lpfc_get_starget_port_id(struct scsi_target *starget) { struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); fc_starget_port_id(starget) = ndlp ? 
ndlp->nlp_DID : -1; } /** * lpfc_get_starget_node_name - Set the target node name * @starget: kernel scsi target pointer. * * Description: Set the target node name to the ndlp node name wwn or zero. **/ static void lpfc_get_starget_node_name(struct scsi_target *starget) { struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); fc_starget_node_name(starget) = ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0; } /** * lpfc_get_starget_port_name - Set the target port name * @starget: kernel scsi target pointer. * * Description: set the target port name to the ndlp port name wwn or zero. **/ static void lpfc_get_starget_port_name(struct scsi_target *starget) { struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget); fc_starget_port_name(starget) = ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; } /** * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo * @rport: fc rport address. * @timeout: new value for dev loss tmo. * * Description: * If timeout is non zero set the dev_loss_tmo to timeout, else set * dev_loss_tmo to one. **/ static void lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { struct lpfc_rport_data *rdata = rport->dd_data; struct lpfc_nodelist *ndlp = rdata->pnode; #if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_nvme_rport *nrport = NULL; #endif if (timeout) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = 1; if (!ndlp) { dev_info(&rport->dev, "Cannot find remote node to " "set rport dev loss tmo, port_id x%x\n", rport->port_id); return; } #if (IS_ENABLED(CONFIG_NVME_FC)) nrport = lpfc_ndlp_get_nrport(ndlp); if (nrport && nrport->remoteport) nvme_fc_set_remoteport_devloss(nrport->remoteport, rport->dev_loss_tmo); #endif } /* * lpfc_rport_show_function - Return rport target information * * Description: * Macro that uses field to generate a function with the name lpfc_show_rport_ * * lpfc_show_rport_##field: returns the bytes formatted in buf * @cdev: class converted to an fc_rport. * @buf: on return contains the target_field or zero. * * Returns: size of formatted string. **/ #define lpfc_rport_show_function(field, format_string, sz, cast) \ static ssize_t \ lpfc_show_rport_##field (struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ struct lpfc_rport_data *rdata = rport->hostdata; \ return scnprintf(buf, sz, format_string, \ (rdata->target) ? cast rdata->target->field : 0); \ } #define lpfc_rport_rd_attr(field, format_string, sz) \ lpfc_rport_show_function(field, format_string, sz, ) \ static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) /** * lpfc_set_vport_symbolic_name - Set the vport's symbolic name * @fc_vport: The fc_vport who's symbolic name has been changed. * * Description: * This function is called by the transport after the @fc_vport's symbolic name * has been changed. This function re-registers the symbolic name with the * switch to propagate the change into the fabric if the vport is active. **/ static void lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; if (vport->port_state == LPFC_VPORT_READY) lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); } /** * lpfc_hba_log_verbose_init - Set hba's log verbose level * @phba: Pointer to lpfc_hba struct. * @verbose: Verbose level to set. 
* * This function is called by the lpfc_get_cfgparam() routine to set the * module lpfc_log_verbose into the @phba cfg_log_verbose for use with * log message according to the module's lpfc_log_verbose parameter setting * before hba port or vport created. **/ static void lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) { phba->cfg_log_verbose = verbose; } struct fc_function_template lpfc_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ .get_host_port_id = lpfc_get_host_port_id, .show_host_port_id = 1, .get_host_port_type = lpfc_get_host_port_type, .show_host_port_type = 1, .get_host_port_state = lpfc_get_host_port_state, .show_host_port_state = 1, /* active_fc4s is shown but doesn't change (thus no get function) */ .show_host_active_fc4s = 1, .get_host_speed = lpfc_get_host_speed, .show_host_speed = 1, .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. */ .get_fc_host_stats = lpfc_get_stats, .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_port_id = lpfc_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = lpfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = lpfc_get_starget_port_name, .show_starget_port_name = 1, .issue_fc_host_lip = lpfc_issue_lip, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, .dd_fcvport_size = sizeof(struct lpfc_vport *), .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, .bsg_request = lpfc_bsg_request, .bsg_timeout = lpfc_bsg_timeout, }; struct fc_function_template lpfc_vport_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ .get_host_port_id = lpfc_get_host_port_id, .show_host_port_id = 1, .get_host_port_type = lpfc_get_host_port_type, .show_host_port_type = 1, .get_host_port_state = lpfc_get_host_port_state, .show_host_port_state = 1, /* active_fc4s is shown but doesn't change (thus no get function) */ .show_host_active_fc4s = 1, .get_host_speed = lpfc_get_host_speed, .show_host_speed = 1, .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. 
*/ .get_fc_host_stats = lpfc_get_stats, .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_starget_port_id = lpfc_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = lpfc_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = lpfc_get_starget_port_name, .show_starget_port_name = 1, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, .vport_disable = lpfc_vport_disable, .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, }; /** * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE * Mode * @phba: lpfc_hba pointer. **/ static void lpfc_get_hba_function_mode(struct lpfc_hba *phba) { /* If the adapter supports FCoE mode */ switch (phba->pcidev->device) { case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: case PCI_DEVICE_ID_LANCER_FCOE: case PCI_DEVICE_ID_LANCER_FCOE_VF: case PCI_DEVICE_ID_ZEPHYR_DCSP: case PCI_DEVICE_ID_TIGERSHARK: case PCI_DEVICE_ID_TOMCAT: phba->hba_flag |= HBA_FCOE_MODE; break; default: /* for others, clear the flag */ phba->hba_flag &= ~HBA_FCOE_MODE; } } /** * lpfc_get_cfgparam - Used during probe_one to init the adapter structure * @phba: lpfc_hba pointer. **/ void lpfc_get_cfgparam(struct lpfc_hba *phba) { lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); lpfc_ns_query_init(phba, lpfc_ns_query); lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); lpfc_cr_delay_init(phba, lpfc_cr_delay); lpfc_cr_count_init(phba, lpfc_cr_count); lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl); lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); lpfc_ack0_init(phba, lpfc_ack0); lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing); lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp); lpfc_fdmi_on_init(phba, lpfc_fdmi_on); lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_nvme_oas_init(phba, lpfc_nvme_oas); lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_force_rscn_init(phba, lpfc_force_rscn); lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold); lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_EnableXLane_init(phba, lpfc_EnableXLane); /* VMID Inits */ lpfc_max_vmid_init(phba, lpfc_max_vmid); lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout); lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header); lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging); if (phba->sli_rev != LPFC_SLI_REV4) phba->cfg_EnableXLane = 0; lpfc_XLanePriority_init(phba, lpfc_XLanePriority); memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); phba->cfg_oas_lun_state = 0; 
phba->cfg_oas_lun_status = 0; phba->cfg_oas_flags = 0; phba->cfg_oas_priority = 0; lpfc_enable_bg_init(phba, lpfc_enable_bg); lpfc_prot_mask_init(phba, lpfc_prot_mask); lpfc_prot_guard_init(phba, lpfc_prot_guard); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; else phba->cfg_poll = lpfc_poll; /* Get the function mode */ lpfc_get_hba_function_mode(phba); /* BlockGuard allowed for FC only. */ if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0581 BlockGuard feature not supported\n"); /* If set, clear the BlockGuard support param */ phba->cfg_enable_bg = 0; } else if (phba->cfg_enable_bg) { phba->sli3_options |= LPFC_SLI3_BG_ENABLED; } lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); /* Initialize first burst. Target vs Initiator are different. */ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); lpfc_hdw_queue_init(phba, lpfc_hdw_queue); lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); lpfc_enable_dpp_init(phba, lpfc_enable_dpp); lpfc_enable_mi_init(phba, lpfc_enable_mi); phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF; phba->cmf_active_mode = LPFC_CFG_OFF; if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX || lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN) lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ if (phba->sli_rev != LPFC_SLI_REV4) { /* NVME only supported on SLI4 */ phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; phba->cfg_enable_bbcr = 0; phba->cfg_xri_rebalancing = 0; } else { /* We MUST have FCP support */ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; } phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1; phba->cfg_enable_pbde = 0; /* A value of 0 means use the number of CPUs found in the system */ if (phba->cfg_hdw_queue == 0) phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; if (phba->cfg_irq_chann == 0) phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; if (phba->cfg_irq_chann > phba->cfg_hdw_queue && phba->sli_rev == LPFC_SLI_REV4) phba->cfg_irq_chann = phba->cfg_hdw_queue; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_delay_discovery_init(phba, lpfc_delay_discovery); lpfc_sli_mode_init(phba, lpfc_sli_mode); lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize); lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level); lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func); return; } /** * lpfc_nvme_mod_param_dep - Adjust module parameter value based on * dependencies between protocols and roles. * @phba: lpfc_hba pointer. 
**/ void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) { int logit = 0; if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) { phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; logit = 1; } if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) { phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; logit = 1; } if (phba->cfg_irq_chann > phba->cfg_hdw_queue) { phba->cfg_irq_chann = phba->cfg_hdw_queue; logit = 1; } if (logit) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2006 Reducing Queues - CPU limitation: " "IRQ %d HDWQ %d\n", phba->cfg_irq_chann, phba->cfg_hdw_queue); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && phba->nvmet_support) { phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6013 %s x%x fb_size x%x, fb_max x%x\n", "NVME Target PRLI ACC enable_fb ", phba->cfg_nvme_enable_fb, phba->cfg_nvmet_fb_size, LPFC_NVMET_FB_SZ_MAX); if (phba->cfg_nvme_enable_fb == 0) phba->cfg_nvmet_fb_size = 0; else { if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX) phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX; } if (!phba->cfg_nvmet_mrq) phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) { phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, "6018 Adjust lpfc_nvmet_mrq to %d\n", phba->cfg_nvmet_mrq); } if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; } else { /* Not NVME Target mode. Turn off Target parameters. */ phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; phba->cfg_nvmet_fb_size = 0; } } /** * lpfc_get_vport_cfgparam - Used during port create, init the vport structure * @vport: lpfc_vport pointer. **/ void lpfc_get_vport_cfgparam(struct lpfc_vport *vport) { lpfc_log_verbose_init(vport, lpfc_log_verbose); lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth); lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth); lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo); lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo); lpfc_peer_port_login_init(vport, lpfc_peer_port_login); lpfc_restrict_login_init(vport, lpfc_restrict_login); lpfc_fcp_class_init(vport, lpfc_fcp_class); lpfc_use_adisc_init(vport, lpfc_use_adisc); lpfc_first_burst_size_init(vport, lpfc_first_burst_size); lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); lpfc_discovery_threads_init(vport, lpfc_discovery_threads); lpfc_max_luns_init(vport, lpfc_max_luns); lpfc_scan_down_init(vport, lpfc_scan_down); lpfc_enable_da_id_init(vport, lpfc_enable_da_id); return; }
linux-master
drivers/scsi/lpfc/lpfc_attr.c
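/*
 * Illustrative sketch (not part of the driver): the queue-count clamping
 * performed by lpfc_nvme_mod_param_dep() in the lpfc_attr.c excerpt above,
 * restated as a standalone userspace function so the ordering of the rules
 * is easy to follow.  The function name clamp_queue_cfg() and the MRQ cap
 * value of 16 are assumptions made for this example only; in the driver the
 * same checks operate on cfg_hdw_queue, cfg_irq_chann and cfg_nvmet_mrq,
 * and the MRQ adjustment applies only when NVME target mode is enabled.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MRQ_MAX 16	/* stand-in for LPFC_NVMET_MRQ_MAX */

static bool clamp_queue_cfg(unsigned int ncpus, unsigned int *hdwq,
			    unsigned int *irq_chann, unsigned int *nvmet_mrq)
{
	bool reduced = false;

	/* Neither queue count may exceed the number of present CPUs. */
	if (*hdwq > ncpus) {
		*hdwq = ncpus;
		reduced = true;
	}
	if (*irq_chann > ncpus) {
		*irq_chann = ncpus;
		reduced = true;
	}
	/* IRQ channels may never exceed the hardware queue count. */
	if (*irq_chann > *hdwq) {
		*irq_chann = *hdwq;
		reduced = true;
	}
	/* MRQ count defaults to the hardware queue count and is capped. */
	if (*nvmet_mrq == 0 || *nvmet_mrq > *hdwq)
		*nvmet_mrq = *hdwq;
	if (*nvmet_mrq > EXAMPLE_MRQ_MAX)
		*nvmet_mrq = EXAMPLE_MRQ_MAX;

	return reduced;
}

int main(void)
{
	unsigned int hdwq = 32, irq = 48, mrq = 0;

	/* e.g. a 24-CPU system forces both counts down and derives the MRQs */
	if (clamp_queue_cfg(24, &hdwq, &irq, &mrq))
		printf("reduced: hdwq %u irq %u mrq %u\n", hdwq, irq, mrq);
	return 0;
}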
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/sched/signal.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" #include "lpfc_vport.h" inline void lpfc_vport_set_state(struct lpfc_vport *vport, enum fc_vport_state new_state) { struct fc_vport *fc_vport = vport->fc_vport; if (fc_vport) { /* * When the transport defines fc_vport_set state we will replace * this code with the following line */ /* fc_vport_set_state(fc_vport, new_state); */ if (new_state != FC_VPORT_INITIALIZING) fc_vport->vport_last_state = fc_vport->vport_state; fc_vport->vport_state = new_state; } /* for all the error states we will set the invternal state to FAILED */ switch (new_state) { case FC_VPORT_NO_FABRIC_SUPP: case FC_VPORT_NO_FABRIC_RSCS: case FC_VPORT_FABRIC_LOGOUT: case FC_VPORT_FABRIC_REJ_WWN: case FC_VPORT_FAILED: vport->port_state = LPFC_VPORT_FAILED; break; case FC_VPORT_LINKDOWN: vport->port_state = LPFC_VPORT_UNKNOWN; break; default: /* do nothing */ break; } } int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; spin_lock_irq(&phba->hbalock); /* Start at bit 1 because vpi zero is reserved for the physical port */ vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1); if (vpi > phba->max_vpi) vpi = 0; else set_bit(vpi, phba->vpi_bmask); if (phba->sli_rev == LPFC_SLI_REV4) phba->sli4_hba.max_cfg_param.vpi_used++; spin_unlock_irq(&phba->hbalock); return vpi; } static void lpfc_free_vpi(struct lpfc_hba *phba, int vpi) { if (vpi == 0) return; spin_lock_irq(&phba->hbalock); clear_bit(vpi, phba->vpi_bmask); if (phba->sli_rev == LPFC_SLI_REV4) phba->sli4_hba.max_cfg_param.vpi_used--; spin_unlock_irq(&phba->hbalock); } static int lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) { LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_dmabuf *mp; int rc; pmb = 
mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { return -ENOMEM; } mb = &pmb->u.mb; rc = lpfc_read_sparam(phba, pmb, vport->vpi); if (rc) { mempool_free(pmb, phba->mbox_mem_pool); return -ENOMEM; } /* * Wait for the read_sparams mailbox to complete. Driver needs * this per vport to start the FDISC. If the mailbox fails, * just cleanup and return an error unless the failure is a * mailbox timeout. For MBX_TIMEOUT, allow the default * mbox completion handler to take care of the cleanup. This * is safe as the mailbox command isn't one that triggers * another mailbox. */ pmb->vport = vport; rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (signal_pending(current)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1830 Signal aborted mbxCmd x%x\n", mb->mbxCommand); if (rc != MBX_TIMEOUT) lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); return -EINTR; } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1818 VPort failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x, rc = x%x\n", mb->mbxCommand, mb->mbxStatus, rc); if (rc != MBX_TIMEOUT) lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); return -EIO; } } mp = (struct lpfc_dmabuf *)pmb->ctx_buf; memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof (struct lpfc_name)); memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof (struct lpfc_name)); lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); return 0; } static int lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn, const char *name_type) { /* ensure that IEEE format 1 addresses * contain zeros in bits 59-48 */ if (!((wwn->u.wwn[0] >> 4) == 1 && ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0))) return 1; lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1822 Invalid %s: %02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x\n", name_type, wwn->u.wwn[0], wwn->u.wwn[1], wwn->u.wwn[2], wwn->u.wwn[3], wwn->u.wwn[4], wwn->u.wwn[5], wwn->u.wwn[6], wwn->u.wwn[7]); return 0; } static int lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) { struct lpfc_vport *vport; unsigned long flags; spin_lock_irqsave(&phba->port_list_lock, flags); list_for_each_entry(vport, &phba->port_list, listentry) { if (vport == new_vport) continue; /* If they match, return not unique */ if (memcmp(&vport->fc_sparam.portName, &new_vport->fc_sparam.portName, sizeof(struct lpfc_name)) == 0) { spin_unlock_irqrestore(&phba->port_list_lock, flags); return 0; } } spin_unlock_irqrestore(&phba->port_list_lock, flags); return 1; } /** * lpfc_discovery_wait - Wait for driver discovery to quiesce * @vport: The virtual port for which this call is being executed. * * This driver calls this routine specifically from lpfc_vport_delete * to enforce a synchronous execution of vport * delete relative to discovery activities. The * lpfc_vport_delete routine should not return until it * can reasonably guarantee that discovery has quiesced. * Post FDISC LOGO, the driver must wait until its SAN teardown is * complete and all resources recovered before allowing * cleanup. * * This routine does not require any locks held. 
**/ static void lpfc_discovery_wait(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; uint32_t wait_flags = 0; unsigned long wait_time_max; unsigned long start_time; wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE | FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO; /* * The time constraint on this loop is a balance between the * fabric RA_TOV value and dev_loss tmo. The driver's * devloss_tmo is 10 giving this loop a 3x multiplier minimally. */ wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); wait_time_max += jiffies; start_time = jiffies; while (time_before(jiffies, wait_time_max)) { if ((vport->num_disc_nodes > 0) || (vport->fc_flag & wait_flags) || ((vport->port_state > LPFC_VPORT_FAILED) && (vport->port_state < LPFC_VPORT_READY))) { lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1833 Vport discovery quiesce Wait:" " state x%x fc_flags x%x" " num_nodes x%x, waiting 1000 msecs" " total wait msecs x%x\n", vport->port_state, vport->fc_flag, vport->num_disc_nodes, jiffies_to_msecs(jiffies - start_time)); msleep(1000); } else { /* Base case. Wait variants satisfied. Break out */ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1834 Vport discovery quiesced:" " state x%x fc_flags x%x" " wait msecs x%x\n", vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); break; } } if (time_after(jiffies, wait_time_max)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1835 Vport discovery quiesce failed:" " state x%x fc_flags x%x wait msecs x%x\n", vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); } int lpfc_vport_create(struct fc_vport *fc_vport, bool disable) { struct lpfc_nodelist *ndlp; struct Scsi_Host *shost = fc_vport->shost; struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = pport->phba; struct lpfc_vport *vport = NULL; int instance; int vpi; int rc = VPORT_ERROR; int status; if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1808 Create VPORT failed: " "NPIV is not enabled: SLImode:%d\n", phba->sli_rev); rc = VPORT_INVAL; goto error_out; } /* NPIV is not supported if HBA has NVME Target enabled */ if (phba->nvmet_support) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3189 Create VPORT failed: " "NPIV is not supported on NVME Target\n"); rc = VPORT_INVAL; goto error_out; } vpi = lpfc_alloc_vpi(phba); if (vpi == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1809 Create VPORT failed: " "Max VPORTs (%d) exceeded\n", phba->max_vpi); rc = VPORT_NORESOURCES; goto error_out; } /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1810 Create VPORT failed: Cannot get " "instance number\n"); lpfc_free_vpi(phba, vpi); rc = VPORT_NORESOURCES; goto error_out; } vport = lpfc_create_port(phba, instance, &fc_vport->dev); if (!vport) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1811 Create VPORT failed: vpi x%x\n", vpi); lpfc_free_vpi(phba, vpi); rc = VPORT_NORESOURCES; goto error_out; } vport->vpi = vpi; lpfc_debugfs_initialize(vport); if ((status = lpfc_vport_sparm(phba, vport))) { if (status == -EINTR) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1831 Create VPORT Interrupted.\n"); rc = VPORT_ERROR; } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1813 Create VPORT failed. 
" "Cannot get sparam\n"); rc = VPORT_NORESOURCES; } lpfc_free_vpi(phba, vpi); destroy_port(vport); goto error_out; } u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn); memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1821 Create VPORT failed. " "Invalid WWN format\n"); lpfc_free_vpi(phba, vpi); destroy_port(vport); rc = VPORT_INVAL; goto error_out; } if (!lpfc_unique_wwpn(phba, vport)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1823 Create VPORT failed. " "Duplicate WWN on HBA\n"); lpfc_free_vpi(phba, vpi); destroy_port(vport); rc = VPORT_INVAL; goto error_out; } /* Create binary sysfs attribute for vport */ lpfc_alloc_sysfs_attr(vport); /* Set the DFT_LUN_Q_DEPTH accordingly */ vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; /* Only the physical port can support NVME for now */ vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP; *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; /* At this point we are fully registered with SCSI Layer. */ vport->load_flag |= FC_ALLOW_FDMI; if (phba->cfg_enable_SmartSAN || (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { /* Setup appropriate attribute masks */ vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; vport->fdmi_port_mask = phba->pport->fdmi_port_mask; } /* * In SLI4, the vpi must be activated before it can be used * by the port. */ if ((phba->sli_rev == LPFC_SLI_REV4) && (pport->fc_flag & FC_VFI_REGISTERED)) { rc = lpfc_sli4_init_vpi(vport); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1838 Failed to INIT_VPI on vpi %d " "status %d\n", vpi, rc); rc = VPORT_NORESOURCES; lpfc_free_vpi(phba, vpi); goto error_out; } } else if (phba->sli_rev == LPFC_SLI_REV4) { /* * Driver cannot INIT_VPI now. Set the flags to * init_vpi when reg_vfi complete. */ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); rc = VPORT_OK; goto out; } if ((phba->link_state < LPFC_LINK_UP) || (pport->port_state < LPFC_FABRIC_CFG_LINK) || (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); rc = VPORT_OK; goto out; } if (disable) { lpfc_vport_set_state(vport, FC_VPORT_DISABLED); rc = VPORT_OK; goto out; } /* Use the Physical nodes Fabric NDLP to determine if the link is * up and ready to FDISC. 
*/ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { lpfc_set_disctmo(vport); lpfc_initial_fdisc(vport); } else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0262 No NPIV Fabric support\n"); } } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } rc = VPORT_OK; out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1825 Vport Created.\n"); lpfc_host_attrib_init(lpfc_shost_from_vport(vport)); error_out: return rc; } static int lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { int rc; struct lpfc_hba *phba = vport->phba; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); spin_lock_irq(&ndlp->lock); if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) && !ndlp->logo_waitq) { ndlp->logo_waitq = &waitq; ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag |= NLP_ISSUE_LOGO; ndlp->save_flags |= NLP_WAIT_FOR_LOGO; } spin_unlock_irq(&ndlp->lock); rc = lpfc_issue_els_npiv_logo(vport, ndlp); if (!rc) { wait_event_timeout(waitq, (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)), msecs_to_jiffies(phba->fc_ratov * 2000)); if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)) goto logo_cmpl; /* LOGO wait failed. Correct status. */ rc = -EINTR; } else { rc = -EIO; } /* Error - clean up node flags. */ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); logo_cmpl: lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1824 Issue LOGO completes with status %d\n", rc); spin_lock_irq(&ndlp->lock); ndlp->logo_waitq = NULL; spin_unlock_irq(&ndlp->lock); return rc; } static int disable_vport(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* Can't disable during an outstanding delete. */ if (vport->load_flag & FC_UNLOADING) return 0; ndlp = lpfc_findnode_did(vport, Fabric_DID); if (ndlp && phba->link_state >= LPFC_LINK_UP) (void)lpfc_send_npiv_logo(vport, ndlp); lpfc_sli_host_down(vport); lpfc_cleanup_rpis(vport, 0); lpfc_stop_vport_timers(vport); lpfc_unreg_all_rpis(vport); lpfc_unreg_default_rpis(vport); /* * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the * scsi_host_put() to release the vport. */ lpfc_mbx_unreg_vpi(vport); if (phba->sli_rev == LPFC_SLI_REV4) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } lpfc_vport_set_state(vport, FC_VPORT_DISABLED); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1826 Vport Disabled.\n"); return VPORT_OK; } static int enable_vport(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if ((phba->link_state < LPFC_LINK_UP) || (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); return VPORT_OK; } spin_lock_irq(shost->host_lock); vport->load_flag |= FC_LOADING; if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { spin_unlock_irq(shost->host_lock); lpfc_issue_init_vpi(vport); goto out; } vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); /* Use the Physical nodes Fabric NDLP to determine if the link is * up and ready to FDISC. 
*/ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { lpfc_set_disctmo(vport); lpfc_initial_fdisc(vport); } else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0264 No NPIV Fabric support\n"); } } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1827 Vport Enabled.\n"); return VPORT_OK; } int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable) { if (disable) return disable_vport(fc_vport); else return enable_vport(fc_vport); } int lpfc_vport_delete(struct fc_vport *fc_vport) { struct lpfc_nodelist *ndlp = NULL; struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; int rc; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1812 vport_delete failed: Cannot delete " "physical host\n"); return VPORT_ERROR; } /* If the vport is a static vport fail the deletion. */ if ((vport->vport_flag & STATIC_VPORT) && !(phba->pport->load_flag & FC_UNLOADING)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1837 vport_delete failed: Cannot delete " "static vport.\n"); return VPORT_ERROR; } spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); /* * If we are not unloading the driver then prevent the vport_delete * from happening until after this vport's discovery is finished. */ if (!(phba->pport->load_flag & FC_UNLOADING)) { int check_count = 0; while (check_count < ((phba->fc_ratov * 3) + 3) && vport->port_state > LPFC_VPORT_FAILED && vport->port_state < LPFC_VPORT_READY) { check_count++; msleep(1000); } if (vport->port_state > LPFC_VPORT_FAILED && vport->port_state < LPFC_VPORT_READY) return -EAGAIN; } /* * Take early refcount for outstanding I/O requests we schedule during * delete processing for unreg_vpi. Always keep this before * scsi_remove_host() as we can no longer obtain a reference through * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. */ if (!scsi_host_get(shost)) return VPORT_INVAL; lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); /* Remove FC host to break driver binding. */ fc_remove_host(shost); scsi_remove_host(shost); /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) goto skip_logo; if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && phba->link_state >= LPFC_LINK_UP && phba->fc_topology != LPFC_TOPOLOGY_LOOP) { if (vport->cfg_enable_da_id) { /* Send DA_ID and wait for a completion. */ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0); if (rc) { lpfc_printf_log(vport->phba, KERN_WARNING, LOG_VPORT, "1829 CT command failed to " "delete objects on fabric, " "rc %d\n", rc); } } /* * If the vpi is not registered, then a valid FDISC doesn't * exist and there is no need for a ELS LOGO. Just cleanup * the ndlp. */ if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) goto skip_logo; /* Issue a Fabric LOGO to cleanup fabric resources. */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) goto skip_logo; rc = lpfc_send_npiv_logo(vport, ndlp); if (rc) goto skip_logo; } if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_discovery_wait(vport); skip_logo: lpfc_cleanup(vport); /* Remove scsi host now. The nodes are cleaned up. 
*/ lpfc_sli_host_down(vport); lpfc_stop_vport_timers(vport); if (!(phba->pport->load_flag & FC_UNLOADING)) { lpfc_unreg_all_rpis(vport); lpfc_unreg_default_rpis(vport); /* * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) * does the scsi_host_put() to release the vport. */ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); } else { scsi_host_put(shost); } lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; spin_lock_irq(&phba->port_list_lock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->port_list_lock); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1828 Vport Deleted.\n"); scsi_host_put(shost); return VPORT_OK; } struct lpfc_vport ** lpfc_create_vport_work_array(struct lpfc_hba *phba) { struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; spin_lock_irq(&phba->port_list_lock); list_for_each_entry(port_iterator, &phba->port_list, listentry) { if (port_iterator->load_flag & FC_UNLOADING) continue; if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_TRACE_EVENT, "1801 Create vport work array FAILED: " "cannot do scsi_host_get\n"); continue; } vports[index++] = port_iterator; } spin_unlock_irq(&phba->port_list_lock); return vports; } void lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) { int i; if (vports == NULL) return; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) scsi_host_put(lpfc_shost_from_vport(vports[i])); kfree(vports); }
linux-master
drivers/scsi/lpfc/lpfc_vport.c
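/*
 * Illustrative sketch (not driver code): the bounded polling pattern used by
 * lpfc_discovery_wait() in the lpfc_vport.c excerpt above, restated as a
 * standalone userspace function.  The wait budget of (fc_ratov * 3) + 3
 * seconds and the one-second poll interval come from that routine; the
 * function name wait_for_quiesce() and the predicate callback are
 * assumptions made for this example only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Returns true if discovery quiesced within the budget, false on timeout. */
static bool wait_for_quiesce(unsigned int fc_ratov,
			     bool (*quiesced)(void *), void *ctx)
{
	time_t deadline = time(NULL) + (time_t)(fc_ratov * 3) + 3;

	while (time(NULL) < deadline) {
		if (quiesced(ctx))
			return true;	/* base case: wait variants satisfied */
		sleep(1);		/* mirrors the driver's msleep(1000) */
	}
	return quiesced(ctx);		/* final check before reporting timeout */
}

static bool always_ready(void *ctx)
{
	(void)ctx;
	return true;
}

int main(void)
{
	/* fc_ratov of 10 matches the default mentioned in the driver comment */
	if (!wait_for_quiesce(10, always_ready, NULL))
		fprintf(stderr, "discovery quiesce failed\n");
	return 0;
}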
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * ********************************************************************/ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/unaligned.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_version.h" #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_nvme.h" #include "lpfc_scsi.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* NVME initiator-based functions */ static struct lpfc_io_buf * lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx, int expedite); static void lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); static struct nvme_fc_port_template lpfc_nvme_template; /** * lpfc_nvme_create_queue - * @pnvme_lport: Transport localport that LS is to be issued from * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. * @qsize: Size of the queue in bytes * @handle: An opaque driver handle used in follow-up calls. * * Driver registers this routine to preallocate and initialize any * internal data structures to bind the @qidx to its internal IO queues. * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. * * Return value : * 0 - Success * -EINVAL - Unsupported input value. 
* -ENOMEM - Could not alloc necessary memory **/ static int lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, unsigned int qidx, u16 qsize, void **handle) { struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; struct lpfc_nvme_qhandle *qhandle; char *str; if (!pnvme_lport->private) return -ENOMEM; lport = (struct lpfc_nvme_lport *)pnvme_lport->private; vport = lport->vport; if (!vport || vport->load_flag & FC_UNLOADING || vport->phba->hba_flag & HBA_IOQ_FLUSH) return -ENODEV; qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); if (qhandle == NULL) return -ENOMEM; qhandle->cpu_id = raw_smp_processor_id(); qhandle->qidx = qidx; /* * NVME qidx == 0 is the admin queue, so both admin queue * and first IO queue will use MSI-X vector and associated * EQ/CQ/WQ at index 0. After that they are sequentially assigned. */ if (qidx) { str = "IO "; /* IO queue */ qhandle->index = ((qidx - 1) % lpfc_nvme_template.max_hw_queues); } else { str = "ADM"; /* Admin queue */ qhandle->index = qidx; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6073 Binding %s HdwQueue %d (cpu %d) to " "hdw_queue %d qhandle x%px\n", str, qidx, qhandle->cpu_id, qhandle->index, qhandle); *handle = (void *)qhandle; return 0; } /** * lpfc_nvme_delete_queue - * @pnvme_lport: Transport localport that LS is to be issued from * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. * @handle: An opaque driver handle from lpfc_nvme_create_queue * * Driver registers this routine to free * any internal data structures to bind the @qidx to its internal * IO queues. * * Return value : * 0 - Success * TODO: What are the failure codes. **/ static void lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, unsigned int qidx, void *handle) { struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; if (!pnvme_lport->private) return; lport = (struct lpfc_nvme_lport *)pnvme_lport->private; vport = lport->vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", lport, qidx, handle); kfree(handle); } static void lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) { struct lpfc_nvme_lport *lport = localport->private; lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, "6173 localport x%px delete complete\n", lport); /* release any threads waiting for the unreg to complete */ if (lport->vport->localport) complete(lport->lport_unreg_cmp); } /* lpfc_nvme_remoteport_delete * * @remoteport: Pointer to an nvme transport remoteport instance. * * This is a template downcall. NVME transport calls this function * when it has completed the unregistration of a previously * registered remoteport. * * Return value : * None */ static void lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct lpfc_nvme_rport *rport = remoteport->private; struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; u32 fc4_xpt_flags; ndlp = rport->ndlp; if (!ndlp) { pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n", __func__, rport, remoteport); goto rport_err; } vport = ndlp->vport; if (!vport) { pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n", __func__, ndlp, ndlp->nlp_state, rport); goto rport_err; } fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD; /* Remove this rport from the lport's list - memory is owned by the * transport. Remove the ndlp reference for the NVME transport before * calling state machine to remove the node. 
*/ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6146 remoteport delete of remoteport x%px, ndlp x%px " "DID x%x xflags x%x\n", remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags); spin_lock_irq(&ndlp->lock); /* The register rebind might have occurred before the delete * downcall. Guard against this race. */ if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); spin_unlock_irq(&ndlp->lock); /* On a devloss timeout event, one more put is executed provided the * NVME and SCSI rport unregister requests are complete. */ if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); rport_err: return; } /** * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request * @phba: pointer to lpfc hba data structure. * @axchg: pointer to exchange context for the NVME LS request * * This routine is used for processing an asychronously received NVME LS * request. Any remaining validation is done and the LS is then forwarded * to the nvme-fc transport via nvme_fc_rcv_ls_req(). * * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing) * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done. * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. * * Returns 0 if LS was handled and delivered to the transport * Returns 1 if LS failed to be handled and should be dropped */ int lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, struct lpfc_async_xchg_ctx *axchg) { #if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_vport *vport; struct lpfc_nvme_rport *lpfc_rport; struct nvme_fc_remote_port *remoteport; struct lpfc_nvme_lport *lport; uint32_t *payload = axchg->payload; int rc; vport = axchg->ndlp->vport; lpfc_rport = axchg->ndlp->nrport; if (!lpfc_rport) return -EINVAL; remoteport = lpfc_rport->remoteport; if (!vport->localport || vport->phba->hba_flag & HBA_IOQ_FLUSH) return -EINVAL; lport = vport->localport->private; if (!lport) return -EINVAL; rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload, axchg->size); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x " "%08x %08x %08x\n", axchg->size, rc, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5)); if (!rc) return 0; #endif return 1; } /** * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME * LS request. * @phba: Pointer to HBA context object * @vport: The local port that issued the LS * @cmdwqe: Pointer to driver command WQE object. * @wcqe: Pointer to driver response CQE object. * * This function is the generic completion handler for NVME LS requests. * The function updates any states and statistics, calls the transport * ls_req done() routine, then tears down the command and buffers used * for the LS request. **/ void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe) { struct nvmefc_ls_req *pnvme_lsreq; struct lpfc_dmabuf *buf_ptr; struct lpfc_nodelist *ndlp; int status; pnvme_lsreq = cmdwqe->context_un.nvme_lsreq; ndlp = cmdwqe->ndlp; buf_ptr = cmdwqe->bpl_dmabuf; status = bf_get(lpfc_wcqe_c_status, wcqe); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px " "ndlp:x%px\n", pnvme_lsreq, ndlp ? 
ndlp->nlp_DID : 0, cmdwqe->sli4_xritag, status, (wcqe->parameter & 0xffff), cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf, ndlp); lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n", cmdwqe->sli4_xritag, status, wcqe->parameter); if (buf_ptr) { lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); cmdwqe->bpl_dmabuf = NULL; } if (pnvme_lsreq->done) { if (status != CQE_STATUS_SUCCESS) status = -ENXIO; pnvme_lsreq->done(pnvme_lsreq, status); } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6046 NVMEx cmpl without done call back? " "Data x%px DID %x Xri: %x status %x\n", pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, cmdwqe->sli4_xritag, status); } if (ndlp) { lpfc_nlp_put(ndlp); cmdwqe->ndlp = NULL; } lpfc_sli_release_iocbq(phba, cmdwqe); } static void lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe) { struct lpfc_vport *vport = cmdwqe->vport; struct lpfc_nvme_lport *lport; uint32_t status; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; status = bf_get(lpfc_wcqe_c_status, wcqe); if (vport->localport) { lport = (struct lpfc_nvme_lport *)vport->localport->private; if (lport) { atomic_inc(&lport->fc4NvmeLsCmpls); if (status) { if (bf_get(lpfc_wcqe_c_xb, wcqe)) atomic_inc(&lport->cmpl_ls_xb); atomic_inc(&lport->cmpl_ls_err); } } } __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe); } static int lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, struct lpfc_dmabuf *inp, struct nvmefc_ls_req *pnvme_lsreq, void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), struct lpfc_nodelist *ndlp, uint32_t num_entry, uint32_t tmo, uint8_t retry) { struct lpfc_hba *phba = vport->phba; union lpfc_wqe128 *wqe; struct lpfc_iocbq *genwqe; struct ulp_bde64 *bpl; struct ulp_bde64 bde; int i, rc, xmit_len, first_len; /* Allocate buffer for command WQE */ genwqe = lpfc_sli_get_iocbq(phba); if (genwqe == NULL) return 1; wqe = &genwqe->wqe; /* Initialize only 64 bytes */ memset(wqe, 0, sizeof(union lpfc_wqe)); genwqe->bpl_dmabuf = bmp; genwqe->cmd_flag |= LPFC_IO_NVME_LS; /* Save for completion so we can release these resources */ genwqe->ndlp = lpfc_nlp_get(ndlp); if (!genwqe->ndlp) { dev_warn(&phba->pcidev->dev, "Warning: Failed node ref, not sending LS_REQ\n"); lpfc_sli_release_iocbq(phba, genwqe); return 1; } genwqe->context_un.nvme_lsreq = pnvme_lsreq; /* Fill in payload, bp points to frame payload */ if (!tmo) /* FC spec states we need 3 * ratov for CT requests */ tmo = (3 * phba->fc_ratov); /* For this command calculate the xmit length of the request bde. 
*/ xmit_len = 0; first_len = 0; bpl = (struct ulp_bde64 *)bmp->virt; for (i = 0; i < num_entry; i++) { bde.tus.w = bpl[i].tus.w; if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) break; xmit_len += bde.tus.f.bdeSize; if (i == 0) first_len = xmit_len; } genwqe->num_bdes = num_entry; genwqe->hba_wqidx = 0; /* Words 0 - 2 */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; wqe->generic.bde.tus.f.bdeSize = first_len; wqe->generic.bde.addrLow = bpl[0].addrLow; wqe->generic.bde.addrHigh = bpl[0].addrHigh; /* Word 3 */ wqe->gen_req.request_payload_len = first_len; /* Word 4 */ /* Word 5 */ bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ); bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); /* Word 7 */ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo); bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); /* Word 8 */ wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); /* Word 10 */ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); /* Word 11 */ bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); /* Issue GEN REQ WQE for NPORT <did> */ genwqe->cmd_cmpl = cmpl; genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; genwqe->vport = vport; genwqe->retry = retry; lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); if (rc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6045 Issue GEN REQ WQE to NPORT x%x " "Data: x%x x%x rc x%x\n", ndlp->nlp_DID, genwqe->iotag, vport->port_state, rc); lpfc_nlp_put(ndlp); lpfc_sli_release_iocbq(phba, genwqe); return 1; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS, "6050 Issue GEN REQ WQE to NPORT x%x " "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px " "bmp:x%px xmit:%d 1st:%d\n", ndlp->nlp_DID, genwqe->sli4_xritag, vport->port_state, genwqe, pnvme_lsreq, bmp, xmit_len, first_len); return 0; } /** * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request * @vport: The local port issuing the LS * @ndlp: The remote port to send the LS to * @pnvme_lsreq: Pointer to LS request structure from the transport * @gen_req_cmp: Completion call-back * * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST * WQE to perform the LS operation. 
* * Return value : * 0 - Success * non-zero: various error codes, in form of -Exxx **/ int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq, void (*gen_req_cmp)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe)) { struct lpfc_dmabuf *bmp; struct ulp_bde64 *bpl; int ret; uint16_t ntype, nstate; if (!ndlp) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " "LS Req\n", ndlp); return -ENODEV; } ntype = ndlp->nlp_type; nstate = ndlp->nlp_state; if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6088 NVMEx LS REQ: Fail DID x%06x not " "ready for IO. Type x%x, State x%x\n", ndlp->nlp_DID, ntype, nstate); return -ENODEV; } if (vport->phba->hba_flag & HBA_IOQ_FLUSH) return -ENODEV; if (!vport->phba->sli4_hba.nvmels_wq) return -ENOMEM; /* * there are two dma buf in the request, actually there is one and * the second one is just the start address + cmd size. * Before calling lpfc_nvme_gen_req these buffers need to be wrapped * in a lpfc_dmabuf struct. When freeing we just free the wrapper * because the nvem layer owns the data bufs. * We do not have to break these packets open, we don't care what is * in them. And we do not have to look at the resonse data, we only * care that we got a response. All of the caring is going to happen * in the nvme-fc layer. */ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); if (!bmp) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6044 NVMEx LS REQ: Could not alloc LS buf " "for DID %x\n", ndlp->nlp_DID); return -ENOMEM; } bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); if (!bmp->virt) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6042 NVMEx LS REQ: Could not alloc mbuf " "for DID %x\n", ndlp->nlp_DID); kfree(bmp); return -ENOMEM; } INIT_LIST_HEAD(&bmp->list); bpl = (struct ulp_bde64 *)bmp->virt; bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); bpl->tus.f.bdeFlags = 0; bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; bpl->tus.w = le32_to_cpu(bpl->tus.w); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, " "rqstlen:%d rsplen:%d %pad %pad\n", ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen, pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, &pnvme_lsreq->rspdma); ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, pnvme_lsreq, gen_req_cmp, ndlp, 2, pnvme_lsreq->timeout, 0); if (ret != WQE_SUCCESS) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6052 NVMEx REQ: EXIT. issue ls wqe failed " "lsreq x%px Status %x DID %x\n", pnvme_lsreq, ret, ndlp->nlp_DID); lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); kfree(bmp); return -EIO; } return 0; } /** * lpfc_nvme_ls_req - Issue an NVME Link Service request * @pnvme_lport: Transport localport that LS is to be issued from. * @pnvme_rport: Transport remoteport that LS is to be sent to. 
* @pnvme_lsreq: the transport nvme_ls_req structure for the LS * * Driver registers this routine to handle any link service request * from the nvme_fc transport to a remote nvme-aware port. * * Return value : * 0 - Success * non-zero: various error codes, in form of -Exxx **/ static int lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, struct nvme_fc_remote_port *pnvme_rport, struct nvmefc_ls_req *pnvme_lsreq) { struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport; struct lpfc_vport *vport; int ret; lport = (struct lpfc_nvme_lport *)pnvme_lport->private; rport = (struct lpfc_nvme_rport *)pnvme_rport->private; if (unlikely(!lport) || unlikely(!rport)) return -EINVAL; vport = lport->vport; if (vport->load_flag & FC_UNLOADING || vport->phba->hba_flag & HBA_IOQ_FLUSH) return -ENODEV; atomic_inc(&lport->fc4NvmeLsRequests); ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq, lpfc_nvme_ls_req_cmp); if (ret) atomic_inc(&lport->xmt_ls_err); return ret; } /** * __lpfc_nvme_ls_abort - Generic service routine to abort a prior * NVME LS request * @vport: The local port that issued the LS * @ndlp: The remote port the LS was sent to * @pnvme_lsreq: Pointer to LS request structure from the transport * * The driver validates the ndlp, looks for the LS, and aborts the * LS if found. * * Returns: * 0 : if LS found and aborted * non-zero: various error conditions in form -Exxx **/ int __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq) { struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring; struct lpfc_iocbq *wqe, *next_wqe; bool foundit = false; if (!ndlp) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " "x%06x, Failing LS Req\n", ndlp, ndlp ? ndlp->nlp_DID : 0); return -EINVAL; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq " "x%px rqstlen:%d rsplen:%d %pad %pad\n", pnvme_lsreq, pnvme_lsreq->rqstlen, pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, &pnvme_lsreq->rspdma); /* * Lock the ELS ring txcmplq and look for the wqe that matches * this ELS. If found, issue an abort on the wqe. */ pring = phba->sli4_hba.nvmels_wq->pring; spin_lock_irq(&phba->hbalock); spin_lock(&pring->ring_lock); list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { if (wqe->context_un.nvme_lsreq == pnvme_lsreq) { wqe->cmd_flag |= LPFC_DRIVER_ABORTED; foundit = true; break; } } spin_unlock(&pring->ring_lock); if (foundit) lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL); spin_unlock_irq(&phba->hbalock); if (foundit) return 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n", pnvme_lsreq); return -EINVAL; } static int lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_rsp *ls_rsp) { struct lpfc_async_xchg_ctx *axchg = container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); struct lpfc_nvme_lport *lport; int rc; if (axchg->phba->pport->load_flag & FC_UNLOADING) return -ENODEV; lport = (struct lpfc_nvme_lport *)localport->private; rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp); if (rc) { /* * unless the failure is due to having already sent * the response, an abort will be generated for the * exchange if the rsp can't be sent. 
*/ if (rc != -EALREADY) atomic_inc(&lport->xmt_ls_abort); return rc; } return 0; } /** * lpfc_nvme_ls_abort - Abort a prior NVME LS request * @pnvme_lport: Transport localport that LS is to be issued from. * @pnvme_rport: Transport remoteport that LS is to be sent to. * @pnvme_lsreq: the transport nvme_ls_req structure for the LS * * Driver registers this routine to abort a NVME LS request that is * in progress (from the transports perspective). **/ static void lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, struct nvme_fc_remote_port *pnvme_rport, struct nvmefc_ls_req *pnvme_lsreq) { struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; int ret; lport = (struct lpfc_nvme_lport *)pnvme_lport->private; if (unlikely(!lport)) return; vport = lport->vport; if (vport->load_flag & FC_UNLOADING) return; ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq); if (!ret) atomic_inc(&lport->xmt_ls_abort); } /* Fix up the existing sgls for NVME IO. */ static inline void lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_ncmd, struct nvmefc_fcp_req *nCmd) { struct lpfc_hba *phba = vport->phba; struct sli4_sge *sgl; union lpfc_wqe128 *wqe; uint32_t *wptr, *dptr; /* * Get a local pointer to the built-in wqe and correct * the cmd size to match NVME's 96 bytes and fix * the dma address. */ wqe = &lpfc_ncmd->cur_iocbq.wqe; /* * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to * match NVME. NVME sends 96 bytes. Also, use the * nvme commands command and response dma addresses * rather than the virtual memory to ease the restore * operation. */ sgl = lpfc_ncmd->dma_sgl; sgl->sge_len = cpu_to_le32(nCmd->cmdlen); if (phba->cfg_nvme_embed_cmd) { sgl->addr_hi = 0; sgl->addr_lo = 0; /* Word 0-2 - NVME CMND IU (embedded payload) */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; wqe->generic.bde.tus.f.bdeSize = 56; wqe->generic.bde.addrHigh = 0; wqe->generic.bde.addrLow = 64; /* Word 16 */ /* Word 10 - dbde is 0, wqes is 1 in template */ /* * Embed the payload in the last half of the WQE * WQE words 16-30 get the NVME CMD IU payload * * WQE words 16-19 get payload Words 1-4 * WQE words 20-21 get payload Words 6-7 * WQE words 22-29 get payload Words 16-23 */ wptr = &wqe->words[16]; /* WQE ptr */ dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ dptr++; /* Skip Word 0 in payload */ *wptr++ = *dptr++; /* Word 1 */ *wptr++ = *dptr++; /* Word 2 */ *wptr++ = *dptr++; /* Word 3 */ *wptr++ = *dptr++; /* Word 4 */ dptr++; /* Skip Word 5 in payload */ *wptr++ = *dptr++; /* Word 6 */ *wptr++ = *dptr++; /* Word 7 */ dptr += 8; /* Skip Words 8-15 in payload */ *wptr++ = *dptr++; /* Word 16 */ *wptr++ = *dptr++; /* Word 17 */ *wptr++ = *dptr++; /* Word 18 */ *wptr++ = *dptr++; /* Word 19 */ *wptr++ = *dptr++; /* Word 20 */ *wptr++ = *dptr++; /* Word 21 */ *wptr++ = *dptr++; /* Word 22 */ *wptr = *dptr; /* Word 23 */ } else { sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma)); sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma)); /* Word 0-2 - NVME CMND IU Inline BDE */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen; wqe->generic.bde.addrHigh = sgl->addr_hi; wqe->generic.bde.addrLow = sgl->addr_lo; /* Word 10 */ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); } sgl++; /* Setup the physical region for the FCP RSP */ sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); sgl->addr_lo = 
cpu_to_le32(putPaddrLow(nCmd->rspdma)); sgl->word2 = le32_to_cpu(sgl->word2); if (nCmd->sg_cnt) bf_set(lpfc_sli4_sge_last, sgl, 0); else bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(nCmd->rsplen); } /* * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO * * Driver registers this routine as it io request handler. This * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq * data structure to the rport indicated in @lpfc_nvme_rport. * * Return value : * 0 - Success * TODO: What are the failure codes. **/ static void lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, struct lpfc_iocbq *pwqeOut) { struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf; struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; struct lpfc_vport *vport = pwqeIn->vport; struct nvmefc_fcp_req *nCmd; struct nvme_fc_ersp_iu *ep; struct nvme_fc_cmd_iu *cp; struct lpfc_nodelist *ndlp; struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_lport *lport; uint32_t code, status, idx; uint16_t cid, sqhd, data; uint32_t *ptr; uint32_t lat; bool call_done = false; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS int cpu; #endif int offline = 0; /* Sanity check on return of outstanding command */ if (!lpfc_ncmd) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6071 Null lpfc_ncmd pointer. No " "release, skip completion\n"); return; } /* Guard against abort handler being called at same time */ spin_lock(&lpfc_ncmd->buf_lock); if (!lpfc_ncmd->nvmeCmd) { spin_unlock(&lpfc_ncmd->buf_lock); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " "nvmeCmd x%px\n", lpfc_ncmd, lpfc_ncmd->nvmeCmd); /* Release the lpfc_ncmd regardless of the missing elements. */ lpfc_release_nvme_buf(phba, lpfc_ncmd); return; } nCmd = lpfc_ncmd->nvmeCmd; status = bf_get(lpfc_wcqe_c_status, wcqe); idx = lpfc_ncmd->cur_iocbq.hba_wqidx; phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; if (unlikely(status && vport->localport)) { lport = (struct lpfc_nvme_lport *)vport->localport->private; if (lport) { if (bf_get(lpfc_wcqe_c_xb, wcqe)) atomic_inc(&lport->cmpl_fcp_xb); atomic_inc(&lport->cmpl_fcp_err); } } lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, status, wcqe->parameter); /* * Catch race where our node has transitioned, but the * transport is still transitioning. */ ndlp = lpfc_ncmd->ndlp; if (!ndlp) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6062 Ignoring NVME cmpl. No ndlp\n"); goto out_err; } code = bf_get(lpfc_wcqe_c_code, wcqe); if (code == CQE_CODE_NVME_ERSP) { /* For this type of CQE, we need to rebuild the rsp */ ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; /* * Get Command Id from cmd to plug into response. This * code is not needed in the next NVME Transport drop. 
*/ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; cid = cp->sqe.common.command_id; /* * RSN is in CQE word 2 * SQHD is in CQE Word 3 bits 15:0 * Cmd Specific info is in CQE Word 1 * and in CQE Word 0 bits 15:0 */ sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); /* Now lets build the NVME ERSP IU */ ep->iu_len = cpu_to_be16(8); ep->rsn = wcqe->parameter; ep->xfrd_len = cpu_to_be32(nCmd->payload_length); ep->rsvd12 = 0; ptr = (uint32_t *)&ep->cqe.result.u64; *ptr++ = wcqe->total_data_placed; data = bf_get(lpfc_wcqe_c_ersp0, wcqe); *ptr = (uint32_t)data; ep->cqe.sq_head = sqhd; ep->cqe.sq_id = nCmd->sqid; ep->cqe.command_id = cid; ep->cqe.status = 0; lpfc_ncmd->status = IOSTAT_SUCCESS; lpfc_ncmd->result = 0; nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; nCmd->transferred_length = nCmd->payload_length; } else { lpfc_ncmd->status = status; lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); /* For NVME, the only failure path that results in an * IO error is when the adapter rejects it. All other * conditions are a success case and resolved by the * transport. * IOSTAT_FCP_RSP_ERROR means: * 1. Length of data received doesn't match total * transfer length in WQE * 2. If the RSP payload does NOT match these cases: * a. RSP length 12/24 bytes and all zeros * b. NVME ERSP */ switch (lpfc_ncmd->status) { case IOSTAT_SUCCESS: nCmd->transferred_length = wcqe->total_data_placed; nCmd->rcv_rsplen = 0; nCmd->status = 0; break; case IOSTAT_FCP_RSP_ERROR: nCmd->transferred_length = wcqe->total_data_placed; nCmd->rcv_rsplen = wcqe->parameter; nCmd->status = 0; /* Get the NVME cmd details for this unique error. */ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; /* Check if this is really an ERSP */ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { lpfc_ncmd->status = IOSTAT_SUCCESS; lpfc_ncmd->result = 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6084 NVME FCP_ERR ERSP: " "xri %x placed x%x opcode x%x cmd_id " "x%x cqe_status x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, wcqe->total_data_placed, cp->sqe.common.opcode, cp->sqe.common.command_id, ep->cqe.status); break; } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6081 NVME Completion Protocol Error: " "xri %x status x%x result x%x " "placed x%x opcode x%x cmd_id x%x, " "cqe_status x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->status, lpfc_ncmd->result, wcqe->total_data_placed, cp->sqe.common.opcode, cp->sqe.common.command_id, ep->cqe.status); break; case IOSTAT_LOCAL_REJECT: /* Let fall through to set command final state. */ if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6032 Delay Aborted cmd x%px " "nvme cmd x%px, xri x%x, " "xb %d\n", lpfc_ncmd, nCmd, lpfc_ncmd->cur_iocbq.sli4_xritag, bf_get(lpfc_wcqe_c_xb, wcqe)); fallthrough; default: out_err: lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6072 NVME Completion Error: xri %x " "status x%x result x%x [x%x] " "placed x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->status, lpfc_ncmd->result, wcqe->parameter, wcqe->total_data_placed); nCmd->transferred_length = 0; nCmd->rcv_rsplen = 0; nCmd->status = NVME_SC_INTERNAL; offline = pci_channel_offline(vport->phba->pcidev); } } /* pick up SLI4 exhange busy condition */ if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline) lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; else lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; /* Update stats and complete the IO. There is * no need for dma unprep because the nvme_transport * owns the dma address. 
*/ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_ncmd->ts_cmd_start) { lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; lpfc_ncmd->ts_data_io = ktime_get_ns(); phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; lpfc_io_ktime(phba, lpfc_ncmd); } if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { cpu = raw_smp_processor_id(); this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); if (lpfc_ncmd->cpu != cpu) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6701 CPU Check cmpl: " "cpu %d expect %d\n", cpu, lpfc_ncmd->cpu); } #endif /* NVME targets need completion held off until the abort exchange * completes unless the NVME Rport is getting unregistered. */ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { freqpriv = nCmd->private; freqpriv->nvme_buf = NULL; lpfc_ncmd->nvmeCmd = NULL; call_done = true; } spin_unlock(&lpfc_ncmd->buf_lock); /* Check if IO qualified for CMF */ if (phba->cmf_active_mode != LPFC_CFG_OFF && nCmd->io_dir == NVMEFC_FCP_READ && nCmd->payload_length) { /* Used when calculating average latency */ lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); } if (call_done) nCmd->done(nCmd); /* Call release with XB=1 to queue the IO into the abort list. */ lpfc_release_nvme_buf(phba, lpfc_ncmd); } /** * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO * @vport: pointer to a host virtual N_Port data structure * @lpfc_ncmd: Pointer to lpfc scsi command * @pnode: pointer to a node-list data structure * @cstat: pointer to the control status structure * * Driver registers this routine as it io request handler. This * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq * data structure to the rport indicated in @lpfc_nvme_rport. * * Return value : * 0 - Success * TODO: What are the failure codes. **/ static int lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_ncmd, struct lpfc_nodelist *pnode, struct lpfc_fc4_ctrl_stat *cstat) { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; struct nvme_common_command *sqe; struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. 
*/ if (nCmd->sg_cnt) { if (nCmd->io_dir == NVMEFC_FCP_WRITE) { /* From the iwrite template, initialize words 7 - 11 */ memcpy(&wqe->words[7], &lpfc_iwrite_cmd_template.words[7], sizeof(uint32_t) * 5); /* Word 4 */ wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length; /* Word 5 */ if ((phba->cfg_nvme_enable_fb) && (pnode->nlp_flag & NLP_FIRSTBURST)) { req_len = lpfc_ncmd->nvmeCmd->payload_length; if (req_len < pnode->nvme_fb_size) wqe->fcp_iwrite.initial_xfer_len = req_len; else wqe->fcp_iwrite.initial_xfer_len = pnode->nvme_fb_size; } else { wqe->fcp_iwrite.initial_xfer_len = 0; } cstat->output_requests++; } else { /* From the iread template, initialize words 7 - 11 */ memcpy(&wqe->words[7], &lpfc_iread_cmd_template.words[7], sizeof(uint32_t) * 5); /* Word 4 */ wqe->fcp_iread.total_xfer_len = nCmd->payload_length; /* Word 5 */ wqe->fcp_iread.rsrvd5 = 0; /* For a CMF Managed port, iod must be zero'ed */ if (phba->cmf_active_mode == LPFC_CFG_MANAGED) bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_NONE); cstat->input_requests++; } } else { /* From the icmnd template, initialize words 4 - 11 */ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], sizeof(uint32_t) * 8); cstat->control_requests++; } if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { bf_set(wqe_erp, &wqe->generic.wqe_com, 1); sqe = &((struct nvme_fc_cmd_iu *) nCmd->cmdaddr)->sqe.common; if (sqe->opcode == nvme_admin_async_event) bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); } /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer */ /* Word 3 */ bf_set(payload_offset_len, &wqe->fcp_icmd, (nCmd->rsplen + nCmd->cmdlen)); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); /* Word 8 */ wqe->generic.wqe_com.abort_tag = pwqeq->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); /* Word 10 */ bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG); /* Words 13 14 15 are for PBDE support */ /* add the VMID tags as per switch response */ if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) { if (phba->pport->vmid_priority_tagging) { bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid); } else { bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id; } } pwqeq->vport = vport; return 0; } /** * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO * @vport: pointer to a host virtual N_Port data structure * @lpfc_ncmd: Pointer to lpfc scsi command * * Driver registers this routine as it io request handler. This * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq * data structure to the rport indicated in @lpfc_nvme_rport. * * Return value : * 0 - Success * TODO: What are the failure codes. **/ static int lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_ncmd) { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; struct sli4_hybrid_sgl *sgl_xtra = NULL; struct scatterlist *data_sg; struct sli4_sge *first_data_sgl; struct ulp_bde64 *bde; dma_addr_t physaddr = 0; uint32_t dma_len = 0; uint32_t dma_offset = 0; int nseg, i, j; bool lsp_just_set = false; /* Fix up the command and response DMA stuff. 
*/ lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. */ if (nCmd->sg_cnt) { /* * Jump over the cmd and rsp SGEs. The fix routine * has already adjusted for this. */ sgl += 2; first_data_sgl = sgl; lpfc_ncmd->seg_cnt = nCmd->sg_cnt; if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6058 Too many sg segments from " "NVME Transport. Max %d, " "nvmeIO sg_cnt %d\n", phba->cfg_nvme_seg_cnt + 1, lpfc_ncmd->seg_cnt); lpfc_ncmd->seg_cnt = 0; return 1; } /* * The driver established a maximum scatter-gather segment count * during probe that limits the number of sg elements in any * single nvme command. Just run through the seg_cnt and format * the sge's. */ nseg = nCmd->sg_cnt; data_sg = nCmd->first_sgl; /* for tracking the segment boundaries */ j = 2; for (i = 0; i < nseg; i++) { if (data_sg == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6059 dptr err %d, nseg %d\n", i, nseg); lpfc_ncmd->seg_cnt = 0; return 1; } sgl->word2 = 0; if (nseg == 1) { bf_set(lpfc_sli4_sge_last, sgl, 1); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); } else { bf_set(lpfc_sli4_sge_last, sgl, 0); /* expand the segment */ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && ((nseg - 1) != i)) { /* set LSP type */ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); sgl_xtra = lpfc_get_sgl_per_hdwq( phba, lpfc_ncmd); if (unlikely(!sgl_xtra)) { lpfc_ncmd->seg_cnt = 0; return 1; } sgl->addr_lo = cpu_to_le32(putPaddrLow( sgl_xtra->dma_phys_sgl)); sgl->addr_hi = cpu_to_le32(putPaddrHigh( sgl_xtra->dma_phys_sgl)); } else { bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); } } if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { if ((nseg - 1) == i) bf_set(lpfc_sli4_sge_last, sgl, 1); physaddr = sg_dma_address(data_sg); dma_len = sg_dma_len(data_sg); sgl->addr_lo = cpu_to_le32( putPaddrLow(physaddr)); sgl->addr_hi = cpu_to_le32( putPaddrHigh(physaddr)); bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(dma_len); dma_offset += dma_len; data_sg = sg_next(data_sg); sgl++; lsp_just_set = false; } else { sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32( phba->cfg_sg_dma_buf_size); sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; i = i - 1; lsp_just_set = true; } j++; } /* PBDE support for first data SGE only */ if (nseg == 1 && phba->cfg_enable_pbde) { /* Words 13-15 */ bde = (struct ulp_bde64 *) &wqe->words[13]; bde->addrLow = first_data_sgl->addr_lo; bde->addrHigh = first_data_sgl->addr_hi; bde->tus.f.bdeSize = le32_to_cpu(first_data_sgl->sge_len); bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bde->tus.w = cpu_to_le32(bde->tus.w); /* Word 11 - set PBDE bit */ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); } else { memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); /* Word 11 - PBDE bit disabled by default template */ } } else { lpfc_ncmd->seg_cnt = 0; /* For this clause to be valid, the payload_length * and sg_cnt must zero. 
*/ if (nCmd->payload_length != 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6063 NVME DMA Prep Err: sg_cnt %d " "payload_length x%x\n", nCmd->sg_cnt, nCmd->payload_length); return 1; } } return 0; } /** * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO * @pnvme_lport: Pointer to the driver's local port data * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue * @pnvme_fcreq: IO request from nvme fc to driver. * * Driver registers this routine as it io request handler. This * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq * data structure to the rport indicated in @lpfc_nvme_rport. * * Return value : * 0 - Success * TODO: What are the failure codes. **/ static int lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, struct nvme_fc_remote_port *pnvme_rport, void *hw_queue_handle, struct nvmefc_fcp_req *pnvme_fcreq) { int ret = 0; int expedite = 0; int idx, cpu; struct lpfc_nvme_lport *lport; struct lpfc_fc4_ctrl_stat *cstat; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_nodelist *ndlp; struct lpfc_io_buf *lpfc_ncmd; struct lpfc_nvme_rport *rport; struct lpfc_nvme_qhandle *lpfc_queue_info; struct lpfc_nvme_fcpreq_priv *freqpriv; struct nvme_common_command *sqe; uint64_t start = 0; #if (IS_ENABLED(CONFIG_NVME_FC)) u8 *uuid = NULL; int err; enum dma_data_direction iodir; #endif /* Validate pointers. LLDD fault handling with transport does * have timing races. */ lport = (struct lpfc_nvme_lport *)pnvme_lport->private; if (unlikely(!lport)) { ret = -EINVAL; goto out_fail; } vport = lport->vport; if (unlikely(!hw_queue_handle)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6117 Fail IO, NULL hw_queue_handle\n"); atomic_inc(&lport->xmt_fcp_err); ret = -EBUSY; goto out_fail; } phba = vport->phba; if ((unlikely(vport->load_flag & FC_UNLOADING)) || phba->hba_flag & HBA_IOQ_FLUSH) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6124 Fail IO, Driver unload\n"); atomic_inc(&lport->xmt_fcp_err); ret = -ENODEV; goto out_fail; } freqpriv = pnvme_fcreq->private; if (unlikely(!freqpriv)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6158 Fail IO, NULL request data\n"); atomic_inc(&lport->xmt_fcp_err); ret = -EINVAL; goto out_fail; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (phba->ktime_on) start = ktime_get_ns(); #endif rport = (struct lpfc_nvme_rport *)pnvme_rport->private; lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; /* * Catch race where our node has transitioned, but the * transport is still transitioning. */ ndlp = rport->ndlp; if (!ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, "6053 Busy IO, ndlp not ready: rport x%px " "ndlp x%px, DID x%06x\n", rport, ndlp, pnvme_rport->port_id); atomic_inc(&lport->xmt_fcp_err); ret = -EBUSY; goto out_fail; } /* The remote node has to be a mapped target or it's an error. */ if ((ndlp->nlp_type & NLP_NVME_TARGET) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, "6036 Fail IO, DID x%06x not ready for " "IO. State x%x, Type x%x Flg x%x\n", pnvme_rport->port_id, ndlp->nlp_state, ndlp->nlp_type, ndlp->fc4_xpt_flags); atomic_inc(&lport->xmt_fcp_bad_ndlp); ret = -EBUSY; goto out_fail; } /* Currently only NVME Keep alive commands should be expedited * if the driver runs out of a resource. 
These should only be * issued on the admin queue, qidx 0 */ if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { sqe = &((struct nvme_fc_cmd_iu *) pnvme_fcreq->cmdaddr)->sqe.common; if (sqe->opcode == nvme_admin_keep_alive) expedite = 1; } /* Check if IO qualifies for CMF */ if (phba->cmf_active_mode != LPFC_CFG_OFF && pnvme_fcreq->io_dir == NVMEFC_FCP_READ && pnvme_fcreq->payload_length) { ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); if (ret) { ret = -EBUSY; goto out_fail; } /* Get start time for IO latency */ start = ktime_get_ns(); } /* The node is shared with FCP IO, make sure the IO pending count does * not exceed the programmed depth. */ if (lpfc_ndlp_check_qdepth(phba, ndlp)) { if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && !expedite) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6174 Fail IO, ndlp qdepth exceeded: " "idx %d DID %x pend %d qdepth %d\n", lpfc_queue_info->index, ndlp->nlp_DID, atomic_read(&ndlp->cmd_pending), ndlp->cmd_qdepth); atomic_inc(&lport->xmt_fcp_qdepth); ret = -EBUSY; goto out_fail1; } } /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { idx = lpfc_queue_info->index; } else { cpu = raw_smp_processor_id(); idx = phba->sli4_hba.cpu_map[cpu].hdwq; } lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); if (lpfc_ncmd == NULL) { atomic_inc(&lport->xmt_fcp_noxri); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6065 Fail IO, driver buffer pool is empty: " "idx %d DID %x\n", lpfc_queue_info->index, ndlp->nlp_DID); ret = -EBUSY; goto out_fail1; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (start) { lpfc_ncmd->ts_cmd_start = start; lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; } else { lpfc_ncmd->ts_cmd_start = 0; } #endif lpfc_ncmd->rx_cmd_start = start; /* * Store the data needed by the driver to issue, abort, and complete * an IO. * Do not let the IO hang out forever. There is no midlayer issuing * an abort so inform the FW of the maximum IO pending time. */ freqpriv->nvme_buf = lpfc_ncmd; lpfc_ncmd->nvmeCmd = pnvme_fcreq; lpfc_ncmd->ndlp = ndlp; lpfc_ncmd->qidx = lpfc_queue_info->qidx; #if (IS_ENABLED(CONFIG_NVME_FC)) /* check the necessary and sufficient condition to support VMID */ if (lpfc_is_vmid_enabled(phba) && (ndlp->vmid_support || phba->pport->vmid_priority_tagging == LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { /* is the I/O generated by a VM, get the associated virtual */ /* entity id */ uuid = nvme_fc_io_getuuid(pnvme_fcreq); if (uuid) { if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE) iodir = DMA_TO_DEVICE; else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ) iodir = DMA_FROM_DEVICE; else iodir = DMA_NONE; err = lpfc_vmid_get_appid(vport, uuid, iodir, (union lpfc_vmid_io_tag *) &lpfc_ncmd->cur_iocbq.vmid_tag); if (!err) lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID; } } #endif /* * Issue the IO on the WQ indicated by index in the hw_queue_handle. * This identfier was create in our hardware queue create callback * routine. The driver now is dependent on the IO queue steering from * the transport. We are trusting the upper NVME layers know which * index to use and that they have affinitized a CPU to this hardware * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 
*/ lpfc_ncmd->cur_iocbq.hba_wqidx = idx; cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); if (ret) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6175 Fail IO, Prep DMA: " "idx %d DID %x\n", lpfc_queue_info->index, ndlp->nlp_DID); atomic_inc(&lport->xmt_fcp_err); ret = -ENOMEM; goto out_free_nvme_buf; } lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_queue_info->index, ndlp->nlp_DID); ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); if (ret) { atomic_inc(&lport->xmt_fcp_wqerr); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6113 Fail IO, Could not issue WQE err %x " "sid: x%x did: x%x oxid: x%x\n", ret, vport->fc_myDID, ndlp->nlp_DID, lpfc_ncmd->cur_iocbq.sli4_xritag); goto out_free_nvme_buf; } if (phba->cfg_xri_rebalancing) lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_ncmd->ts_cmd_start) lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { cpu = raw_smp_processor_id(); this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); lpfc_ncmd->cpu = cpu; if (idx != cpu) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6702 CPU Check cmd: " "cpu %d wq %d\n", lpfc_ncmd->cpu, lpfc_queue_info->index); } #endif return 0; out_free_nvme_buf: if (lpfc_ncmd->nvmeCmd->sg_cnt) { if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) cstat->output_requests--; else cstat->input_requests--; } else cstat->control_requests--; lpfc_release_nvme_buf(phba, lpfc_ncmd); out_fail1: lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, pnvme_fcreq->payload_length, NULL); out_fail: return ret; } /** * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. * @phba: Pointer to HBA context object * @cmdiocb: Pointer to command iocb object. * @rspiocb: Pointer to response iocb object. * * This is the callback function for any NVME FCP IO that was aborted. * * Return value: * None **/ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl; lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6145 ABORT_XRI_CN completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " "req_tag x%x, status x%x, hwstatus x%x\n", bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com), get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag, bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), bf_get(lpfc_wcqe_c_status, abts_cmpl), bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); lpfc_sli_release_iocbq(phba, cmdiocb); } /** * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS * @pnvme_lport: Pointer to the driver's local port data * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue * @pnvme_fcreq: IO request from nvme fc to driver. * * Driver registers this routine as its nvme request io abort handler. This * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq * data structure to the rport indicated in @lpfc_nvme_rport. This routine * is executed asynchronously - one the target is validated as "MAPPED" and * ready for IO, the driver issues the abort request and returns. 
* * Return value: * None **/ static void lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct nvme_fc_remote_port *pnvme_rport, void *hw_queue_handle, struct nvmefc_fcp_req *pnvme_fcreq) { struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_io_buf *lpfc_nbuf; struct lpfc_iocbq *nvmereq_wqe; struct lpfc_nvme_fcpreq_priv *freqpriv; unsigned long flags; int ret_val; /* Validate pointers. LLDD fault handling with transport does * have timing races. */ lport = (struct lpfc_nvme_lport *)pnvme_lport->private; if (unlikely(!lport)) return; vport = lport->vport; if (unlikely(!hw_queue_handle)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6129 Fail Abort, HW Queue Handle NULL.\n"); return; } phba = vport->phba; freqpriv = pnvme_fcreq->private; if (unlikely(!freqpriv)) return; if (vport->load_flag & FC_UNLOADING) return; /* Announce entry to new IO submit field. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6002 Abort Request to rport DID x%06x " "for nvme_fc_req x%px\n", pnvme_rport->port_id, pnvme_fcreq); lpfc_nbuf = freqpriv->nvme_buf; if (!lpfc_nbuf) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6140 NVME IO req has no matching lpfc nvme " "io buffer. Skipping abort req.\n"); return; } else if (!lpfc_nbuf->nvmeCmd) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6141 lpfc NVME IO req has no nvme_fcreq " "io buffer. Skipping abort req.\n"); return; } /* Guard against IO completion being called at same time */ spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished. */ spin_lock(&phba->hbalock); /* driver queued commands are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock(&phba->hbalock); spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6139 Driver in reset cleanup - flushing " "NVME Req now. hba_flag x%x\n", phba->hba_flag); return; } nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* * The lpfc_nbuf and the mapped nvme_fcreq in the driver's * state must match the nvme_fcreq passed by the nvme * transport. If they don't match, it is likely the driver * has already completed the NVME IO and the nvme transport * has not seen it yet. */ if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6143 NVME req mismatch: " "lpfc_nbuf x%px nvmeCmd x%px, " "pnvme_fcreq x%px. Skipping Abort xri x%x\n", lpfc_nbuf, lpfc_nbuf->nvmeCmd, pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock; } /* Don't abort IOs no longer on the pending queue. 
*/ if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6142 NVME IO req x%px not queued - skipping " "abort req xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock; } atomic_inc(&lport->xmt_fcp_abort); lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", nvmereq_wqe->sli4_xritag, nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); /* Outstanding abort is in progress */ if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6144 Outstanding NVME I/O Abort Request " "still pending on nvme_fcreq x%px, " "lpfc_ncmd x%px xri x%x\n", pnvme_fcreq, lpfc_nbuf, nvmereq_wqe->sli4_xritag); goto out_unlock; } ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, lpfc_nvme_abort_fcreq_cmpl); spin_unlock(&phba->hbalock); spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); if (ret_val != WQE_SUCCESS) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6137 Failed abts issue_wqe with status x%x " "for nvme_fcreq x%px.\n", ret_val, pnvme_fcreq); return; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6138 Transport Abort NVME Request Issued for " "ox_id x%x\n", nvmereq_wqe->sli4_xritag); return; out_unlock: spin_unlock(&phba->hbalock); spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); return; } /* Declare and initialization an instance of the FC NVME template. */ static struct nvme_fc_port_template lpfc_nvme_template = { /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, .create_queue = lpfc_nvme_create_queue, .delete_queue = lpfc_nvme_delete_queue, .ls_req = lpfc_nvme_ls_req, .fcp_io = lpfc_nvme_fcp_io_submit, .ls_abort = lpfc_nvme_ls_abort, .fcp_abort = lpfc_nvme_fcp_abort, .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, .dma_boundary = 0xFFFFFFFF, /* Sizes of additional private data for data structures. * No use for the last two sizes at this time. */ .local_priv_sz = sizeof(struct lpfc_nvme_lport), .remote_priv_sz = sizeof(struct lpfc_nvme_rport), .lsrqst_priv_sz = 0, .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv), }; /* * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA * * This routine removes a nvme buffer from head of @hdwq io_buf_list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_nvme_buf - Success **/ static struct lpfc_io_buf * lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx, int expedite) { struct lpfc_io_buf *lpfc_ncmd; struct lpfc_sli4_hdw_queue *qp; struct sli4_sge *sgl; struct lpfc_iocbq *pwqeq; union lpfc_wqe128 *wqe; lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); if (lpfc_ncmd) { pwqeq = &(lpfc_ncmd->cur_iocbq); wqe = &pwqeq->wqe; /* Setup key fields in buffer that may have been changed * if other protocols used this buffer. */ pwqeq->cmd_flag = LPFC_IO_NVME; pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl; lpfc_ncmd->start_time = jiffies; lpfc_ncmd->flags = 0; /* Rsp SGE will be filled in when we rcv an IO * from the NVME Layer to be sent. * The cmd is going to be embedded so we need a SKIP SGE. 
*/ sgl = lpfc_ncmd->dma_sgl; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); /* Fill in word 3 / sgl_len during cmd submission */ /* Initialize 64 bytes only */ memset(wqe, 0, sizeof(union lpfc_wqe)); if (lpfc_ndlp_check_qdepth(phba, ndlp)) { atomic_inc(&ndlp->cmd_pending); lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; } } else { qp = &phba->sli4_hba.hdwq[idx]; qp->empty_io_bufs++; } return lpfc_ncmd; } /** * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. * @phba: The Hba for which this call is being executed. * @lpfc_ncmd: The nvme buffer which is being released. * * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted. **/ static void lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) { struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); lpfc_ncmd->ndlp = NULL; lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; qp = lpfc_ncmd->hdwq; if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " "ox_id x%x on reqtag x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.iotag); spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); list_add_tail(&lpfc_ncmd->list, &qp->lpfc_abts_io_buf_list); qp->abts_nvme_io_bufs++; spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); } else lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); } /** * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. * @vport: the lpfc_vport instance requesting a localport. * * This routine is invoked to create an nvme localport instance to bind * to the nvme_fc_transport. It is called once during driver load * like lpfc_create_shost after all other services are initialized. * It requires a vport, vpi, and wwns at call time. Other localport * parameters are modified as the driver's FCID and the Fabric WWN * are established. * * Return codes * 0 - successful * -ENOMEM - no heap memory available * other values - from nvme registration upcall **/ int lpfc_nvme_create_localport(struct lpfc_vport *vport) { int ret = 0; struct lpfc_hba *phba = vport->phba; struct nvme_fc_port_info nfcp_info; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; /* Initialize this localport instance. The vport wwn usage ensures * that NPIV is accounted for. */ memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); /* We need to tell the transport layer + 1 because it takes page * alignment into account. When space for the SGL is allocated we * allocate + 3, one for cmd, one for rsp and one for this alignment */ lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; /* Advertise how many hw queues we support based on cfg_hdw_queue, * which will not exceed cpu count. */ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; if (!IS_ENABLED(CONFIG_NVME_FC)) return ret; /* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area. 
*/ ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, &vport->phba->pcidev->dev, &localport); if (!ret) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, "6005 Successfully registered local " "NVME port num %d, localP x%px, private " "x%px, sg_seg %d\n", localport->port_num, localport, localport->private, lpfc_nvme_template.max_sgl_segments); /* Private is our lport size declared in the template. */ lport = (struct lpfc_nvme_lport *)localport->private; vport->localport = localport; lport->vport = vport; vport->nvmei_support = 1; atomic_set(&lport->xmt_fcp_noxri, 0); atomic_set(&lport->xmt_fcp_bad_ndlp, 0); atomic_set(&lport->xmt_fcp_qdepth, 0); atomic_set(&lport->xmt_fcp_err, 0); atomic_set(&lport->xmt_fcp_wqerr, 0); atomic_set(&lport->xmt_fcp_abort, 0); atomic_set(&lport->xmt_ls_abort, 0); atomic_set(&lport->xmt_ls_err, 0); atomic_set(&lport->cmpl_fcp_xb, 0); atomic_set(&lport->cmpl_fcp_err, 0); atomic_set(&lport->cmpl_ls_xb, 0); atomic_set(&lport->cmpl_ls_err, 0); atomic_set(&lport->fc4NvmeLsRequests, 0); atomic_set(&lport->fc4NvmeLsCmpls, 0); } return ret; } #if (IS_ENABLED(CONFIG_NVME_FC)) /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. * * The driver has to wait for the host nvme transport to callback * indicating the localport has successfully unregistered all * resources. Since this is an uninterruptible wait, loop every ten * seconds and print a message indicating no progress. * * An uninterruptible wait is used because of the risk of transport-to- * driver state mismatch. */ static void lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, struct lpfc_nvme_lport *lport, struct completion *lport_unreg_cmp) { u32 wait_tmo; int ret, i, pending = 0; struct lpfc_sli_ring *pring; struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hdw_queue *qp; int abts_scsi, abts_nvme; /* Host transport has to clean up and confirm requiring an indefinite * wait. Print a message if a 10 second wait expires and renew the * wait. This is unexpected. */ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); while (true) { ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { pending = 0; abts_scsi = 0; abts_nvme = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; if (!vport->localport || !qp || !qp->io_wq) return; pring = qp->io_wq->pring; if (!pring) continue; pending += pring->txcmplq_cnt; abts_scsi += qp->abts_scsi_io_bufs; abts_nvme += qp->abts_nvme_io_bufs; } if (!vport->localport || test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || phba->link_state == LPFC_HBA_ERROR || vport->load_flag & FC_UNLOADING) return; lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6176 Lport x%px Localport x%px wait " "timed out. Pending %d [%d:%d]. " "Renewing.\n", lport, vport->localport, pending, abts_scsi, abts_nvme); continue; } break; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6177 Lport x%px Localport x%px Complete Success\n", lport, vport->localport); } #endif /** * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. * @vport: pointer to a host virtual N_Port data structure * * This routine is invoked to destroy all lports bound to the phba. * The lport memory was allocated by the nvme fc transport and is * released there. This routine ensures all rports bound to the * lport have been disconnected. 
* **/ void lpfc_nvme_destroy_localport(struct lpfc_vport *vport) { #if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; int ret; DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); if (vport->nvmei_support == 0) return; localport = vport->localport; if (!localport) return; lport = (struct lpfc_nvme_lport *)localport->private; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6011 Destroying NVME localport x%px\n", localport); /* lport's rport list is clear. Unregister * lport and release resources. */ lport->lport_unreg_cmp = &lport_unreg_cmp; ret = nvme_fc_unregister_localport(localport); /* Wait for completion. This either blocks * indefinitely or succeeds */ lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); vport->localport = NULL; /* Regardless of the unregister upcall response, clear * nvmei_support. All rports are unregistered and the * driver will clean up. */ vport->nvmei_support = 0; if (ret == 0) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6009 Unregistered lport Success\n"); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6010 Unregistered lport " "Failed, status x%x\n", ret); } #endif } void lpfc_nvme_update_localport(struct lpfc_vport *vport) { #if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; localport = vport->localport; if (!localport) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, "6710 Update NVME fail. No localport\n"); return; } lport = (struct lpfc_nvme_lport *)localport->private; if (!lport) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, "6171 Update NVME fail. localP x%px, No lport\n", localport); return; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6012 Update NVME lport x%px did x%x\n", localport, vport->fc_myDID); localport->port_id = vport->fc_myDID; if (localport->port_id == 0) localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; else localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6030 bound lport x%px to DID x%06x\n", lport, localport->port_id); #endif } int lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { #if (IS_ENABLED(CONFIG_NVME_FC)) int ret = 0; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport; struct lpfc_nvme_rport *oldrport; struct nvme_fc_remote_port *remote_port; struct nvme_fc_port_info rpinfo; struct lpfc_nodelist *prev_ndlp = NULL; struct fc_rport *srport = ndlp->rport; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, "6006 Register NVME PORT. DID x%06x nlptype x%x\n", ndlp->nlp_DID, ndlp->nlp_type); localport = vport->localport; if (!localport) return 0; lport = (struct lpfc_nvme_lport *)localport->private; /* NVME rports are not preserved across devloss. * Just register this instance. Note, rpinfo->dev_loss_tmo * is left 0 to indicate accept transport defaults. The * driver communicates port role capabilities consistent * with the PRLI response data. 
*/ memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); rpinfo.port_id = ndlp->nlp_DID; if (ndlp->nlp_type & NLP_NVME_TARGET) rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; if (ndlp->nlp_type & NLP_NVME_INITIATOR) rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; if (ndlp->nlp_type & NLP_NVME_DISCOVERY) rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); if (srport) rpinfo.dev_loss_tmo = srport->dev_loss_tmo; else rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; spin_lock_irq(&ndlp->lock); /* If an oldrport exists, so does the ndlp reference. If not * a new reference is needed because either the node has never * been registered or it's been unregistered and getting deleted. */ oldrport = lpfc_ndlp_get_nrport(ndlp); if (oldrport) { prev_ndlp = oldrport->ndlp; spin_unlock_irq(&ndlp->lock); } else { spin_unlock_irq(&ndlp->lock); if (!lpfc_nlp_get(ndlp)) { dev_warn(&vport->phba->pcidev->dev, "Warning - No node ref - exit register\n"); return 0; } } ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); if (!ret) { /* If the ndlp already has an nrport, this is just * a resume of the existing rport. Else this is a * new rport. */ /* Guard against an unregister/reregister * race that leaves the WAIT flag set. */ spin_lock_irq(&ndlp->lock); ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; ndlp->fc4_xpt_flags |= NVME_XPT_REGD; spin_unlock_irq(&ndlp->lock); rport = remote_port->private; if (oldrport) { /* Sever the ndlp<->rport association * before dropping the ndlp ref from * register. */ spin_lock_irq(&ndlp->lock); ndlp->nrport = NULL; ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; spin_unlock_irq(&ndlp->lock); rport->ndlp = NULL; rport->remoteport = NULL; /* Reference only removed if previous NDLP is no longer * active. It might be just a swap and removing the * reference would cause a premature cleanup. */ if (prev_ndlp && prev_ndlp != ndlp) { if (!prev_ndlp->nrport) lpfc_nlp_put(prev_ndlp); } } /* Clean bind the rport to the ndlp. */ rport->remoteport = remote_port; rport->lport = lport; rport->ndlp = ndlp; spin_lock_irq(&ndlp->lock); ndlp->nrport = rport; spin_unlock_irq(&ndlp->lock); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NODE, "6022 Bind lport x%px to remoteport x%px " "rport x%px WWNN 0x%llx, " "Rport WWPN 0x%llx DID " "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", lport, remote_port, rport, rpinfo.node_name, rpinfo.port_name, rpinfo.port_id, rpinfo.port_role, ndlp, prev_ndlp); } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6031 RemotePort Registration failed " "err: %d, DID x%06x ref %u\n", ret, ndlp->nlp_DID, kref_read(&ndlp->kref)); lpfc_nlp_put(ndlp); } return ret; #else return 0; #endif } /* * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport * * If the ndlp represents an NVME Target, that we are logged into, * ping the NVME FC Transport layer to initiate a device rescan * on this remote NPort. 
*/ void lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { #if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_nvme_rport *nrport; struct nvme_fc_remote_port *remoteport = NULL; spin_lock_irq(&ndlp->lock); nrport = lpfc_ndlp_get_nrport(ndlp); if (nrport) remoteport = nrport->remoteport; spin_unlock_irq(&ndlp->lock); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6170 Rescan NPort DID x%06x type x%x " "state x%x nrport x%px remoteport x%px\n", ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, nrport, remoteport); if (!nrport || !remoteport) goto rescan_exit; /* Rescan an NVME target in MAPPED state with DISCOVERY role set */ if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && ndlp->nlp_state == NLP_STE_MAPPED_NODE) { nvme_fc_rescan_remoteport(remoteport); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6172 NVME rescanned DID x%06x " "port_state x%x\n", ndlp->nlp_DID, remoteport->port_state); } return; rescan_exit: lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6169 Skip NVME Rport Rescan, NVME remoteport " "unregistered\n"); #endif } /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. * * There is no notion of Devloss or rport recovery from the current * nvme_transport perspective. Loss of an rport just means IO cannot * be sent and recovery is completely up to the initator. * For now, the driver just unbinds the DID and port_role so that * no further IO can be issued. */ void lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { #if (IS_ENABLED(CONFIG_NVME_FC)) int ret; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport; struct nvme_fc_remote_port *remoteport = NULL; localport = vport->localport; /* This is fundamental error. The localport is always * available until driver unload. Just exit. */ if (!localport) return; lport = (struct lpfc_nvme_lport *)localport->private; if (!lport) goto input_err; spin_lock_irq(&ndlp->lock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) remoteport = rport->remoteport; spin_unlock_irq(&ndlp->lock); if (!remoteport) goto input_err; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6033 Unreg nvme remoteport x%px, portname x%llx, " "port_id x%06x, portstate x%x port type x%x " "refcnt %d\n", remoteport, remoteport->port_name, remoteport->port_id, remoteport->port_state, ndlp->nlp_type, kref_read(&ndlp->kref)); /* Sanity check ndlp type. Only call for NVME ports. Don't * clear any rport state until the transport calls back. */ if (ndlp->nlp_type & NLP_NVME_TARGET) { /* No concern about the role change on the nvme remoteport. * The transport will update it. */ spin_lock_irq(&vport->phba->hbalock); ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; spin_unlock_irq(&vport->phba->hbalock); /* Don't let the host nvme transport keep sending keep-alives * on this remoteport. Vport is unloading, no recovery. The * return values is ignored. The upcall is a courtesy to the * transport. */ if (vport->load_flag & FC_UNLOADING || unlikely(vport->phba->link_state == LPFC_HBA_ERROR)) (void)nvme_fc_set_remoteport_devloss(remoteport, 0); ret = nvme_fc_unregister_remoteport(remoteport); /* The driver no longer knows if the nrport memory is valid. * because the controller teardown process has begun and * is asynchronous. Break the binding in the ndlp. Also * remove the register ndlp reference to setup node release. 
*/ ndlp->nrport = NULL; lpfc_nlp_put(ndlp); if (ret != 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6167 NVME unregister failed %d " "port_state x%x\n", ret, remoteport->port_state); if (vport->load_flag & FC_UNLOADING) { /* Only 1 thread can drop the initial node * reference. Check if another thread has set * NLP_DROPPED. */ spin_lock_irq(&ndlp->lock); if (!(ndlp->nlp_flag & NLP_DROPPED)) { ndlp->nlp_flag |= NLP_DROPPED; spin_unlock_irq(&ndlp->lock); lpfc_nlp_put(ndlp); return; } spin_unlock_irq(&ndlp->lock); } } } return; input_err: #endif lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6168 State error: lport x%px, rport x%px FCID x%06x\n", vport->localport, ndlp->rport, ndlp->nlp_DID); } /** * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort * @phba: pointer to lpfc hba data structure. * @lpfc_ncmd: The nvme job structure for the request being aborted. * * This routine is invoked by the worker thread to process a SLI4 fast-path * NVME aborted xri. Aborted NVME IO commands are completed to the transport * here. **/ void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) { struct nvmefc_fcp_req *nvme_cmd = NULL; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6533 %s nvme_cmd %p tag x%x abort complete and " "xri released\n", __func__, lpfc_ncmd->nvmeCmd, lpfc_ncmd->cur_iocbq.iotag); /* Aborted NVME commands are required to not complete * before the abort exchange command fully completes. * Once completed, it is available via the put list. */ if (lpfc_ncmd->nvmeCmd) { nvme_cmd = lpfc_ncmd->nvmeCmd; nvme_cmd->transferred_length = 0; nvme_cmd->rcv_rsplen = 0; nvme_cmd->status = NVME_SC_INTERNAL; nvme_cmd->done(nvme_cmd); lpfc_ncmd->nvmeCmd = NULL; } lpfc_release_nvme_buf(phba, lpfc_ncmd); } /** * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the fcp xri abort wcqe structure. * @lpfc_ncmd: The nvme job structure for the request being aborted. * * This routine is invoked by the worker thread to process a SLI4 fast-path * NVME aborted xri. Aborted NVME IO commands are completed to the transport * here. **/ void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri, struct lpfc_io_buf *lpfc_ncmd) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); struct nvmefc_fcp_req *nvme_cmd = NULL; struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; if (ndlp) lpfc_sli4_abts_err_handler(phba, ndlp, axri); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6311 nvme_cmd %p xri x%x tag x%x abort complete and " "xri released\n", lpfc_ncmd->nvmeCmd, xri, lpfc_ncmd->cur_iocbq.iotag); /* Aborted NVME commands are required to not complete * before the abort exchange command fully completes. * Once completed, it is available via the put list. */ if (lpfc_ncmd->nvmeCmd) { nvme_cmd = lpfc_ncmd->nvmeCmd; nvme_cmd->done(nvme_cmd); lpfc_ncmd->nvmeCmd = NULL; } lpfc_release_nvme_buf(phba, lpfc_ncmd); } /** * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete * @phba: Pointer to HBA context object. * * This function flushes all wqes in the nvme rings and frees all resources * in the txcmplq. This function does not issue abort wqes for the IO * commands in txcmplq, they will just be returned with * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI * slot has been permanently disabled. 
**/ void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; u32 i, wait_cnt = 0; if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) return; /* Cycle through all IO rings and make sure all outstanding * WQEs have been removed from the txcmplqs. */ for (i = 0; i < phba->cfg_hdw_queue; i++) { if (!phba->sli4_hba.hdwq[i].io_wq) continue; pring = phba->sli4_hba.hdwq[i].io_wq->pring; if (!pring) continue; /* Retrieve everything on the txcmplq */ while (!list_empty(&pring->txcmplq)) { msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_cnt++; /* The sleep is 10mS. Every ten seconds, * dump a message. Something is wrong. */ if ((wait_cnt % 1000) == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6178 NVME IO not empty, " "cnt %d\n", wait_cnt); } } } /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); } void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, uint32_t stat, uint32_t param) { #if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_io_buf *lpfc_ncmd; struct nvmefc_fcp_req *nCmd; struct lpfc_wcqe_complete wcqe; struct lpfc_wcqe_complete *wcqep = &wcqe; lpfc_ncmd = pwqeIn->io_buf; if (!lpfc_ncmd) { lpfc_sli_release_iocbq(phba, pwqeIn); return; } /* For abort iocb just return, IO iocb will do a done call */ if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == CMD_ABORT_XRI_CX) { lpfc_sli_release_iocbq(phba, pwqeIn); return; } spin_lock(&lpfc_ncmd->buf_lock); nCmd = lpfc_ncmd->nvmeCmd; if (!nCmd) { spin_unlock(&lpfc_ncmd->buf_lock); lpfc_release_nvme_buf(phba, lpfc_ncmd); return; } spin_unlock(&lpfc_ncmd->buf_lock); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6194 NVME Cancel xri %x\n", lpfc_ncmd->cur_iocbq.sli4_xritag); wcqep->word0 = 0; bf_set(lpfc_wcqe_c_status, wcqep, stat); wcqep->parameter = param; wcqep->total_data_placed = 0; wcqep->word3 = 0; /* xb is 0 */ /* Call release with XB=1 to queue the IO into the abort list. */ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) bf_set(lpfc_wcqe_c_xb, wcqep, 1); memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep)); (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); #endif }
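/*
 * Editor's note (not part of the upstream lpfc_nvme.c): a short map of the
 * FCP fast path implemented above, using only routines that appear in this
 * file.
 *
 *   lpfc_nvme_fcp_io_submit()
 *     -> validate lport/rport/ndlp state; expedite admin-queue keep-alives
 *     -> pick a hardware queue (transport-supplied index, or the current
 *        CPU's hdwq when scheduling by CPU)
 *     -> lpfc_get_nvme_buf()      claim an XRI-backed lpfc_io_buf
 *     -> lpfc_nvme_prep_io_cmd()  fill WQE words from the iread/iwrite/icmnd
 *                                 templates (first burst, NSLER, VMID tags)
 *     -> lpfc_nvme_prep_io_dma()  build the data SGL (LSP chaining, PBDE)
 *     -> lpfc_sli4_issue_wqe()    post the WQE to the selected WQ
 *
 *   lpfc_nvme_io_cmd_cmpl()
 *     -> translate the WCQE into nvmefc_fcp_req status/lengths (including
 *        the synthesized ERSP for successful short transfers)
 *     -> if the exchange is still busy (XB set), defer nCmd->done() and let
 *        lpfc_release_nvme_buf() park the buffer on the abort list;
 *        otherwise complete to the transport and recycle the buffer
 *
 * The block below is an editor-added, stand-alone sketch of the first-burst
 * clamp applied to WQE word 5 in lpfc_nvme_prep_io_cmd().  The function name
 * and the example sizes are hypothetical; it is kept compiled out so it does
 * not change the driver.
 */
#if 0	/* illustrative only, build as a userspace program if desired */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the word-5 logic: when first burst was
 * negotiated, the initial transfer length is the smaller of the payload
 * length and the node's advertised first-burst size; otherwise it is 0.
 */
static uint32_t first_burst_len(uint32_t payload_len, uint32_t fb_size,
				int fb_negotiated)
{
	if (!fb_negotiated)
		return 0;
	return payload_len < fb_size ? payload_len : fb_size;
}

int main(void)
{
	printf("%u\n", (unsigned)first_burst_len(8192, 4096, 1)); /* 4096 */
	printf("%u\n", (unsigned)first_burst_len(2048, 4096, 1)); /* 2048 */
	printf("%u\n", (unsigned)first_burst_len(2048, 4096, 0)); /* 0 */
	return 0;
}
#endif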
linux-master
drivers/scsi/lpfc/lpfc_nvme.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/interrupt.h> #include <linux/dma-direction.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_crtn.h" /* * lpfc_get_vmid_from_hashtable - search the UUID in the hash table * @vport: The virtual port for which this call is being executed. * @hash: calculated hash value * @buf: uuid associated with the VE * Return the VMID entry associated with the UUID * Make sure to acquire the appropriate lock before invoking this routine. */ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, u32 hash, u8 *buf) { struct lpfc_vmid *vmp; hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { if (memcmp(&vmp->host_vmid[0], buf, 16) == 0) return vmp; } return NULL; } /* * lpfc_put_vmid_in_hashtable - put the VMID in the hash table * @vport: The virtual port for which this call is being executed. * @hash - calculated hash value * @vmp: Pointer to a VMID entry representing a VM sending I/O * * This routine will insert the newly acquired VMID entity in the hash table. * Make sure to acquire the appropriate lock before invoking this routine. */ static void lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, struct lpfc_vmid *vmp) { hash_add(vport->hash_table, &vmp->hnode, hash); } /* * lpfc_vmid_hash_fn - create a hash value of the UUID * @vmid: uuid associated with the VE * @len: length of the VMID string * Returns the calculated hash value */ int lpfc_vmid_hash_fn(const char *vmid, int len) { int c; int hash = 0; if (len == 0) return 0; while (len--) { c = *vmid++; if (c >= 'A' && c <= 'Z') c += 'a' - 'A'; hash = (hash + (c << LPFC_VMID_HASH_SHIFT) + (c >> LPFC_VMID_HASH_SHIFT)) * 19; } return hash & LPFC_VMID_HASH_MASK; } /* * lpfc_vmid_update_entry - update the vmid entry in the hash table * @vport: The virtual port for which this call is being executed. 
* @iodir: io direction * @vmp: Pointer to a VMID entry representing a VM sending I/O * @tag: VMID tag */ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, enum dma_data_direction iodir, struct lpfc_vmid *vmp, union lpfc_vmid_io_tag *tag) { u64 *lta; if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; else if (vport->phba->cfg_vmid_app_header) tag->app_id = vmp->un.app_id; if (iodir == DMA_TO_DEVICE) vmp->io_wr_cnt++; else if (iodir == DMA_FROM_DEVICE) vmp->io_rd_cnt++; /* update the last access timestamp in the table */ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); *lta = jiffies; } static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, struct lpfc_vmid *vmid) { u32 hash; struct lpfc_vmid *pvmid; if (vport->port_type == LPFC_PHYSICAL_PORT) { vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); } else { hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); pvmid = lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, vmid->host_vmid); if (pvmid) vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; else vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); } } /* * lpfc_vmid_get_appid - get the VMID associated with the UUID * @vport: The virtual port for which this call is being executed. * @uuid: UUID associated with the VE * @cmd: address of scsi_cmd descriptor * @iodir: io direction * @tag: VMID tag * Returns status of the function */ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, enum dma_data_direction iodir, union lpfc_vmid_io_tag *tag) { struct lpfc_vmid *vmp = NULL; int hash, len, rc = -EPERM, i; /* check if QFPA is complete */ if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) && (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) { vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; return -EAGAIN; } /* search if the UUID has already been mapped to the VMID */ len = strlen(uuid); hash = lpfc_vmid_hash_fn(uuid, len); /* search for the VMID in the table */ read_lock(&vport->vmid_lock); vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); /* if found, check if its already registered */ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { read_unlock(&vport->vmid_lock); lpfc_vmid_update_entry(vport, iodir, vmp, tag); rc = 0; } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || vmp->flag & LPFC_VMID_DE_REGISTER)) { /* else if register or dereg request has already been sent */ /* Hence VMID tag will not be added for this I/O */ read_unlock(&vport->vmid_lock); rc = -EBUSY; } else { /* The VMID was not found in the hashtable. 
At this point, */ /* drop the read lock first before proceeding further */ read_unlock(&vport->vmid_lock); /* start the process to obtain one as per the */ /* type of the VMID indicated */ write_lock(&vport->vmid_lock); vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); /* while the read lock was released, in case the entry was */ /* added by other context or is in process of being added */ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { lpfc_vmid_update_entry(vport, iodir, vmp, tag); write_unlock(&vport->vmid_lock); return 0; } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { write_unlock(&vport->vmid_lock); return -EBUSY; } /* else search and allocate a free slot in the hash table */ if (vport->cur_vmid_cnt < vport->max_vmid) { for (i = 0; i < vport->max_vmid; i++) { vmp = vport->vmid + i; if (vmp->flag == LPFC_VMID_SLOT_FREE) break; } if (i == vport->max_vmid) vmp = NULL; } else { vmp = NULL; } if (!vmp) { write_unlock(&vport->vmid_lock); return -ENOMEM; } /* Add the vmid and register */ lpfc_put_vmid_in_hashtable(vport, hash, vmp); vmp->vmid_len = len; memcpy(vmp->host_vmid, uuid, vmp->vmid_len); vmp->io_rd_cnt = 0; vmp->io_wr_cnt = 0; vmp->flag = LPFC_VMID_SLOT_USED; vmp->delete_inactive = vport->vmid_inactivity_timeout ? 1 : 0; /* if type priority tag, get next available VMID */ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) lpfc_vmid_assign_cs_ctl(vport, vmp); /* allocate the per cpu variable for holding */ /* the last access time stamp only if VMID is enabled */ if (!vmp->last_io_time) vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC); if (!vmp->last_io_time) { hash_del(&vmp->hnode); vmp->flag = LPFC_VMID_SLOT_FREE; write_unlock(&vport->vmid_lock); return -EIO; } write_unlock(&vport->vmid_lock); /* complete transaction with switch */ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) rc = lpfc_vmid_uvem(vport, vmp, true); else if (vport->phba->cfg_vmid_app_header) rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp); if (!rc) { write_lock(&vport->vmid_lock); vport->cur_vmid_cnt++; vmp->flag |= LPFC_VMID_REQ_REGISTER; write_unlock(&vport->vmid_lock); } else { write_lock(&vport->vmid_lock); hash_del(&vmp->hnode); vmp->flag = LPFC_VMID_SLOT_FREE; free_percpu(vmp->last_io_time); write_unlock(&vport->vmid_lock); return -EIO; } /* finally, enable the idle timer once */ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { mod_timer(&vport->phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; } } return rc; } /* * lpfc_reinit_vmid - reinitializes the vmid data structure * @vport: pointer to vport data structure * * This routine reinitializes the vmid post flogi completion * * Return codes * None */ void lpfc_reinit_vmid(struct lpfc_vport *vport) { u32 bucket, i, cpu; struct lpfc_vmid *cur; struct lpfc_vmid *vmp = NULL; struct hlist_node *tmp; write_lock(&vport->vmid_lock); vport->cur_vmid_cnt = 0; for (i = 0; i < vport->max_vmid; i++) { vmp = &vport->vmid[i]; vmp->flag = LPFC_VMID_SLOT_FREE; memset(vmp->host_vmid, 0, sizeof(vmp->host_vmid)); vmp->io_rd_cnt = 0; vmp->io_wr_cnt = 0; if (vmp->last_io_time) for_each_possible_cpu(cpu) *per_cpu_ptr(vmp->last_io_time, cpu) = 0; } /* for all elements in the hash table */ if (!hash_empty(vport->hash_table)) hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode) hash_del(&cur->hnode); write_unlock(&vport->vmid_lock); }
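/*
 * Editor's illustration (not part of the upstream lpfc_vmid.c): a
 * stand-alone, userspace rendering of the UUID bucketing performed by
 * lpfc_vmid_hash_fn() above, so the hashing scheme can be exercised outside
 * the kernel.  The DEMO_* shift/mask values are placeholders chosen for the
 * sketch; the real constants live in lpfc.h and may differ.  Kept compiled
 * out so it does not alter the driver.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <string.h>

#define DEMO_VMID_HASH_SHIFT	6	/* assumed, see LPFC_VMID_HASH_SHIFT */
#define DEMO_VMID_HASH_MASK	0xff	/* assumed, see LPFC_VMID_HASH_MASK */

/* Mirrors lpfc_vmid_hash_fn(): case-fold each byte, mix it in with a
 * shift-and-multiply step, then mask down to the hash table size.
 */
static int demo_vmid_hash(const char *vmid, int len)
{
	int c, hash = 0;

	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';
		hash = (hash + (c << DEMO_VMID_HASH_SHIFT) +
			(c >> DEMO_VMID_HASH_SHIFT)) * 19;
	}
	return hash & DEMO_VMID_HASH_MASK;
}

int main(void)
{
	const char *uuid = "12345678-1234-1234-1234-123456789abc";

	printf("bucket %d\n", demo_vmid_hash(uuid, (int)strlen(uuid)));
	return 0;
}
#endif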
linux-master
drivers/scsi/lpfc/lpfc_vmid.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* Called to verify a rcv'ed ADISC was intended for us. */ static int lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { /* First, we MUST have a RPI registered */ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) return 0; /* Compare the ADISC rsp WWNN / WWPN matches our internal node * table entry for that node. */ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name))) return 0; if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name))) return 0; /* we match, return success */ return 1; } int lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct serv_parm *sp, uint32_t class, int flogi) { volatile struct serv_parm *hsp = &vport->fc_sparam; uint16_t hsp_value, ssp_value = 0; /* * The receive data field size and buffer-to-buffer receive data field * size entries are 16 bits but are represented as two 8-bit fields in * the driver data structure to account for rsvd bits and other control * bits. Reconstruct and compare the fields as a 16-bit values before * correcting the byte values. 
*/ if (sp->cls1.classValid) { if (!flogi) { hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) | hsp->cls1.rcvDataSizeLsb); ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) | sp->cls1.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) { sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; } } } else if (class == CLASS1) goto bad_service_param; if (sp->cls2.classValid) { if (!flogi) { hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) | hsp->cls2.rcvDataSizeLsb); ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) | sp->cls2.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) { sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; } } } else if (class == CLASS2) goto bad_service_param; if (sp->cls3.classValid) { if (!flogi) { hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) | hsp->cls3.rcvDataSizeLsb); ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) | sp->cls3.rcvDataSizeLsb); if (!ssp_value) goto bad_service_param; if (ssp_value > hsp_value) { sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; } } } else if (class == CLASS3) goto bad_service_param; /* * Preserve the upper four bits of the MSB from the PLOGI response. * These bits contain the Buffer-to-Buffer State Change Number * from the target and need to be passed to the FW. */ hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb; ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb; if (ssp_value > hsp_value) { sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb; sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) | (hsp->cmn.bbRcvSizeMsb & 0x0F); } memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); return 1; bad_service_param: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0207 Device %x " "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " "invalid service parameters. Ignoring device.\n", ndlp->nlp_DID, sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1], sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3], sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5], sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]); return 0; } static void * lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_dmabuf *pcmd, *prsp; uint32_t *lp; void *ptr = NULL; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); pcmd = cmdiocb->cmd_dmabuf; /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay * freeing associated memory till after ABTS completes. */ if (pcmd) { prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (prsp) { lp = (uint32_t *) prsp->virt; ptr = (void *)((uint8_t *)lp + sizeof(uint32_t)); } } else { /* Force ulp_status error since we are returning NULL ptr */ if (!(ulp_status)) { if (phba->sli_rev == LPFC_SLI_REV4) { bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl, IOSTAT_LOCAL_REJECT); rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; } else { rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; } } ptr = NULL; } return ptr; } /* * Free resources / clean up outstanding I/Os * associated with a LPFC_NODELIST entry. This * routine effectively results in a "software abort". 
 */
void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* In case of error recovery path, we might have a NULL pring here */
	if (unlikely(!pring))
		return;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "2819 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	/* Clean up all fabric IOs first.*/
	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
	 * of all ELS IOs that need an ABTS. The IOs need to stay on the
	 * txcmplq so that the abort operation completes them successfully.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_add_tail(&iocb->dlist, &abort_list);
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
		spin_unlock_irq(&phba->hbalock);
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	INIT_LIST_HEAD(&abort_list);

	/* Now process the txq */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			list_del_init(&iocb->list);
			list_add_tail(&iocb->list, &abort_list);
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &abort_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}

/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
 * @phba: pointer to lpfc hba data structure.
 * @login_mbox: pointer to REG_RPI mailbox object
 *
 * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
 */
static void
lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
{
	struct lpfc_iocbq *save_iocb;
	struct lpfc_nodelist *ndlp;
	MAILBOX_t *mb = &login_mbox->u.mb;
	int rc;

	ndlp = login_mbox->ctx_ndlp;
	save_iocb = login_mbox->context3;

	if (mb->mbxStatus == MBX_SUCCESS) {
		/* Now that REG_RPI completed successfully,
		 * we can proceed with sending the PLOGI ACC.
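		 * The saved PLOGI IOCB (login_mbox->context3) is used to
		 * build that ACC and is freed at the end of this routine.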
*/ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI, save_iocb, ndlp, NULL); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "4576 PLOGI ACC fails pt2pt discovery: " "DID %x Data: %x\n", ndlp->nlp_DID, rc); } } /* Now process the REG_RPI cmpl */ lpfc_mbx_cmpl_reg_login(phba, login_mbox); ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; kfree(save_iocb); } static int lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint64_t nlp_portwwn = 0; uint32_t *lp; union lpfc_wqe128 *wqe; IOCB_t *icmd; struct serv_parm *sp; uint32_t ed_tov; LPFC_MBOXQ_t *link_mbox; LPFC_MBOXQ_t *login_mbox; struct lpfc_iocbq *save_iocb; struct ls_rjt stat; uint32_t vid, flag; int rc; u32 remote_did; memset(&stat, 0, sizeof (struct ls_rjt)); pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); if (wwn_to_u64(sp->portName.u.wwn) == 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0140 PLOGI Reject: invalid pname\n"); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0141 PLOGI Reject: invalid nname\n"); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn); if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) { /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } if (phba->sli_rev == LPFC_SLI_REV4) wqe = &cmdiocb->wqe; else icmd = &cmdiocb->iocb; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0114 PLOGI chkparm OK Data: x%x x%x x%x " "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi, vport->port_state, vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; else ndlp->nlp_fcp_info |= CLASS3; ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; if (sp->cls2.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS3; if (sp->cls4.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) break; fallthrough; case NLP_STE_REG_LOGIN_ISSUE: case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: /* For initiators, lpfc_plogi_confirm_nport skips fabric did. * For target mode, execute implicit logo. * Fabric nodes go into NPR. 
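		 * Non-fabric, non-NVMET nodes are ACCed immediately below
		 * after their FC4 roles and first-burst state are cleared;
		 * otherwise the node is moved back to NPR before the new
		 * login proceeds.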
*/ if (!(ndlp->nlp_type & NLP_FABRIC) && !(phba->nvmet_support)) { /* Clear ndlp info, since follow up PRLI may have * updated ndlp information */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; ndlp->nlp_flag &= ~NLP_FIRSTBURST; lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); return 1; } if (nlp_portwwn != 0 && nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0143 PLOGI recv'd from DID: x%x " "WWPN changed: old %llx new %llx\n", ndlp->nlp_DID, (unsigned long long)nlp_portwwn, (unsigned long long) wwn_to_u64(sp->portName.u.wwn)); /* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) lpfc_nvmet_invalidate_host(phba, ndlp); ndlp->nlp_prev_state = ndlp->nlp_state; /* rport needs to be unregistered first */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); break; } ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; ndlp->nlp_flag &= ~NLP_FIRSTBURST; login_mbox = NULL; link_mbox = NULL; save_iocb = NULL; /* Check for Nport to NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { /* rcv'ed PLOGI decides what our NPortId will be */ if (phba->sli_rev == LPFC_SLI_REV4) { vport->fc_myDID = bf_get(els_rsp64_sid, &cmdiocb->wqe.xmit_els_rsp); } else { vport->fc_myDID = icmd->un.rcvels.parmRo; } /* If there is an outstanding FLOGI, abort it now. * The remote NPort is not going to ACC our FLOGI * if its already issuing a PLOGI for pt2pt mode. * This indicates our FLOGI was dropped; however, we * must have ACCed the remote NPorts FLOGI to us * to make it here. */ if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) lpfc_els_abort_flogi(phba); ed_tov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) { /* E_D_TOV ticks are in nanoseconds */ ed_tov = (phba->fc_edtov + 999999) / 1000000; } /* * For pt-to-pt, use the larger EDTOV * RATOV = 2 * EDTOV */ if (ed_tov > phba->fc_edtov) phba->fc_edtov = ed_tov; phba->fc_ratov = (2 * phba->fc_edtov) / 1000; memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4, * to account for updated TOV's / parameters */ if (phba->sli_rev == LPFC_SLI_REV4) lpfc_issue_reg_vfi(vport); else { link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!link_mbox) goto out; lpfc_config_link(phba, link_mbox); link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; link_mbox->vport = vport; /* The default completion handling for CONFIG_LINK * does not require the ndlp so no reference is needed. 
*/ link_mbox->ctx_ndlp = ndlp; rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(link_mbox, phba->mbox_mem_pool); goto out; } } lpfc_can_disctmo(vport); } ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) ndlp->nlp_flag |= NLP_SUPPRESS_RSP; } login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!login_mbox) goto out; save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); if (!save_iocb) goto out; /* Save info from cmd IOCB to be used in rsp after all mbox completes */ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, sizeof(struct lpfc_iocbq)); /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ if (phba->sli_rev == LPFC_SLI_REV4) lpfc_unreg_rpi(vport, ndlp); /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will * always be deferring the ACC. */ if (phba->sli_rev == LPFC_SLI_REV4) remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); else remote_did = icmd->un.rcvels.remoteID; rc = lpfc_reg_rpi(phba, vport->vpi, remote_did, (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); if (rc) goto out; login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; login_mbox->vport = vport; /* * If there is an outstanding PLOGI issued, abort it before * sending ACC rsp for received PLOGI. If pending plogi * is not canceled here, the plogi will be rejected by * remote port and will be retried. On a configuration with * single discovery thread, this will cause a huge delay in * discovery. Also this will cause multiple state machines * running in parallel for this node. * This only applies to a fabric environment. */ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && (vport->fc_flag & FC_FABRIC)) { /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); } if ((vport->port_type == LPFC_NPIV_PORT && vport->cfg_restrict_login)) { /* no deferred ACC */ kfree(save_iocb); /* This is an NPIV SLI4 instance that does not need to register * a default RPI. */ if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_mbox_rsrc_cleanup(phba, login_mbox, MBOX_THD_UNLOCKED); login_mbox = NULL; } else { /* In order to preserve RPIs, we want to cleanup * the default RPI the firmware created to rcv * this ELS request. The only way to do this is * to register, then unregister the RPI. 
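			 * The NLP_RM_DFLT_RPI flag set below requests that
			 * this temporary registration be undone again once
			 * the deferred mailbox and response handling
			 * completes.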
*/ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); spin_unlock_irq(&ndlp->lock); } stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, login_mbox); if (rc && login_mbox) lpfc_mbox_rsrc_cleanup(phba, login_mbox, MBOX_THD_UNLOCKED); return 1; } /* So the order here should be: * SLI3 pt2pt * Issue CONFIG_LINK mbox * CONFIG_LINK cmpl * SLI4 pt2pt * Issue REG_VFI mbox * REG_VFI cmpl * SLI4 * Issue UNREG RPI mbx * UNREG RPI cmpl * Issue REG_RPI mbox * REG RPI cmpl * Issue PLOGI ACC * PLOGI ACC cmpl */ login_mbox->mbox_cmpl = lpfc_defer_plogi_acc; login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp); if (!login_mbox->ctx_ndlp) goto out; login_mbox->context3 = save_iocb; /* For PLOGI ACC */ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); spin_unlock_irq(&ndlp->lock); /* Start the ball rolling by issuing REG_LOGIN here */ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_nlp_put(ndlp); goto out; } lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); return 1; out: kfree(save_iocb); if (login_mbox) mempool_free(login_mbox, phba->mbox_mem_pool); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } /** * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object * * This routine is invoked to issue a completion to a rcv'ed * ADISC or PDISC after the paused RPI has been resumed. **/ static void lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp; uint32_t cmd; elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf; ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp; vport = mboxq->vport; cmd = elsiocb->drvrTimeout; if (cmd == ELS_CMD_ADISC) { lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp); } else { lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb, ndlp, NULL); } /* This nlp_put pairs with lpfc_sli4_resume_rpi */ lpfc_nlp_put(ndlp); kfree(elsiocb); mempool_free(mboxq, phba->mbox_mem_pool); } static int lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; struct lpfc_dmabuf *pcmd; struct serv_parm *sp; struct lpfc_name *pnn, *ppn; struct ls_rjt stat; ADISC *ap; uint32_t *lp; uint32_t cmd; pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; cmd = *lp++; if (cmd == ELS_CMD_ADISC) { ap = (ADISC *) lp; pnn = (struct lpfc_name *) & ap->nodeName; ppn = (struct lpfc_name *) & ap->portName; } else { sp = (struct serv_parm *) lp; pnn = (struct lpfc_name *) & sp->nodeName; ppn = (struct lpfc_name *) & sp->portName; } if (get_job_ulpstatus(phba, cmdiocb) == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) { /* * As soon as we send ACC, the remote NPort can * start sending us data. Thus, for SLI4 we must * resume the RPI before the ACC goes out. 
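		 * The received IOCB is copied and handed to
		 * lpfc_mbx_cmpl_resume_rpi above, which sends the deferred
		 * ADISC or PLOGI ACC once the RESUME_RPI mailbox completes.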
*/ if (vport->phba->sli_rev == LPFC_SLI_REV4) { elsiocb = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); if (elsiocb) { /* Save info from cmd IOCB used in rsp */ memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb, sizeof(struct lpfc_iocbq)); /* Save the ELS cmd */ elsiocb->drvrTimeout = cmd; lpfc_sli4_resume_rpi(ndlp, lpfc_mbx_cmpl_resume_rpi, elsiocb); goto out; } } if (cmd == ELS_CMD_ADISC) { lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); } else { lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); } out: /* If we are authenticated, move to the proper state. * It is possible an ADISC arrived and the remote nport * is already in MAPPED or UNMAPPED state. Catch this * condition and don't set the nlp_state again because * it causes an unnecessary transport unregister/register. * * Nodes marked for ADISC will move MAPPED or UNMAPPED state * after issuing ADISC */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && !(ndlp->nlp_flag & NLP_NPR_ADISC)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } return 1; } /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; stat.un.b.vendorUnique = 0; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return 0; } static int lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_vport **vports; int i, active_vlink_present = 0 ; /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. */ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(&ndlp->lock); if (els_cmd == ELS_CMD_PRLO) lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); /* This clause allows the initiator to ACC the LOGO back to the * Fabric Domain Controller. It does deliberately skip all other * steps because some fabrics send RDP requests after logging out * from the initiator. */ if (ndlp->nlp_type & NLP_FABRIC && ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) return 0; /* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) lpfc_nvmet_invalidate_host(phba, ndlp); if (ndlp->nlp_DID == Fabric_DID) { if (vport->port_state <= LPFC_FDISC || vport->fc_flag & FC_PT2PT) goto out; lpfc_linkdown_port(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_LOGO_RCVD; spin_unlock_irq(shost->host_lock); vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if ((!(vports[i]->fc_flag & FC_VPORT_LOGO_RCVD)) && (vports[i]->port_state > LPFC_FDISC)) { active_vlink_present = 1; break; } } lpfc_destroy_vport_work_array(phba, vports); } /* * Don't re-instantiate if vport is marked for deletion. 
* If we are here first then vport_delete is going to wait * for discovery to complete. */ if (!(vport->load_flag & FC_UNLOADING) && active_vlink_present) { /* * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG; spin_unlock_irq(shost->host_lock); lpfc_retry_pport_discovery(phba); } } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3203 LOGO recover nport x%06x state x%x " "ntype x%x fc_flag x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_type, vport->fc_flag); /* Special cases for rports that recover post LOGO. */ if ((!(ndlp->nlp_type == NLP_FABRIC) && (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) || vport->fc_flag & FC_PT2PT)) || (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE || ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " "nflag x%x lastels x%x ref cnt %u", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); } } out: /* Unregister from backend, could have been skipped due to ADISC */ lpfc_nlp_unreg_node(vport, ndlp); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an * unreg_login, the driver waits so the ACC does not get aborted. */ return 0; } static uint32_t lpfc_rcv_prli_support_check(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { struct ls_rjt stat; uint32_t *payload; uint32_t cmd; payload = cmdiocb->cmd_dmabuf->virt; cmd = *payload; if (vport->phba->nvmet_support) { /* Must be a NVME PRLI */ if (cmd == ELS_CMD_PRLI) goto out; } else { /* Initiator mode. 
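		 * Only an NVME PRLI is screened here: it is rejected when
		 * NVME initiator support is not enabled, while an FCP PRLI
		 * always passes.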
*/ if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI)) goto out; } return 1; out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC, "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " "state x%x flags x%x\n", cmd, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); memset(&stat, 0, sizeof(struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED; stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } static void lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint32_t *lp; PRLI *npr; struct fc_rport *rport = ndlp->rport; u32 roles; pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *)pcmd->virt; npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t)); if ((npr->prliType == PRLI_FCP_TYPE) || (npr->prliType == PRLI_NVME_TYPE)) { if (npr->initiatorFunc) { if (npr->prliType == PRLI_FCP_TYPE) ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_INITIATOR; } if (npr->targetFunc) { if (npr->prliType == PRLI_FCP_TYPE) ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) ndlp->nlp_flag |= NLP_FIRSTBURST; } if (npr->Retry && ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; if (npr->Retry && phba->nsler && ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET)) ndlp->nlp_nvme_info |= NLP_NVME_NSLER; /* If this driver is in nvme target mode, set the ndlp's fc4 * type to NVME provided the PRLI response claims NVME FC4 * type. Target mode does not issue gft_id so doesn't get * the fc4 type set until now. */ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } /* Fabric Controllers send FCP PRLI as an initiator but should * not get recognized as FCP type and registered with transport. */ if (npr->prliType == PRLI_FCP_TYPE && !(ndlp->nlp_type & NLP_FABRIC)) ndlp->nlp_fc4_type |= NLP_FC4_FCP; } if (rport) { /* We need to update the rport role values */ roles = FC_RPORT_ROLE_UNKNOWN; if (ndlp->nlp_type & NLP_FCP_INITIATOR) roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (ndlp->nlp_type & NLP_FCP_TARGET) roles |= FC_RPORT_ROLE_FCP_TARGET; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport rolechg: role:x%x did:x%x flg:x%x", roles, ndlp->nlp_DID, ndlp->nlp_flag); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_remote_port_rolechg(rport, roles); } } static uint32_t lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); return 0; } if (!(vport->fc_flag & FC_PT2PT)) { /* Check config parameter use-adisc or FCP-2 */ if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && (ndlp->nlp_type & NLP_FCP_TARGET)))) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); return 1; } } spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); lpfc_unreg_rpi(vport, ndlp); return 0; } /** * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. * @phba : Pointer to lpfc_hba structure. * @vport: Pointer to lpfc_vport structure. * @ndlp: Pointer to lpfc_nodelist structure. 
* @rpi : rpi to be release. * * This function will send a unreg_login mailbox command to the firmware * to release a rpi. **/ static void lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t rpi) { LPFC_MBOXQ_t *pmb; int rc; /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ if (ndlp->nlp_flag & NLP_UNREG_INP) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " "NPort x%x deferred x%x flg x%x " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); return; } pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2796 mailbox memory allocation failed \n"); else { lpfc_unreg_login(phba, vport->vpi, rpi, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; pmb->vport = vport; pmb->ctx_ndlp = lpfc_nlp_get(ndlp); if (!pmb->ctx_ndlp) { mempool_free(pmb, phba->mbox_mem_pool); return; } if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!(vport->fc_flag & FC_OFFLINE_MODE))) ndlp->nlp_flag |= NLP_UNREG_INP; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1437 release_rpi UNREG x%x " "on NPort x%x flg x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_nlp_put(ndlp); mempool_free(pmb, phba->mbox_mem_pool); } } } static uint32_t lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba; LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; uint16_t rpi; phba = vport->phba; /* Release the RPI if reglogin completing */ if (!(phba->pport->load_flag & FC_UNLOADING) && (evt == NLP_EVT_CMPL_REG_LOGIN) && (!pmb->u.mb.mbxStatus)) { rpi = pmb->u.mb.un.varWords[0]; lpfc_release_rpi(phba, vport, ndlp, rpi); } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " "event x%x, state x%x Data: x%x x%x\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { /* This transition is only legal if we previously * rcv'ed a PLOGI. Since we don't want 2 discovery threads * working on the same NPortID, do nothing for this thread * to stop it. 
*/ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0272 Illegal State Transition: node x%x " "event x%x, state x%x Data: x%x x%x\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); } return ndlp->nlp_state; } /* Start of Discovery State Machine routines */ static uint32_t lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { return ndlp->nlp_state; } return NLP_STE_FREED_NODE; } static uint32_t lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { lpfc_issue_els_logo(vport, ndlp, 0); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(&ndlp->lock); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { return NLP_STE_FREED_NODE; } static uint32_t lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { return NLP_STE_FREED_NODE; } static uint32_t lpfc_device_recov_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb = arg; struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; uint32_t *lp = (uint32_t *) pcmd->virt; struct serv_parm *sp = (struct serv_parm *) (lp + 1); struct ls_rjt stat; int port_cmp; memset(&stat, 0, sizeof (struct ls_rjt)); /* For a PLOGI, we only accept if our portname is less * than the remote portname. 
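	 * If ours is greater than or equal, the incoming PLOGI is rejected
	 * with "command in progress" so that our own outstanding PLOGI wins
	 * the collision.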
*/ phba->fc_stat.elsLogiCol++; port_cmp = memcmp(&vport->fc_portname, &sp->portName, sizeof(struct lpfc_name)); if (port_cmp >= 0) { /* Reject this request because the remote node will accept ours */ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); } else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && (ndlp->nlp_flag & NLP_NPR_2B_DISC) && (vport->num_disc_nodes)) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); lpfc_end_rscn(vport); } } } /* If our portname was less */ return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; struct ls_rjt stat; memset(&stat, 0, sizeof (struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */ if (vport->phba->sli_rev == LPFC_SLI_REV3) ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag; /* software abort outstanding PLOGI */ lpfc_els_abort(vport->phba, ndlp); lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); if (evt == NLP_EVT_RCV_LOGO) { lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); } else { lpfc_issue_els_logo(vport, ndlp, 0); } /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_dmabuf *pcmd, *prsp; uint32_t *lp; uint32_t vid, flag; struct serv_parm *sp; uint32_t ed_tov; LPFC_MBOXQ_t *mbox; int rc; u32 ulp_status; u32 did; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->rsp_iocb; ulp_status = get_job_ulpstatus(phba, rspiocb); if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { /* Recovery from PLOGI collision logic */ return ndlp->nlp_state; } if (ulp_status) goto out; pcmd = cmdiocb->cmd_dmabuf; prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (!prsp) goto out; lp = (uint32_t *) prsp->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); /* Some switches have FDMI servers returning 0 for WWN */ if ((ndlp->nlp_DID != FDMI_DID) && (wwn_to_u64(sp->portName.u.wwn) == 0 || 
wwn_to_u64(sp->nodeName.u.wwn) == 0)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0142 PLOGI RSP: Invalid WWN.\n"); goto out; } if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0)) goto out; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) ndlp->nlp_fcp_info |= CLASS2; else ndlp->nlp_fcp_info |= CLASS3; ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; if (sp->cls2.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS3; if (sp->cls4.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_PLOGI)) { ed_tov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) { /* E_D_TOV ticks are in nanoseconds */ ed_tov = (phba->fc_edtov + 999999) / 1000000; } ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) ndlp->nlp_flag |= NLP_SUPPRESS_RSP; } /* * Use the larger EDTOV * RATOV = 2 * EDTOV for pt-to-pt */ if (ed_tov > phba->fc_edtov) phba->fc_edtov = ed_tov; phba->fc_ratov = (2 * phba->fc_edtov) / 1000; memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); /* Issue config_link / reg_vfi to account for updated TOV's */ if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_issue_reg_vfi(vport); } else { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; } lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); goto out; } } } lpfc_unreg_rpi(vport, ndlp); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; } did = get_job_els_rsp64_did(phba, cmdiocb); if (lpfc_reg_rpi(phba, vport->vpi, did, (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) { switch (ndlp->nlp_DID) { case NameServer_DID: mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; /* Fabric Controller Node needs these parameters. 
*/ memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm)); break; case FDMI_DID: mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; break; default: ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } mbox->ctx_ndlp = lpfc_nlp_get(ndlp); if (!mbox->ctx_ndlp) goto out; mbox->vport = vport; if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; /* decrement node reference count to the failed mbox * command */ lpfc_nlp_put(ndlp); lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } else { mempool_free(mbox, phba->mbox_mem_pool); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } out: if (ndlp->nlp_DID == NameServer_DID) { lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0261 Cannot Register NameServer login\n"); } /* ** In case the node reference counter does not go to zero, ensure that ** the stale state for the node is not processed. */ ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return NLP_STE_FREED_NODE; } static uint32_t lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { return ndlp->nlp_state; } static uint32_t lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba; LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; MAILBOX_t *mb = &pmb->u.mb; uint16_t rpi; phba = vport->phba; /* Release the RPI */ if (!(phba->pport->load_flag & FC_UNLOADING) && !mb->mbxStatus) { rpi = pmb->u.mb.un.varWords[0]; lpfc_release_rpi(phba, vport, ndlp, rpi); } return ndlp->nlp_state; } static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } else { /* software abort outstanding PLOGI */ lpfc_els_abort(vport->phba, ndlp); lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } static uint32_t lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; /* Don't do anything that will mess up processing of the * previous RSCN. 
*/ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp); cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); if (vport->num_disc_nodes) lpfc_more_adisc(vport); } return ndlp->nlp_state; } ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp); lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; /* Treat like rcv logo */ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb, *rspiocb; ADISC *ap; int rc; u32 ulp_status; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->rsp_iocb; ulp_status = get_job_ulpstatus(phba, rspiocb); ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); if ((ulp_status) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); lpfc_unreg_rpi(vport, ndlp); return ndlp->nlp_state; } if (phba->sli_rev == LPFC_SLI_REV4) { rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL); if (rc) { /* Stay in state and retry. 
*/ ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; return ndlp->nlp_state; } } if (ndlp->nlp_type & NLP_FCP_TARGET) ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (ndlp->nlp_type & NLP_NVME_TARGET) ndlp->nlp_fc4_type |= NLP_FC4_NVME; if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } else { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } return ndlp->nlp_state; } static uint32_t lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } else { /* software abort outstanding ADISC */ lpfc_els_abort(vport->phba, ndlp); lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } static uint32_t lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; /* Don't do anything that will mess up processing of the * previous RSCN. */ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; struct ls_rjt stat; if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) { return ndlp->nlp_state; } if (vport->phba->nvmet_support) { /* NVME Target mode. Handle and respond to the PRLI and * transition to UNMAPPED provided the RPI has completed * registration. */ if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } else { /* RPI registration has not completed. Reject the PRLI * to prevent an illegal state transition when the * rpi registration does complete. */ memset(&stat, 0, sizeof(struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } } else { /* Initiator mode. 
*/ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; LPFC_MBOXQ_t *mb; LPFC_MBOXQ_t *nextmb; cmdiocb = (struct lpfc_iocbq *) arg; /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; lpfc_nlp_put(ndlp); mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED); } } spin_unlock_irq(&phba->hbalock); lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; MAILBOX_t *mb = &pmb->u.mb; uint32_t did = mb->un.varWords[1]; if (mb->mbxStatus) { /* RegLogin failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0246 RegLogin failed Data: x%x x%x x%x x%x " "x%x\n", did, mb->mbxStatus, vport->port_state, mb->un.varRegLogin.vpi, mb->un.varRegLogin.rpi); /* * If RegLogin failed due to lack of HBA resources do not * retry discovery. */ if (mb->mbxStatus == MBXERR_RPI_FULL) { ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); return ndlp->nlp_state; } /* SLI4 ports have preallocated logical rpis. */ if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; /* Only if we are not a fabric nport do we issue PRLI */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3066 RegLogin Complete on x%x x%x x%x\n", did, ndlp->nlp_type, ndlp->nlp_fc4_type); if (!(ndlp->nlp_type & NLP_FABRIC) && (phba->nvmet_support == 0)) { /* The driver supports FCP and NVME concurrently. If the * ndlp's nlp_fc4_type is still zero, the driver doesn't * know what PRLI to send yet. Figure that out now and * call PRLI depending on the outcome. */ if (vport->fc_flag & FC_PT2PT) { /* If we are pt2pt, there is no Fabric to determine * the FC4 type of the remote nport. So if NVME * is configured try it. 
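			 * FCP is always attempted; NVME is added as well
			 * unless pt2pt NVME is disabled or the port is not
			 * configured for NVME.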
*/ ndlp->nlp_fc4_type |= NLP_FC4_FCP; if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); } } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { ndlp->nlp_fc4_type |= NLP_FC4_FCP; } else if (ndlp->nlp_fc4_type == 0) { /* If we are only configured for FCP, the driver * should just issue PRLI for FCP. Otherwise issue * GFT_ID to determine if remote port supports NVME. */ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0, ndlp->nlp_DID); return ndlp->nlp_state; } ndlp->nlp_fc4_type = NLP_FC4_FCP; } ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); if (lpfc_issue_els_prli(vport, ndlp, 0)) { lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } } else { if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support) phba->targetport->port_id = vport->fc_myDID; /* Only Fabric ports should transition. NVME target * must complete PRLI. */ if (ndlp->nlp_type & NLP_FABRIC) { ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } return ndlp->nlp_state; } static uint32_t lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } else { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } static uint32_t lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { /* Don't do anything that will mess up processing of the * previous RSCN. */ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); /* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed we don't need to ignore it. 
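	 * NLP_IGNR_REG_CMPL is therefore only set when the RPI is not yet
	 * registered or this is not an NVMET port.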
*/ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || !vport->phba->nvmet_support) ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) return ndlp->nlp_state; lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Software abort outstanding PRLI before sending acc */ lpfc_els_abort(vport->phba, ndlp); lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } /* This routine is envoked when we rcv a PRLO request from a nport * we are logged into. We should send back a PRLO rsp setting the * appropriate bits. * NEXT STATE = PRLI_ISSUE */ static uint32_t lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_hba *phba = vport->phba; PRLI *npr; struct lpfc_nvme_prli *nvpr; void *temp_ptr; u32 ulp_status; bool acc_imode_sps = false; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->rsp_iocb; ulp_status = get_job_ulpstatus(phba, rspiocb); /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp * format is different so NULL the two PRLI types so that the * driver correctly gets the correct context. */ npr = NULL; nvpr = NULL; temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ) npr = (PRLI *) temp_ptr; else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ) nvpr = (struct lpfc_nvme_prli *) temp_ptr; if (ulp_status) { if ((vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { goto out; } /* Adjust the nlp_type accordingly if the PRLI failed */ if (npr) ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; if (nvpr) ndlp->nlp_fc4_type &= ~NLP_FC4_NVME; /* We can't set the DSM state till BOTH PRLIs complete */ goto out_err; } if (npr && npr->prliType == PRLI_FCP_TYPE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "6028 FCP NPR PRLI Cmpl Init %d Target %d " "EIP %d AccCode x%x\n", npr->initiatorFunc, npr->targetFunc, npr->estabImagePair, npr->acceptRspCode); if (npr->acceptRspCode == PRLI_INV_SRV_PARM) { /* Strict initiators don't establish an image pair. 
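			 * An invalid-service-parameters accept code from an
			 * initiator-only port with no image pair is therefore
			 * still treated as a usable accept below.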
*/ if (npr->initiatorFunc && !npr->targetFunc && !npr->estabImagePair) acc_imode_sps = true; } if (npr->acceptRspCode == PRLI_REQ_EXECUTED || acc_imode_sps) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->writeXferRdyDis) ndlp->nlp_flag |= NLP_FIRSTBURST; } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } } else if (nvpr && (bf_get_be32(prli_acc_rsp_code, nvpr) == PRLI_REQ_EXECUTED) && (bf_get_be32(prli_type_code, nvpr) == PRLI_NVME_TYPE)) { /* Complete setting up the remote ndlp personality. */ if (bf_get_be32(prli_init, nvpr)) ndlp->nlp_type |= NLP_NVME_INITIATOR; if (phba->nsler && bf_get_be32(prli_nsler, nvpr) && bf_get_be32(prli_conf, nvpr)) ndlp->nlp_nvme_info |= NLP_NVME_NSLER; else ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; /* Target driver cannot solicit NVME FB. */ if (bf_get_be32(prli_tgt, nvpr)) { /* Complete the nvme target roles. The transport * needs to know if the rport is capable of * discovery in addition to its role. */ ndlp->nlp_type |= NLP_NVME_TARGET; if (bf_get_be32(prli_disc, nvpr)) ndlp->nlp_type |= NLP_NVME_DISCOVERY; /* * If prli_fba is set, the Target supports FirstBurst. * If prli_fb_sz is 0, the FirstBurst size is unlimited, * otherwise it defines the actual size supported by * the NVME Target. */ if ((bf_get_be32(prli_fba, nvpr) == 1) && (phba->cfg_nvme_enable_fb) && (!phba->nvmet_support)) { /* Both sides support FB. The target's first * burst size is a 512 byte encoded value. */ ndlp->nlp_flag |= NLP_FIRSTBURST; ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, nvpr); /* Expressed in units of 512 bytes */ if (ndlp->nvme_fb_size) ndlp->nvme_fb_size <<= LPFC_NVME_FB_SHIFT; else ndlp->nvme_fb_size = LPFC_NVME_MAX_FB; } } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6029 NVME PRLI Cmpl w1 x%08x " "w4 x%08x w5 x%08x flag x%x, " "fcp_info x%x nlp_type x%x\n", be32_to_cpu(nvpr->word1), be32_to_cpu(nvpr->word4), be32_to_cpu(nvpr->word5), ndlp->nlp_flag, ndlp->nlp_fcp_info, ndlp->nlp_type); } if (!(ndlp->nlp_type & NLP_FCP_TARGET) && (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_TARGET_REMOVE; spin_unlock_irq(&ndlp->lock); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } out_err: /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs * are complete. */ if (ndlp->fc4_prli_sent == 0) { ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); else if (ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } else lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3067 PRLI's still outstanding " "on x%06x - count %d, Pend Node Mode " "transition...\n", ndlp->nlp_DID, ndlp->fc4_prli_sent); return ndlp->nlp_state; } /*! lpfc_device_rm_prli_issue * * \pre * \post * \param phba * \param ndlp * \param arg * \param evt * \return uint32_t * * \b Description: * This routine is envoked when we a request to remove a nport we are in the * process of PRLIing. We should software abort outstanding prli, unreg * login, send a logout. We will change node state to UNUSED_NODE, put it * on plogi list so it can be freed when LOGO completes. 
* */ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } else { /* software abort outstanding PLOGI */ lpfc_els_abort(vport->phba, ndlp); lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } /*! lpfc_device_recov_prli_issue * * \pre * \post * \param phba * \param ndlp * \param arg * \param evt * \return uint32_t * * \b Description: * The routine is envoked when the state of a device is unknown, like * during a link down. We should remove the nodelist entry from the * unmapped list, issue a UNREG_LOGIN, do a software abort of the * outstanding PRLI command, then free the node entry. */ static uint32_t lpfc_device_recov_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; /* Don't do anything that will mess up processing of the * previous RSCN. */ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; /* software abort outstanding PRLI */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; struct ls_rjt stat; memset(&stat, 0, sizeof(struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; struct ls_rjt stat; memset(&stat, 0, sizeof(struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(&ndlp->lock); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; struct ls_rjt stat; memset(&stat, 0, sizeof(struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; struct ls_rjt stat; memset(&stat, 0, sizeof(struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t 
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { /* * DevLoss has timed out and is calling for Device Remove. * In this case, abort the LOGO and cleanup the ndlp */ lpfc_unreg_rpi(vport, ndlp); /* software abort outstanding PLOGI */ lpfc_els_abort(vport->phba, ndlp); lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t lpfc_device_recov_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { /* * Device Recovery events have no meaning for a node with a LOGO * outstanding. The LOGO has to complete first and handle the * node from that point. */ return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) return ndlp->nlp_state; lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } static uint32_t lpfc_device_rm_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t lpfc_device_recov_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; if (!lpfc_rcv_prli_support_check(vport, 
ndlp, cmdiocb)) return ndlp->nlp_state; lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* flush the target */ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Treat like rcv logo */ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } static uint32_t lpfc_device_recov_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { lpfc_disc_set_adisc(vport, ndlp); ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } static uint32_t lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { /* send PLOGI immediately, move to PLOGI issue state */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; struct ls_rjt stat; memset(&stat, 0, sizeof (struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. 
*/ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); /* * Do not start discovery if discovery is about to start * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(&ndlp->lock); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb, *rspiocb; u32 ulp_status; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->rsp_iocb; ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status) return NLP_STE_FREED_NODE; return ndlp->nlp_state; } static uint32_t lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb, *rspiocb; u32 ulp_status; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->rsp_iocb; ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* For the fabric port just clear the fc flags. 
*/ if (ndlp->nlp_DID == Fabric_DID) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); spin_unlock_irq(shost->host_lock); } lpfc_unreg_rpi(vport, ndlp); return ndlp->nlp_state; } static uint32_t lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb, *rspiocb; u32 ulp_status; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->rsp_iocb; ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; MAILBOX_t *mb = &pmb->u.mb; if (!mb->mbxStatus) { /* SLI4 ports have preallocated logical rpis. */ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; if (ndlp->nlp_flag & NLP_LOGO_ACC) { lpfc_unreg_rpi(vport, ndlp); } } else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } return ndlp->nlp_state; } static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { /* Don't do anything that will mess up processing of the * previous RSCN. */ if (vport->fc_flag & FC_RSCN_DEFERRED) return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; } /* This next section defines the NPort Discovery State Machine */ /* There are 4 different doubly linked lists that nodelist entries can reside on. * The plogi list and adisc list are used when Link Up discovery or RSCN * processing is needed. Each list holds the nodes that we will send PLOGI * or ADISC on. These lists will keep track of what nodes will be affected * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up). * The unmapped_list will contain all nodes that we have successfully logged * into at the Fibre Channel level. The mapped_list will contain all nodes * that are mapped FCP targets. */ /* * The bind list is a list of undiscovered (potentially non-existent) nodes * that we have saved binding information on. This information is used when * nodes transition from the unmapped to the mapped list. */ /* For UNUSED_NODE state, the node has just been allocated. * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list * and put on the unmapped list. For ADISC processing, the node is taken off * the ADISC list and placed on either the mapped or unmapped list (depending * on its previous state). Once on the unmapped list, a PRLI is issued and the * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is * changed to UNMAPPED_NODE. If the completion indicates a mapped * node, the node is taken off the unmapped list.
The binding list is checked * for a valid binding, or a binding is automatically assigned. If binding * assignment is unsuccessful, the node is left on the unmapped list. If * binding assignment is successful, the associated binding list entry (if * any) is removed, and the node is placed on the mapped list. */ /* * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers * expire, all affected nodes will receive a DEVICE_RM event. */ /* * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap * check, additional nodes may be added or removed (via DEVICE_RM) to / from * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated, * we will first process the ADISC list. 32 entries are processed initially and * ADISC is initiated for each one. Completions / Events for each node are * funneled through the state machine. As each node finishes ADISC processing, it * starts ADISC for any nodes waiting for ADISC processing. If no nodes are * waiting, and the ADISC list count is identically 0, then we are done. For * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we * can issue a CLEAR_LA and re-enable Link Events. Next we will process the PLOGI * list. 32 entries are processed initially and PLOGI is initiated for each one. * Completions / Events for each node are funneled through the state machine. As * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is * identically 0, then we are done. We have now completed discovery / RSCN * handling. Upon completion, ALL nodes should be on either the mapped or * unmapped lists.
*/ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = { /* Action routine Event Current State */ lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ lpfc_rcv_els_unused_node, /* RCV_PRLI */ lpfc_rcv_logo_unused_node, /* RCV_LOGO */ lpfc_rcv_els_unused_node, /* RCV_ADISC */ lpfc_rcv_els_unused_node, /* RCV_PDISC */ lpfc_rcv_els_unused_node, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_unused_node, /* DEVICE_RM */ lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */ lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */ lpfc_device_rm_plogi_issue, /* DEVICE_RM */ lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */ lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */ lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */ lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */ lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */ lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_adisc_issue, /* DEVICE_RM */ lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */ lpfc_rcv_prli_reglogin_issue, /* RCV_PLOGI */ lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */ lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */ lpfc_device_rm_reglogin_issue, /* DEVICE_RM */ lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */ lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */ lpfc_rcv_prli_prli_issue, /* RCV_PRLI */ lpfc_rcv_logo_prli_issue, /* RCV_LOGO */ lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_prli_issue, /* DEVICE_RM */ lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */ lpfc_rcv_prli_logo_issue, /* RCV_PRLI */ lpfc_rcv_logo_logo_issue, /* RCV_LOGO */ lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */ lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */ lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_logo_issue, /* DEVICE_RM */ 
lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */ lpfc_rcv_prli_unmap_node, /* RCV_PRLI */ lpfc_rcv_logo_unmap_node, /* RCV_LOGO */ lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */ lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */ lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_device_rm_unmap_node, /* DEVICE_RM */ lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */ lpfc_rcv_prli_mapped_node, /* RCV_PRLI */ lpfc_rcv_logo_mapped_node, /* RCV_LOGO */ lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */ lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */ lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */ lpfc_disc_illegal, /* CMPL_PLOGI */ lpfc_disc_illegal, /* CMPL_PRLI */ lpfc_disc_illegal, /* CMPL_LOGO */ lpfc_disc_illegal, /* CMPL_ADISC */ lpfc_disc_illegal, /* CMPL_REG_LOGIN */ lpfc_disc_illegal, /* DEVICE_RM */ lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */ lpfc_rcv_prli_npr_node, /* RCV_PRLI */ lpfc_rcv_logo_npr_node, /* RCV_LOGO */ lpfc_rcv_padisc_npr_node, /* RCV_ADISC */ lpfc_rcv_padisc_npr_node, /* RCV_PDISC */ lpfc_rcv_prlo_npr_node, /* RCV_PRLO */ lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */ lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */ lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */ lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */ lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */ lpfc_device_rm_npr_node, /* DEVICE_RM */ lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */ }; int lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { uint32_t cur_state, rc; uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); uint32_t got_ndlp = 0; uint32_t data1; if (lpfc_nlp_get(ndlp)) got_ndlp = 1; cur_state = ndlp->nlp_state; data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | ((uint32_t)ndlp->nlp_type)); /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " "state %d rpi x%x Data: x%x x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM in: evt:%d ste:%d did:x%x", evt, cur_state, ndlp->nlp_DID); func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; rc = (func) (vport, ndlp, arg, evt); /* DSM out state <rc> on NPort <nlp_DID> */ if (got_ndlp) { data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " "rpi x%x Data: x%x x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM out: ste:%d did:x%x flg:x%x", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0213 DSM out state %d on NPort free\n", rc); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM out: ste:%d did:x%x flg:x%x", rc, 0, 0); } return rc; }
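/*
 * Editor's note: an illustrative sketch, not part of the lpfc driver. The
 * dispatch above flattens a conceptual 2-D table [state][event] into a 1-D
 * array indexed by (cur_state * NLP_EVT_MAX_EVENT) + evt. The standalone,
 * guarded example below shows the same row-major indexing pattern using
 * hypothetical names (MY_MAX_STATE, MY_MAX_EVENT, my_action, my_dispatch);
 * it demonstrates the technique only and makes no claim about lpfc itself.
 */
#if 0	/* illustration only; never built as part of the driver */
#include <stdio.h>

#define MY_MAX_STATE	3
#define MY_MAX_EVENT	2

typedef int (*my_action_t)(int state, int event);

static int my_handle(int state, int event)
{
	/* A defined transition: report it and stay in the same state. */
	printf("handled: state %d event %d\n", state, event);
	return state;
}

static int my_illegal(int state, int event)
{
	/* An undefined transition: report it and stay in the same state. */
	printf("illegal: state %d event %d\n", state, event);
	return state;
}

/* Row-major table: row = current state, column = event code. */
static my_action_t my_action[MY_MAX_STATE * MY_MAX_EVENT] = {
	my_handle,  my_illegal,		/* state 0 */
	my_handle,  my_handle,		/* state 1 */
	my_illegal, my_handle,		/* state 2 */
};

static int my_dispatch(int state, int event)
{
	/* Same flattening as lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]. */
	return my_action[(state * MY_MAX_EVENT) + event](state, event);
}

int main(void)
{
	/* Usage: dispatch event 0 while in state 1; prints "handled: state 1 event 0". */
	return my_dispatch(1, 0);
}
#endif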
linux-master
drivers/scsi/lpfc/lpfc_nportdisc.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ /* See Fibre Channel protocol T11 FC-LS for details */ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <uapi/scsi/fc/fc_fs.h> #include <uapi/scsi/fc/fc_els.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry); static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb); static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); static int lpfc_max_els_tries = 3; static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); /** * lpfc_els_chk_latt - Check host link attention event for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine checks whether there is an outstanding host link * attention event during the discovery process with the @vport. It is done * by reading the HBA's Host Attention (HA) register. If there is any host * link attention events during this @vport's discovery process, the @vport * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall * be issued if the link state is not already in host link cleared state, * and a return code shall indicate whether the host link attention event * had happened. 
* * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport * state in LPFC_VPORT_READY, the request for checking host link attention * event will be ignored and a return code shall indicate no host link * attention event had happened. * * Return codes * 0 - no host link attention event happened * 1 - host link attention event happened **/ int lpfc_els_chk_latt(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; uint32_t ha_copy; if (vport->port_state >= LPFC_VPORT_READY || phba->link_state == LPFC_LINK_DOWN || phba->sli_rev > LPFC_SLI_REV3) return 0; /* Read the HBA Host Attention Register */ if (lpfc_readl(phba->HAregaddr, &ha_copy)) return 1; if (!(ha_copy & HA_LATT)) return 0; /* Pending Link Event during Discovery */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0237 Pending Link Event during " "Discovery: State x%x\n", phba->pport->port_state); /* CLEAR_LA should re-enable link attention events and * we should then immediately take a LATT event. The * LATT processing should call lpfc_linkdown() which * will cleanup any left over in-progress discovery * events. */ spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_ABORT_DISCOVERY; spin_unlock_irq(shost->host_lock); if (phba->link_state != LPFC_CLEAR_LA) lpfc_issue_clear_la(phba, vport); return 1; } /** * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure * @vport: pointer to a host virtual N_Port data structure. * @expect_rsp: flag indicating whether response is expected. * @cmd_size: size of the ELS command. * @retry: number of retries to the command when it fails. * @ndlp: pointer to a node-list data structure. * @did: destination identifier. * @elscmd: the ELS command code. * * This routine is used for allocating a lpfc-IOCB data structure from * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters * passed into the routine for discovery state machine to issue an Extended * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation * and preparation routine that is used by all the discovery state machine * routines and the ELS command-specific fields will be later set up by * the individual discovery machine routines after calling this routine * allocating and preparing a generic IOCB data structure. It fills in the * Buffer Descriptor Entries (BDEs), allocates buffers for both command * payload and response payload (if expected). The reference count on the * ndlp is incremented by 1 and the reference to the ndlp is put into * ndlp of the IOCB data structure for this IOCB to hold the ndlp * reference for the command's callback function to access later. * * Return code * Pointer to the newly allocated/prepared els iocb data structure * NULL - when els iocb data structure allocation/preparation failed **/ struct lpfc_iocbq * lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, u16 cmd_size, u8 retry, struct lpfc_nodelist *ndlp, u32 did, u32 elscmd) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; struct ulp_bde64_le *bpl; u32 timeout = 0; if (!lpfc_is_link_up(phba)) return NULL; /* Allocate buffer for command iocb */ elsiocb = lpfc_sli_get_iocbq(phba); if (!elsiocb) return NULL; /* * If this command is for fabric controller and HBA running * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 
*/ if ((did == Fabric_DID) && (phba->hba_flag & HBA_FIP_SUPPORT) && ((elscmd == ELS_CMD_FLOGI) || (elscmd == ELS_CMD_FDISC) || (elscmd == ELS_CMD_LOGO))) switch (elscmd) { case ELS_CMD_FLOGI: elsiocb->cmd_flag |= ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK); break; case ELS_CMD_FDISC: elsiocb->cmd_flag |= ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK); break; case ELS_CMD_LOGO: elsiocb->cmd_flag |= ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK); break; } else elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; /* fill in BDEs for command */ /* Allocate buffer for command payload */ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); if (pcmd) pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); if (!pcmd || !pcmd->virt) goto els_iocb_free_pcmb_exit; INIT_LIST_HEAD(&pcmd->list); /* Allocate buffer for response payload */ if (expect_rsp) { prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); if (prsp) prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &prsp->phys); if (!prsp || !prsp->virt) goto els_iocb_free_prsp_exit; INIT_LIST_HEAD(&prsp->list); } else { prsp = NULL; } /* Allocate buffer for Buffer ptr list */ pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); if (pbuflist) pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pbuflist->phys); if (!pbuflist || !pbuflist->virt) goto els_iocb_free_pbuf_exit; INIT_LIST_HEAD(&pbuflist->list); if (expect_rsp) { switch (elscmd) { case ELS_CMD_FLOGI: timeout = FF_DEF_RATOV * 2; break; case ELS_CMD_LOGO: timeout = phba->fc_ratov; break; default: timeout = phba->fc_ratov * 2; } /* Fill SGE for the num bde count */ elsiocb->num_bdes = 2; } if (phba->sli_rev == LPFC_SLI_REV4) bmp = pcmd; else bmp = pbuflist; lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, elscmd, timeout, expect_rsp); bpl = (struct ulp_bde64_le *)pbuflist->virt; bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); bpl->type_size = cpu_to_le32(cmd_size); bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); if (expect_rsp) { bpl++; bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); bpl->type_size = cpu_to_le32(FCELSSIZE); bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); } elsiocb->cmd_dmabuf = pcmd; elsiocb->bpl_dmabuf = pbuflist; elsiocb->retry = retry; elsiocb->vport = vport; elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; if (prsp) list_add(&prsp->list, &pcmd->list); if (expect_rsp) { /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " "NPORT x%x I/O tag: x%x, port state:x%x " "rpi x%x fc_flag:x%x\n", elscmd, did, elsiocb->iotag, vport->port_state, ndlp->nlp_rpi, vport->fc_flag); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " "NPORT x%x I/O tag: x%x, size: x%x " "port_state x%x rpi x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, cmd_size, vport->port_state, ndlp->nlp_rpi, vport->fc_flag); } return elsiocb; els_iocb_free_pbuf_exit: if (expect_rsp) lpfc_mbuf_free(phba, prsp->virt, prsp->phys); kfree(pbuflist); els_iocb_free_prsp_exit: lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); kfree(prsp); els_iocb_free_pcmb_exit: kfree(pcmd); lpfc_sli_release_iocbq(phba, elsiocb); return NULL; } /** * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport * @vport: pointer to a host virtual 
N_Port data structure. * * This routine issues a fabric registration login for a @vport. An * active ndlp node with Fabric_DID must already exist for this @vport. * The routine invokes two mailbox commands to carry out fabric registration * login through the HBA firmware: the first mailbox command requests the * HBA to perform link configuration for the @vport; and the second mailbox * command requests the HBA to perform the actual fabric registration login * with the @vport. * * Return code * 0 - successfully issued fabric registration login for @vport * -ENXIO -- failed to issue fabric registration login for @vport **/ int lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; struct lpfc_nodelist *ndlp; struct serv_parm *sp; int rc; int err = 0; sp = &phba->fc_fabparam; ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { err = 1; goto fail; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { err = 2; goto fail; } vport->port_state = LPFC_FABRIC_CFG_LINK; lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { err = 3; goto fail_free_mbox; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { err = 4; goto fail; } rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, ndlp->nlp_rpi); if (rc) { err = 5; goto fail_free_mbox; } mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; mbox->vport = vport; /* increment the reference count on ndlp to hold reference * for the callback routine. */ mbox->ctx_ndlp = lpfc_nlp_get(ndlp); if (!mbox->ctx_ndlp) { err = 6; goto fail_free_mbox; } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { err = 7; goto fail_issue_reg_login; } return 0; fail_issue_reg_login: /* decrement the reference count on ndlp just incremented * for the failed mbox command. */ lpfc_nlp_put(ndlp); fail_free_mbox: lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); fail: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0249 Cannot issue Register Fabric login: Err %d\n", err); return -ENXIO; } /** * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login * @vport: pointer to a host virtual N_Port data structure. * * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for * the @vport. This mailbox command is necessary for SLI4 port only. * * Return code * 0 - successfully issued REG_VFI for @vport * A failure code otherwise. 
**/ int lpfc_issue_reg_vfi(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mboxq = NULL; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *dmabuf = NULL; int rc = 0; /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ if ((phba->sli_rev == LPFC_SLI_REV4) && !(phba->link_flag & LS_LOOPBACK_MODE) && !(vport->fc_flag & FC_PT2PT)) { ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { rc = -ENODEV; goto fail; } } mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { rc = -ENOMEM; goto fail; } /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { rc = lpfc_mbox_rsrc_prep(phba, mboxq); if (rc) { rc = -ENOMEM; goto fail_mbox; } dmabuf = mboxq->ctx_buf; memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(struct serv_parm)); } vport->port_state = LPFC_FABRIC_CFG_LINK; if (dmabuf) { lpfc_reg_vfi(mboxq, vport, dmabuf->phys); /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ mboxq->ctx_buf = dmabuf; } else { lpfc_reg_vfi(mboxq, vport, 0); } mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; mboxq->vport = vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = -ENXIO; goto fail_mbox; } return 0; fail_mbox: lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); fail: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0289 Issue Register VFI failed: Err %d\n", rc); return rc; } /** * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login * @vport: pointer to a host virtual N_Port data structure. * * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for * the @vport. This mailbox command is necessary for SLI4 port only. * * Return code * 0 - successfully issued REG_VFI for @vport * A failure code otherwise. **/ int lpfc_issue_unreg_vfi(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct Scsi_Host *shost; LPFC_MBOXQ_t *mboxq; int rc; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2556 UNREG_VFI mbox allocation failed" "HBA state x%x\n", phba->pport->port_state); return -ENOMEM; } lpfc_unreg_vfi(mboxq, vport); mboxq->vport = vport; mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2557 UNREG_VFI issue mbox failed rc x%x " "HBA state x%x\n", rc, phba->pport->port_state); mempool_free(mboxq, phba->mbox_mem_pool); return -EIO; } shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VFI_REGISTERED; spin_unlock_irq(shost->host_lock); return 0; } /** * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. * @vport: pointer to a host virtual N_Port data structure. * @sp: pointer to service parameter data structure. * * This routine is called from FLOGI/FDISC completion handler functions. * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric * node nodename is changed in the completion service parameter else return * 0. This function also set flag in the vport data structure to delay * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric * node nodename is changed in the completion service parameter. 
* * Return code * 0 - FCID and Fabric Nodename and Fabric portname is not changed. * 1 - FCID or Fabric Nodename or Fabric portname is changed. * **/ static uint8_t lpfc_check_clean_addr_bit(struct lpfc_vport *vport, struct serv_parm *sp) { struct lpfc_hba *phba = vport->phba; uint8_t fabric_param_changed = 0; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if ((vport->fc_prevDID != vport->fc_myDID) || memcmp(&vport->fabric_portname, &sp->portName, sizeof(struct lpfc_name)) || memcmp(&vport->fabric_nodename, &sp->nodeName, sizeof(struct lpfc_name)) || (vport->vport_flag & FAWWPN_PARAM_CHG)) { fabric_param_changed = 1; vport->vport_flag &= ~FAWWPN_PARAM_CHG; } /* * Word 1 Bit 31 in common service parameter is overloaded. * Word 1 Bit 31 in FLOGI request is multiple NPort request * Word 1 Bit 31 in FLOGI response is clean address bit * * If fabric parameter is changed and clean address bit is * cleared delay nport discovery if * - vport->fc_prevDID != 0 (not initial discovery) OR * - lpfc_delay_discovery module parameter is set. */ if (fabric_param_changed && !sp->cmn.clean_address_bit && (vport->fc_prevDID || phba->cfg_delay_discovery)) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_DELAYED; spin_unlock_irq(shost->host_lock); } return fabric_param_changed; } /** * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @sp: pointer to service parameter data structure. * @ulp_word4: command response value * * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback * function to handle the completion of a Fabric Login (FLOGI) into a fabric * port in a fabric topology. It properly sets up the parameters to the @ndlp * from the IOCB response. It also check the newly assigned N_Port ID to the * @vport against the previously assigned N_Port ID. If it is different from * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine * is invoked on all the remaining nodes with the @vport to unregister the * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() * is invoked to register login to the fabric. 
* * Return code * 0 - Success (currently, always return 0) **/ static int lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct serv_parm *sp, uint32_t ulp_word4) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *np; struct lpfc_nodelist *next_np; uint8_t fabric_param_changed; spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_FABRIC; spin_unlock_irq(shost->host_lock); phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; phba->fc_edtovResol = sp->cmn.edtovResolution; phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PUBLIC_LOOP; spin_unlock_irq(shost->host_lock); } vport->fc_myDID = ulp_word4 & Mask_DID; memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; if (sp->cls2.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS2; if (sp->cls3.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS3; if (sp->cls4.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); if (fabric_param_changed) { /* Reset FDMI attribute masks based on config parameter */ if (phba->cfg_enable_SmartSAN || (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { /* Setup appropriate attribute masks */ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; if (phba->cfg_enable_SmartSAN) vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; else vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; } else { vport->fdmi_hba_mask = 0; vport->fdmi_port_mask = 0; } } memcpy(&vport->fabric_portname, &sp->portName, sizeof(struct lpfc_name)); memcpy(&vport->fabric_nodename, &sp->nodeName, sizeof(struct lpfc_name)); memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { if (sp->cmn.response_multiple_NPort) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_VPORT, "1816 FLOGI NPIV supported, " "response data 0x%x\n", sp->cmn.response_multiple_NPort); spin_lock_irq(&phba->hbalock); phba->link_flag |= LS_NPIV_FAB_SUPPORTED; spin_unlock_irq(&phba->hbalock); } else { /* Because we asked f/w for NPIV it still expects us to call reg_vnpid at least for the physical host */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_VPORT, "1817 Fabric does not support NPIV " "- configuring single port mode.\n"); spin_lock_irq(&phba->hbalock); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; spin_unlock_irq(&phba->hbalock); } } /* * For FC we need to do some special processing because of the SLI * Port's default settings of the Common Service Parameters. */ if ((phba->sli_rev == LPFC_SLI_REV4) && (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ if (fabric_param_changed) lpfc_unregister_fcf_prep(phba); /* This should just update the VFI CSPs*/ if (vport->fc_flag & FC_VFI_REGISTERED) lpfc_issue_reg_vfi(vport); } if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all * remaining NPORTs get unreg_login'ed. 
*/ list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || !(np->nlp_flag & NLP_NPR_ADISC)) continue; spin_lock_irq(&np->lock); np->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&np->lock); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } /* * For SLI3 and SLI4, the VPI needs to be reregistered in * response to this fabric parameter change event. */ spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); } else if ((phba->sli_rev == LPFC_SLI_REV4) && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* * Driver needs to re-reg VPI in order for f/w * to update the MAC address. */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_register_new_vport(phba, vport, ndlp); return 0; } if (phba->sli_rev < LPFC_SLI_REV4) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) lpfc_register_new_vport(phba, vport, ndlp); else lpfc_issue_fabric_reglogin(vport); } else { ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && (vport->vpi_state & LPFC_VPI_REGISTERED)) { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } else if (vport->fc_flag & FC_VFI_REGISTERED) lpfc_issue_init_vpi(vport); else { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3135 Need register VFI: (x%x/%x)\n", vport->fc_prevDID, vport->fc_myDID); lpfc_issue_reg_vfi(vport); } } return 0; } /** * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @sp: pointer to service parameter data structure. * * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback * function to handle the completion of a Fabric Login (FLOGI) into an N_Port * in a point-to-point topology. First, the @vport's N_Port Name is compared * with the received N_Port Name: if the @vport's N_Port Name is greater than * the received N_Port Name lexicographically, this node shall assign local * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, * this node shall just wait for the remote node to issue PLOGI and assign * N_Port IDs. * * Return code * 0 - Success * -ENXIO - Fail **/ static int lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct serv_parm *sp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); /* If we are pt2pt with another NPort, force NPIV off! 
*/ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { lpfc_unregister_fcf_prep(phba); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VFI_REGISTERED; spin_unlock_irq(shost->host_lock); phba->fc_topology_changed = 0; } rc = memcmp(&vport->fc_portname, &sp->portName, sizeof(vport->fc_portname)); if (rc >= 0) { /* This side will initiate the PLOGI */ spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT_PLOGI; spin_unlock_irq(shost->host_lock); /* * N_Port ID cannot be 0, set our Id to LocalID * the other side will be RemoteID. */ /* not equal */ if (rc) vport->fc_myDID = PT2PT_LocalID; /* If not registered with a transport, decrement ndlp reference * count indicating that ndlp can be safely released when other * references are removed. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) lpfc_nlp_put(ndlp); ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); if (!ndlp) { /* * Cannot find existing Fabric ndlp, so allocate a * new one */ ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); if (!ndlp) goto fail; } memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) goto fail; lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); goto fail; } } else { /* This side will wait for the PLOGI. If not registered with * a transport, decrement node reference count indicating that * ndlp can be released when other references are removed. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) lpfc_nlp_put(ndlp); /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); } return 0; fail: return -ENXIO; } /** * lpfc_cmpl_els_flogi - Completion callback function for flogi * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the top-level completion callback function for issuing * a Fabric Login (FLOGI) command. If the response IOCB reported error, * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If * retry has been made (either immediately or delayed with lpfc_els_retry() * returning 1), the command IOCB will be released and function returned. * If the retry attempt has been given up (possibly reach the maximum * number of retries), one additional decrement of ndlp reference shall be * invoked before going out after releasing the command IOCB. This will * actually release the remote node (Note, lpfc_els_free_iocb() will also * invoke one decrement of ndlp reference count). If no error reported in * the IOCB status, the command Port ID field is used to determine whether * this is a point-to-point topology or a fabric topology: if the Port ID * field is assigned, it is a fabric topology; otherwise, it is a * point-to-point topology. 
The routine lpfc_cmpl_els_flogi_fabric() or * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the * specific topology completion conditions. **/ static void lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp = cmdiocb->ndlp; IOCB_t *irsp; struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; struct serv_parm *sp; uint16_t fcf_index; int rc; u32 ulp_status, ulp_word4, tmo; bool flogi_in_retry = false; /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { /* One additional decrement on node reference count to * trigger the release of the node */ if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); goto out; } ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(cmdiocb); } else { irsp = &rspiocb->iocb; tmo = irsp->ulpTimeout; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "FLOGI cmpl: status:x%x/x%x state:x%x", ulp_status, ulp_word4, vport->port_state); if (ulp_status) { /* * In case of FIP mode, perform roundrobin FCF failover * due to new FCF discovery */ if ((phba->hba_flag & HBA_FIP_SUPPORT) && (phba->fcf.fcf_flag & FCF_DISCOVERY)) { if (phba->link_state < LPFC_LINK_UP) goto stop_rr_fcf_flogi; if ((phba->fcoe_cvl_eventtag_attn == phba->fcoe_cvl_eventtag) && (ulp_status == IOSTAT_LOCAL_REJECT) && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_SLI_ABORTED)) goto stop_rr_fcf_flogi; else phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2611 FLOGI failed on FCF (x%x), " "status:x%x/x%x, tmo:x%x, perform " "roundrobin FCF failover\n", phba->fcf.current_rec.fcf_indx, ulp_status, ulp_word4, tmo); lpfc_sli4_set_fcf_flogi_fail(phba, phba->fcf.current_rec.fcf_indx); fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); if (rc) goto out; } stop_rr_fcf_flogi: /* FLOGI failure */ if (!(ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2858 FLOGI failure Status:x%x/x%x TMO" ":x%x Data x%x x%x\n", ulp_status, ulp_word4, tmo, phba->hba_flag, phba->fcf.fcf_flag); /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* Address a timing race with dev_loss. If dev_loss * is active on this FPort node, put the initial ref * count back to stop premature node release actions. */ lpfc_check_nlp_post_devloss(vport, ndlp); flogi_in_retry = true; goto out; } /* The FLOGI will not be retried. If the FPort node is not * registered with the SCSI transport, remove the initial * reference to trigger node release. 
*/ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, "0150 FLOGI failure Status:x%x/x%x " "xri x%x TMO:x%x refcnt %d\n", ulp_status, ulp_word4, cmdiocb->sli4_xritag, tmo, kref_read(&ndlp->kref)); /* If this is not a loop open failure, bail out */ if (!(ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) { /* FLOGI failure */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0100 FLOGI failure Status:x%x/x%x " "TMO:x%x\n", ulp_status, ulp_word4, tmo); goto flogifail; } /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | FC_PT2PT_NO_NVME); spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no * alpa map would take too long otherwise. */ if (phba->alpa_map[0] == 0) vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || (vport->fc_prevDID != vport->fc_myDID) || phba->fc_topology_changed)) { if (vport->fc_flag & FC_VFI_REGISTERED) { if (phba->fc_topology_changed) { lpfc_unregister_fcf_prep(phba); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VFI_REGISTERED; spin_unlock_irq(shost->host_lock); phba->fc_topology_changed = 0; } else { lpfc_sli4_unreg_all_rpis(vport); } } /* Do not register VFI if the driver aborted FLOGI */ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) lpfc_issue_reg_vfi(vport); goto out; } goto flogifail; } spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_CVL_RCVD; vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; spin_unlock_irq(shost->host_lock); /* * The FLOGI succeeded. Sync the data for the CPU before * accessing it. */ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (!prsp) goto out; sp = prsp->virt + sizeof(uint32_t); /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0101 FLOGI completes successfully, I/O tag:x%x " "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", cmdiocb->iotag, cmdiocb->sli4_xritag, ulp_word4, sp->cmn.e_d_tov, sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, vport->port_state, vport->fc_flag, sp->cmn.priority_tagging, kref_read(&ndlp->kref)); if (sp->cmn.priority_tagging) vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | LPFC_VMID_TYPE_PRIO); /* reinitialize the VMID datastructure before returning */ if (lpfc_is_vmid_enabled(phba)) lpfc_reinit_vmid(vport); /* * Address a timing race with dev_loss. If dev_loss is active on * this FPort node, put the initial ref count back to stop premature * node release actions. */ lpfc_check_nlp_post_devloss(vport, ndlp); if (vport->port_state == LPFC_FLOGI) { /* * If Common Service Parameters indicate Nport * we are point to point, if Fport we are Fabric. 
*/ if (sp->cmn.fPort) rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, ulp_word4); else if (!(phba->hba_flag & HBA_FCOE_MODE)) rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2831 FLOGI response with cleared Fabric " "bit fcf_index 0x%x " "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " "Fabric Name " "%02x%02x%02x%02x%02x%02x%02x%02x\n", phba->fcf.current_rec.fcf_indx, phba->fcf.current_rec.switch_name[0], phba->fcf.current_rec.switch_name[1], phba->fcf.current_rec.switch_name[2], phba->fcf.current_rec.switch_name[3], phba->fcf.current_rec.switch_name[4], phba->fcf.current_rec.switch_name[5], phba->fcf.current_rec.switch_name[6], phba->fcf.current_rec.switch_name[7], phba->fcf.current_rec.fabric_name[0], phba->fcf.current_rec.fabric_name[1], phba->fcf.current_rec.fabric_name[2], phba->fcf.current_rec.fabric_name[3], phba->fcf.current_rec.fabric_name[4], phba->fcf.current_rec.fabric_name[5], phba->fcf.current_rec.fabric_name[6], phba->fcf.current_rec.fabric_name[7]); lpfc_nlp_put(ndlp); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } if (!rc) { /* Mark the FCF discovery process done */ if (phba->hba_flag & HBA_FIP_SUPPORT) lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, "2769 FLOGI to FCF (x%x) " "completed successfully\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } } else if (vport->port_state > LPFC_FLOGI && vport->fc_flag & FC_PT2PT) { /* * In a p2p topology, it is possible that discovery has * already progressed, and this completion can be ignored. * Recheck the indicated topology. */ if (!sp->cmn.fPort) goto out; } flogifail: spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; spin_unlock_irq(&phba->hbalock); if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { /* FLOGI failed, so just use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || (((ulp_word4 & IOERR_PARAM_MASK) != IOERR_SLI_ABORTED) && ((ulp_word4 & IOERR_PARAM_MASK) != IOERR_SLI_DOWN))) && (phba->link_state != LPFC_CLEAR_LA)) { /* If FLOGI failed enable link interrupt. */ lpfc_issue_clear_la(phba, vport); } out: if (!flogi_in_retry) phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_cmpl_els_link_down - Completion callback function for ELS command * aborted during a link down * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. 
* */ static void lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { uint32_t *pcmd; uint32_t cmd; u32 ulp_status, ulp_word4; pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; cmd = *pcmd; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "6445 ELS completes after LINK_DOWN: " " Status %x/%x cmd x%x flg x%x\n", ulp_status, ulp_word4, cmd, cmdiocb->cmd_flag); if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; atomic_dec(&phba->fabric_iocb_count); } lpfc_els_free_iocb(phba, cmdiocb); } /** * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @retry: number of retries to the command IOCB. * * This routine issues a Fabric Login (FLOGI) Request ELS command * for a @vport. The initiator service parameters are put into the payload * of the FLOGI Request IOCB and the top-level callback function pointer * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback * function field. The lpfc_issue_fabric_iocb routine is invoked to send * out FLOGI ELS command with one outstanding fabric IOCB at a time. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the FLOGI ELS command. * * Return code * 0 - successfully issued flogi iocb for @vport * 1 - failed to issue flogi iocb for @vport **/ static int lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct serv_parm *sp; union lpfc_wqe128 *wqe = NULL; IOCB_t *icmd = NULL; struct lpfc_iocbq *elsiocb; struct lpfc_iocbq defer_flogi_acc; u8 *pcmd, ct; uint16_t cmdsize; uint32_t tmo, did; int rc; cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_FLOGI); if (!elsiocb) return 1; wqe = &elsiocb->wqe; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; icmd = &elsiocb->iocb; /* For FLOGI request, remainder of payload is service parameters */ *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; pcmd += sizeof(uint32_t); memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); sp = (struct serv_parm *) pcmd; /* Setup CSPs accordingly for Fabric */ sp->cmn.e_d_tov = 0; sp->cmn.w2.r_a_tov = 0; sp->cmn.virtual_fabric_support = 0; sp->cls1.classValid = 0; if (sp->cmn.fcphLow < FC_PH3) sp->cmn.fcphLow = FC_PH3; if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; /* Determine if switch supports priority tagging */ if (phba->cfg_vmid_priority_tagging) { sp->cmn.priority_tagging = 1; /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, sizeof(vport->lpfc_vmid_host_uuid))) { memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, sizeof(phba->wwpn)); memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, sizeof(phba->wwnn)); } } if (phba->sli_rev == LPFC_SLI_REV4) { if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_0) { /* FLOGI needs to be 3 for WQE FCFI */ ct = SLI4_CT_FCFI; bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); /* Set the fcfi to the fcfi we registered with */ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, phba->fcf.fcfi); } /* Can't do SLI4 class2 without support sequence coalescing */ sp->cls2.classValid 
= 0; sp->cls2.seqDelivery = 0; } else { /* Historical, setting sequential-delivery bit for SLI3 */ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { sp->cmn.request_multiple_Nport = 1; /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ icmd->ulpCt_h = 1; icmd->ulpCt_l = 0; } else { sp->cmn.request_multiple_Nport = 0; } if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { icmd->un.elsreq64.myID = 0; icmd->un.elsreq64.fl = 1; } } tmo = phba->fc_ratov; phba->fc_ratov = LPFC_DISC_FLOGI_TMO; lpfc_set_disctmo(vport); phba->fc_ratov = tmo; phba->fc_stat.elsXmitFLOGI++; elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue FLOGI: opt:x%x", phba->sli3_options, 0, 0); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } /* Avoid race with FLOGI completion and hba_flags. */ phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; /* Check for a deferred FLOGI ACC condition */ if (phba->defer_flogi_acc_flag) { /* lookup ndlp for received FLOGI */ ndlp = lpfc_findnode_did(vport, 0); if (!ndlp) return 0; did = vport->fc_myDID; vport->fc_myDID = Fabric_DID; memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); if (phba->sli_rev == LPFC_SLI_REV4) { bf_set(wqe_ctxt_tag, &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, phba->defer_flogi_acc_rx_id); bf_set(wqe_rcvoxid, &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, phba->defer_flogi_acc_ox_id); } else { icmd = &defer_flogi_acc.iocb; icmd->ulpContext = phba->defer_flogi_acc_rx_id; icmd->unsli3.rcvsli3.ox_id = phba->defer_flogi_acc_ox_id; } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3354 Xmit deferred FLOGI ACC: rx_id: x%x," " ox_id: x%x, hba_flag x%x\n", phba->defer_flogi_acc_rx_id, phba->defer_flogi_acc_ox_id, phba->hba_flag); /* Send deferred FLOGI ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, ndlp, NULL); phba->defer_flogi_acc_flag = false; vport->fc_myDID = did; /* Decrement ndlp reference count to indicate the node can be * released when other references are removed. */ lpfc_nlp_put(ndlp); } return 0; } /** * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs * @phba: pointer to lpfc hba data structure. * * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq * list and issues an abort IOCB commond on each outstanding IOCB that * contains a active Fabric_DID ndlp. Note that this function is to issue * the abort IOCB command on all the outstanding IOCBs, thus when this * function returns, it does not guarantee all the IOCBs are actually aborted. 
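 *
 * Illustrative call pattern (a sketch, not lifted from the driver): a
 * path that wants any in-flight fabric login cancelled before retrying
 * the login could do, roughly,
 *
 *	lpfc_els_abort_flogi(phba);
 *	lpfc_initial_flogi(phba->pport);
 *
 * keeping in mind that the abort completions are delivered
 * asynchronously, so late FLOGI completions must still be tolerated.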
* * Return code * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) **/ int lpfc_els_abort_flogi(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; struct lpfc_nodelist *ndlp; u32 ulp_command; /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, "0201 Abort outstanding I/O on NPort x%x\n", Fabric_DID); pring = lpfc_phba_elsring(phba); if (unlikely(!pring)) return -EIO; /* * Check the txcmplq for an iocb that matches the nport the driver is * searching for. */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { ulp_command = get_job_cmnd(phba, iocb); if (ulp_command == CMD_ELS_REQUEST64_CR) { ndlp = iocb->ndlp; if (ndlp && ndlp->nlp_DID == Fabric_DID) { if ((phba->pport->fc_flag & FC_PT2PT) && !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) iocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); } } } /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); spin_unlock_irq(&phba->hbalock); return 0; } /** * lpfc_initial_flogi - Issue an initial fabric login for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine issues an initial Fabric Login (FLOGI) for the @vport * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and * put it into the @vport's ndlp list. If an inactive ndlp found on the list, * it will just be enabled and made active. The lpfc_issue_els_flogi() routine * is then invoked with the @vport and the ndlp to perform the FLOGI for the * @vport. * * Return code * 0 - failed to issue initial flogi for @vport * 1 - successfully issued initial flogi for @vport **/ int lpfc_initial_flogi(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; vport->port_state = LPFC_FLOGI; lpfc_set_disctmo(vport); /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = lpfc_nlp_init(vport, Fabric_DID); if (!ndlp) return 0; /* Set the node type */ ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } /* Reset the Fabric flag, topology change may have happened */ vport->fc_flag &= ~FC_FABRIC; if (lpfc_issue_els_flogi(vport, ndlp, 0)) { /* A node reference should be retained while registered with a * transport or dev-loss-evt work is pending. * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) lpfc_nlp_put(ndlp); return 0; } return 1; } /** * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine issues an initial Fabric Discover (FDISC) for the @vport * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and * put it into the @vport's ndlp list. If an inactive ndlp found on the list, * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine * is then invoked with the @vport and the ndlp to perform the FDISC for the * @vport. 
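 *
 * Usage sketch (illustrative only; LPFC_FDISC, lpfc_vport_set_state()
 * and FC_VPORT_FAILED are assumed from the NPIV vport-create path and
 * are not defined in this file):
 *
 *	vport->port_state = LPFC_FDISC;
 *	if (!lpfc_initial_fdisc(vport))
 *		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 *
 * A return of 0 means the FDISC could not be started, so the new vport
 * is marked failed rather than left half-initialized.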
* * Return code * 0 - failed to issue initial fdisc for @vport * 1 - successfully issued initial fdisc for @vport **/ int lpfc_initial_fdisc(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = lpfc_nlp_init(vport, Fabric_DID); if (!ndlp) return 0; /* NPIV is only supported in Fabrics. */ ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { /* A node reference should be retained while registered with a * transport or dev-loss-evt work is pending. * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) lpfc_nlp_put(ndlp); return 0; } return 1; } /** * lpfc_more_plogi - Check and issue remaining plogis for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine checks whether there are more remaining Port Logins * (PLOGI) to be issued for the @vport. If so, it will invoke the routine * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes * to issue ELS PLOGIs up to the configured discover threads with the * @vport (@vport->cfg_discovery_threads). The function also decrement * the @vport's num_disc_node by 1 if it is not already 0. **/ void lpfc_more_plogi(struct lpfc_vport *vport) { if (vport->num_disc_nodes) vport->num_disc_nodes--; /* Continue discovery with <num_disc_nodes> PLOGIs to go */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0232 Continue discovery with %d PLOGIs to go " "Data: x%x x%x x%x\n", vport->num_disc_nodes, vport->fc_plogi_cnt, vport->fc_flag, vport->port_state); /* Check to see if there are more PLOGIs to be sent */ if (vport->fc_flag & FC_NLP_MORE) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ lpfc_els_disc_plogi(vport); return; } /** * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp * @phba: pointer to lpfc hba data structure. * @prsp: pointer to response IOCB payload. * @ndlp: pointer to a node-list data structure. * * This routine checks and indicates whether the WWPN of an N_Port, retrieved * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. * The following cases are considered N_Port confirmed: * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but * it does not have WWPN assigned either. If the WWPN is confirmed, the * pointer to the @ndlp will be returned. If the WWPN is not confirmed: * 1) if there is a node on vport list other than the @ndlp with the same * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked * on that node to release the RPI associated with the node; 2) if there is * no node found on vport list with the same WWPN of the N_Port PLOGI logged * into, a new node shall be allocated (or activated). In either case, the * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall * be released and the new_ndlp shall be put on to the vport node list and * its pointer returned as the confirmed node. * * Note that before the @ndlp got "released", the keepDID from not-matching * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID * of the @ndlp. 
This is because the release of @ndlp is actually to put it * into an inactive state on the vport node list and the vport node list * management algorithm does not allow two node with a same DID. * * Return code * pointer to the PLOGI N_Port @ndlp **/ static struct lpfc_nodelist * lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *ndlp) { struct lpfc_vport *vport = ndlp->vport; struct lpfc_nodelist *new_ndlp; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; uint32_t keepDID = 0, keep_nlp_flag = 0; uint32_t keep_new_nlp_flag = 0; uint16_t keep_nlp_state; u32 keep_nlp_fc4_type = 0; struct lpfc_nvme_rport *keep_nrport = NULL; unsigned long *active_rrqs_xri_bitmap = NULL; /* Fabric nodes can have the same WWPN so we don't bother searching * by WWPN. Just return the ndlp that was given to us. */ if (ndlp->nlp_type & NLP_FABRIC) return ndlp; sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); memset(name, 0, sizeof(struct lpfc_name)); /* Now we find out if the NPort we are logging into, matches the WWPN * we have for that ndlp. If not, we have some work to do. */ new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); /* return immediately if the WWPN matches ndlp */ if (!new_ndlp || (new_ndlp == ndlp)) return ndlp; /* * Unregister from backend if not done yet. Could have been skipped * due to ADISC */ lpfc_nlp_unreg_node(vport, new_ndlp); if (phba->sli_rev == LPFC_SLI_REV4) { active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL); if (active_rrqs_xri_bitmap) memset(active_rrqs_xri_bitmap, 0, phba->cfg_rrq_xri_bitmap_sz); } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, "3178 PLOGI confirm: ndlp x%x x%x x%x: " "new_ndlp x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, (new_ndlp ? new_ndlp->nlp_DID : 0), (new_ndlp ? new_ndlp->nlp_flag : 0), (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); keepDID = new_ndlp->nlp_DID; if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); /* At this point in this routine, we know new_ndlp will be * returned. however, any previous GID_FTs that were done * would have updated nlp_fc4_type in ndlp, so we must ensure * new_ndlp has the right value. */ if (vport->fc_flag & FC_FABRIC) { keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; } lpfc_unreg_rpi(vport, new_ndlp); new_ndlp->nlp_DID = ndlp->nlp_DID; new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; if (phba->sli_rev == LPFC_SLI_REV4) memcpy(new_ndlp->active_rrqs_xri_bitmap, ndlp->active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); /* Lock both ndlps */ spin_lock_irq(&ndlp->lock); spin_lock_irq(&new_ndlp->lock); keep_new_nlp_flag = new_ndlp->nlp_flag; keep_nlp_flag = ndlp->nlp_flag; new_ndlp->nlp_flag = ndlp->nlp_flag; /* if new_ndlp had NLP_UNREG_INP set, keep it */ if (keep_new_nlp_flag & NLP_UNREG_INP) new_ndlp->nlp_flag |= NLP_UNREG_INP; else new_ndlp->nlp_flag &= ~NLP_UNREG_INP; /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ if (keep_new_nlp_flag & NLP_RPI_REGISTERED) new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; else new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; /* * Retain the DROPPED flag. 
This will take care of the init * refcount when affecting the state change */ if (keep_new_nlp_flag & NLP_DROPPED) new_ndlp->nlp_flag |= NLP_DROPPED; else new_ndlp->nlp_flag &= ~NLP_DROPPED; ndlp->nlp_flag = keep_new_nlp_flag; /* if ndlp had NLP_UNREG_INP set, keep it */ if (keep_nlp_flag & NLP_UNREG_INP) ndlp->nlp_flag |= NLP_UNREG_INP; else ndlp->nlp_flag &= ~NLP_UNREG_INP; /* if ndlp had NLP_RPI_REGISTERED set, keep it */ if (keep_nlp_flag & NLP_RPI_REGISTERED) ndlp->nlp_flag |= NLP_RPI_REGISTERED; else ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ if (keep_nlp_flag & NLP_DROPPED) ndlp->nlp_flag |= NLP_DROPPED; else ndlp->nlp_flag &= ~NLP_DROPPED; spin_unlock_irq(&new_ndlp->lock); spin_unlock_irq(&ndlp->lock); /* Set nlp_states accordingly */ keep_nlp_state = new_ndlp->nlp_state; lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); /* interchange the nvme remoteport structs */ keep_nrport = new_ndlp->nrport; new_ndlp->nrport = ndlp->nrport; /* Move this back to NPR state */ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { /* The ndlp doesn't have a portname yet, but does have an * NPort ID. The new_ndlp portname matches the Rport's * portname. Reinstantiate the new_ndlp and reset the ndlp. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3179 PLOGI confirm NEW: %x %x\n", new_ndlp->nlp_DID, keepDID); /* Two ndlps cannot have the same did on the nodelist. * The KeepDID and keep_nlp_fc4_type need to be swapped * because ndlp is inflight with no WWPN. */ ndlp->nlp_DID = keepDID; ndlp->nlp_fc4_type = keep_nlp_fc4_type; lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) memcpy(ndlp->active_rrqs_xri_bitmap, active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3180 PLOGI confirm SWAP: %x %x\n", new_ndlp->nlp_DID, keepDID); lpfc_unreg_rpi(vport, ndlp); /* The ndlp and new_ndlp both have WWPNs but are swapping * NPort Ids and attributes. */ ndlp->nlp_DID = keepDID; ndlp->nlp_fc4_type = keep_nlp_fc4_type; if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) memcpy(ndlp->active_rrqs_xri_bitmap, active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); /* Since we are switching over to the new_ndlp, * reset the old ndlp state */ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) keep_nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); ndlp->nrport = keep_nrport; } /* * If ndlp is not associated with any rport we can drop it here else * let dev_loss_tmo_callbk trigger DEVICE_RM event */ if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) mempool_free(active_rrqs_xri_bitmap, phba->active_rrq_pool); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", new_ndlp->nlp_DID, new_ndlp->nlp_flag, new_ndlp->nlp_fc4_type); return new_ndlp; } /** * lpfc_end_rscn - Check and handle more rscn for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine checks whether more Registration State Change * Notifications (RSCNs) came in while the discovery state machine was in * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be * invoked to handle the additional RSCNs for the @vport. 
Otherwise, the * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of * handling the RSCNs. **/ void lpfc_end_rscn(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (vport->fc_flag & FC_RSCN_MODE) { /* * Check to see if more RSCNs came in while we were * processing this one. */ if (vport->fc_rscn_id_cnt || (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) lpfc_els_handle_rscn(vport); else { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_RSCN_MODE; spin_unlock_irq(shost->host_lock); } } } /** * lpfc_cmpl_els_rrq - Completion handled for els RRQs. * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine will call the clear rrq function to free the rrq and * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not * exist then the clear_rrq is still called because the rrq needs to * be freed. **/ static void lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_node_rrq *rrq; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); /* we pass cmdiocb to state machine which needs rspiocb as well */ rrq = cmdiocb->context_un.rrq; cmdiocb->rsp_iocb = rspiocb; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "RRQ cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, get_job_els_rsp64_did(phba, cmdiocb)); /* rrq completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2880 RRQ completes to DID x%x " "Data: x%x x%x x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); if (ulp_status) { /* Check for retry */ /* RRQ failed Don't print the vport to vport rjts */ if (ulp_status != IOSTAT_LS_RJT || (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2881 RRQ failure DID:%06X Status:" "x%x/x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4); } lpfc_clr_rrq_active(phba, rrq->xritag, rrq); lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } /** * lpfc_cmpl_els_plogi - Completion callback function for plogi * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion callback function for issuing the Port * Login (PLOGI) command. For PLOGI completion, there must be an active * ndlp on the vport node list that matches the remote node ID from the * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply * ignored and command IOCB released. The PLOGI response IOCB status is * checked for error conditions. If there is error status reported, PLOGI * retry shall be attempted by invoking the lpfc_els_retry() routine. * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine * (DSM) is set for this PLOGI completion. Finally, it checks whether * there are additional N_Port nodes with the vport that need to perform * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition * PLOGIs. 
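 *
 * In outline (a summary of the code below, not additional logic), the
 * success path is:
 *
 *	ndlp = lpfc_findnode_did(vport, did);
 *	ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_plogi(vport);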
**/ static void lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; int disc; struct serv_parm *sp = NULL; u32 ulp_status, ulp_word4, did, iotag; bool release_node = false; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); did = get_job_els_rsp64_did(phba, cmdiocb); if (phba->sli_rev == LPFC_SLI_REV4) { iotag = get_wqe_reqtag(cmdiocb); } else { irsp = &rspiocb->iocb; iotag = irsp->ulpIoTag; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "PLOGI cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0136 PLOGI completes to NPort x%x " "with no ndlp. Data: x%x x%x x%x\n", did, ulp_status, ulp_word4, iotag); goto out_freeiocb; } /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. */ spin_lock_irq(&ndlp->lock); disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); /* PLOGI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0102 PLOGI completes to NPort x%06x " "Data: x%x x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_fc4_type, ulp_status, ulp_word4, disc, vport->num_disc_nodes); /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); goto out; } if (ulp_status) { /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); } goto out; } /* PLOGI failed Don't print the vport to vport rjts */ if (ulp_status != IOSTAT_LS_RJT || (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2753 PLOGI failure DID:%06X " "Status:x%x/x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); /* If a PLOGI collision occurred, the node needs to continue * with the reglogin process. */ spin_lock_irq(&ndlp->lock); if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { spin_unlock_irq(&ndlp->lock); goto out; } /* No PLOGI collision and the node is not registered with the * scsi or nvme transport. It is no longer an active node. Just * start the device remove process. 
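 * The NLP_IN_DEV_LOSS test below is what keeps this path from racing
 * with dev-loss handling: if dev-loss already owns the node, the
 * DEVICE_RM event is left for that handler and release_node stays
 * false.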
*/ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) release_node = true; } spin_unlock_irq(&ndlp->lock); if (release_node) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } else { /* Good status, call state machine */ prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, struct lpfc_dmabuf, list); ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); sp = (struct serv_parm *)((u8 *)prsp->virt + sizeof(u32)); ndlp->vmid_support = 0; if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || (phba->cfg_vmid_priority_tagging && sp->cmn.priority_tagging)) { lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, "4018 app_hdr_support %d tagging %d DID x%x\n", sp->cmn.app_hdr_support, sp->cmn.priority_tagging, ndlp->nlp_DID); /* if the dest port supports VMID, mark it in ndlp */ ndlp->vmid_support = 1; } lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); } if (disc && vport->num_disc_nodes) { /* Check to see if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); lpfc_end_rscn(vport); } } out: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "PLOGI Cmpl PUT: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); out_freeiocb: /* Release the reference on the original I/O request. */ free_ndlp = cmdiocb->ndlp; lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); return; } /** * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport * @vport: pointer to a host virtual N_Port data structure. * @did: destination port identifier. * @retry: number of retries to the command IOCB. * * This routine issues a Port Login (PLOGI) command to a remote N_Port * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. * This routine constructs the proper fields of the PLOGI IOCB and invokes * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. * * Note that the ndlp reference count will be incremented by 1 for holding * the ndlp and the reference to ndlp will be stored into the ndlp field * of the IOCB for the completion callback function to the PLOGI ELS command. * * Return code * 0 - Successfully issued a plogi for @vport * 1 - failed to issue a plogi for @vport **/ int lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct serv_parm *sp; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; int ret; ndlp = lpfc_findnode_did(vport, did); if (!ndlp) return 1; /* Defer the processing of the issue PLOGI until after the * outstanding UNREG_RPI mbox command completes, unless we * are going offline. 
This logic does not apply for Fabric DIDs */ if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && !(vport->fc_flag & FC_OFFLINE_MODE)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " "on NPort x%x rpi x%x flg x%x Data:" " x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); /* We can only defer 1st PLOGI */ if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) ndlp->nlp_defer_did = did; return 0; } cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, ELS_CMD_PLOGI); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; /* For PLOGI request, remainder of payload is service parameters */ *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; pcmd += sizeof(uint32_t); memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); sp = (struct serv_parm *) pcmd; /* * If we are a N-port connected to a Fabric, fix-up paramm's so logins * to device on remote loops work. */ if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) sp->cmn.altBbCredit = 1; if (sp->cmn.fcphLow < FC_PH_4_3) sp->cmn.fcphLow = FC_PH_4_3; if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); sp->cmn.bbRcvSizeMsb &= 0xF; /* Check if the destination port supports VMID */ ndlp->vmid_support = 0; if (vport->vmid_priority_tagging) sp->cmn.priority_tagging = 1; else if (phba->cfg_vmid_app_header && bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) sp->cmn.app_hdr_support = 1; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); /* If our firmware supports this feature, convey that * information to the target using the vendor specific field. */ if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { sp->cmn.valid_vendor_ver_level = 1; sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); } phba->fc_stat.elsXmitPLOGI++; elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x refcnt %d", did, kref_read(&ndlp->kref), 0); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (ret) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_cmpl_els_prli - Completion callback function for prli * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion callback function for a Process Login * (PRLI) ELS command. The PRLI response IOCB status is checked for error * status. If there is error status reported, PRLI retry shall be attempted * by invoking the lpfc_els_retry() routine. Otherwise, the state * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this * ndlp to mark the PRLI completion. 
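 *
 * Because an FCP PRLI and an NVMe PRLI can be outstanding to the same
 * remote port at the same time, the completion first decrements both
 * the per-vport fc_prli_sent counter and the per-node fc4_prli_sent
 * counter, and, as the code notes, the final Mapped/Unmapped
 * disposition is settled only when no further PRLI is outstanding.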
**/ static void lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_nodelist *ndlp; char *mode; u32 loglevel; u32 ulp_status; u32 ulp_word4; bool release_node = false; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; ndlp = cmdiocb->ndlp; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; /* Driver supports multiple FC4 types. Counters matter. */ vport->fc_prli_sent--; ndlp->fc4_prli_sent--; spin_unlock_irq(&ndlp->lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "PRLI cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, ndlp->nlp_DID); /* PRLI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0103 PRLI completes to NPort x%06x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, vport->num_disc_nodes, ndlp->fc4_prli_sent); /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) goto out; if (ulp_status) { /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ goto out; } /* If we don't send GFT_ID to Fabric, a PRLI error * could be expected. */ if ((vport->fc_flag & FC_FABRIC) || (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { mode = KERN_ERR; loglevel = LOG_TRACE_EVENT; } else { mode = KERN_INFO; loglevel = LOG_ELS; } /* PRLI failed */ lpfc_printf_vlog(vport, mode, loglevel, "2754 PRLI failure DID:%06X Status:x%x/x%x, " "data: x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, ndlp->fc4_prli_sent, ndlp->nlp_flag); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); /* The following condition catches an inflight transition * mismatch typically caused by an RSCN. Skip any * processing to allow recovery. */ if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || (ndlp->nlp_state == NLP_STE_NPR_NODE && ndlp->nlp_flag & NLP_DELAY_TMO)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, "2784 PRLI cmpl: Allow Node recovery " "DID x%06x nstate x%x nflag x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag); goto out; } /* * For P2P topology, retain the node so that PLOGI can be * attempted on it again. */ if (vport->fc_flag & FC_PT2PT) goto out; /* As long as this node is not registered with the SCSI * or NVMe transport and no other PRLIs are outstanding, * it is no longer an active node. Otherwise devloss * handles the final cleanup. */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !ndlp->fc4_prli_sent) { ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) release_node = true; } spin_unlock_irq(&ndlp->lock); if (release_node) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } else { /* Good status, call state machine. However, if another * PRLI is outstanding, don't call the state machine * because final disposition to Mapped or Unmapped is * completed there. */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); } out: lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } /** * lpfc_issue_els_prli - Issue a prli iocb command for a vport * @vport: pointer to a host virtual N_Port data structure. 
* @ndlp: pointer to a node-list data structure. * @retry: number of retries to the command IOCB. * * This routine issues a Process Login (PRLI) ELS command for the * @vport. The PRLI service parameters are set up in the payload of the * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine * is put to the IOCB completion callback func field before invoking the * routine lpfc_sli_issue_iocb() to send out PRLI command. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the PRLI ELS command. * * Return code * 0 - successfully issued prli iocb command for @vport * 1 - failed to issue prli iocb command for @vport **/ int lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { int rc = 0; struct lpfc_hba *phba = vport->phba; PRLI *npr; struct lpfc_nvme_prli *npr_nvme; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; u32 local_nlp_type, elscmd; /* * If we are in RSCN mode, the FC4 types supported from a * previous GFT_ID command may not be accurate. So, if we * are a NVME Initiator, always look for the possibility of * the remote NPort beng a NVME Target. */ if (phba->sli_rev == LPFC_SLI_REV4 && vport->fc_flag & FC_RSCN_MODE && vport->nvmei_support) ndlp->nlp_fc4_type |= NLP_FC4_NVME; local_nlp_type = ndlp->nlp_fc4_type; /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp * fields here before any of them can complete. */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); ndlp->nvme_fb_size = 0; send_next_prli: if (local_nlp_type & NLP_FC4_FCP) { /* Payload is 4 + 16 = 20 x14 bytes. */ cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); elscmd = ELS_CMD_PRLI; } else if (local_nlp_type & NLP_FC4_NVME) { /* Payload is 4 + 20 = 24 x18 bytes. */ cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); elscmd = ELS_CMD_NVMEPRLI; } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3083 Unknown FC_TYPE x%x ndlp x%06x\n", ndlp->nlp_fc4_type, ndlp->nlp_DID); return 1; } /* SLI3 ports don't support NVME. If this rport is a strict NVME * FC4 type, implicitly LOGO. */ if (phba->sli_rev == LPFC_SLI_REV3 && ndlp->nlp_fc4_type == NLP_FC4_NVME) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", ndlp->nlp_type); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); return 1; } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, elscmd); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; /* For PRLI request, remainder of payload is service parameters */ memset(pcmd, 0, cmdsize); if (local_nlp_type & NLP_FC4_FCP) { /* Remainder of payload is FCP PRLI parameter page. * Note: this data structure is defined as * BE/LE in the structure definition so no * byte swap call is made. */ *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; pcmd += sizeof(uint32_t); npr = (PRLI *)pcmd; /* * If our firmware version is 3.20 or later, * set the following bits for FC-TAPE support. 
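 * The feature-level test below (feaLevelHigh >= 0x02) gates the
 * ConfmComplAllowed, Retry and TaskRetryIdReq service-parameter bits,
 * which advertise the retry and task-retry-identifier behaviour that
 * FC-TAPE devices expect.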
*/ if (phba->vpd.rev.feaLevelHigh >= 0x02) { npr->ConfmComplAllowed = 1; npr->Retry = 1; npr->TaskRetryIdReq = 1; } npr->estabImagePair = 1; npr->readXferRdyDis = 1; if (vport->cfg_first_burst_size) npr->writeXferRdyDis = 1; /* For FCP support */ npr->prliType = PRLI_FCP_TYPE; npr->initiatorFunc = 1; elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; /* Remove FCP type - processed. */ local_nlp_type &= ~NLP_FC4_FCP; } else if (local_nlp_type & NLP_FC4_NVME) { /* Remainder of payload is NVME PRLI parameter page. * This data structure is the newer definition that * uses bf macros so a byte swap is required. */ *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; pcmd += sizeof(uint32_t); npr_nvme = (struct lpfc_nvme_prli *)pcmd; bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ if (phba->nsler) { bf_set(prli_nsler, npr_nvme, 1); bf_set(prli_conf, npr_nvme, 1); } /* Only initiators request first burst. */ if ((phba->cfg_nvme_enable_fb) && !phba->nvmet_support) bf_set(prli_fba, npr_nvme, 1); if (phba->nvmet_support) { bf_set(prli_tgt, npr_nvme, 1); bf_set(prli_disc, npr_nvme, 1); } else { bf_set(prli_init, npr_nvme, 1); bf_set(prli_conf, npr_nvme, 1); } npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; /* Remove NVME type - processed. */ local_nlp_type &= ~NLP_FC4_NVME; } phba->fc_stat.elsXmitPRLI++; elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PRLI: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } /* The vport counters are used for lpfc_scan_finished, but * the ndlp is used to track outstanding PRLIs for different * FC4 types. */ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(&ndlp->lock); /* The driver supports 2 FC4 types. Make sure * a PRLI is issued for all types before exiting. */ if (phba->sli_rev == LPFC_SLI_REV4 && local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) goto send_next_prli; else return 0; } /** * lpfc_rscn_disc - Perform rscn discovery for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine performs Registration State Change Notification (RSCN) * discovery for a @vport. If the @vport's node port recovery count is not * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all * the nodes that need recovery. If none of the PLOGI were needed through * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be * invoked to check and handle possible more RSCN came in during the period * of processing the current ones. **/ static void lpfc_rscn_disc(struct lpfc_vport *vport) { lpfc_can_disctmo(vport); /* RSCN discovery */ /* go thru NPR nodes and issue ELS PLOGIs */ if (vport->fc_npr_cnt) if (lpfc_els_disc_plogi(vport)) return; lpfc_end_rscn(vport); } /** * lpfc_adisc_done - Complete the adisc phase of discovery * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. * * This function is called when the final ADISC is completed during discovery. * This function handles clearing link attention or issuing reg_vpi depending * on whether npiv is enabled. 
This function also kicks off the PLOGI phase of * discovery. * This function is called with no locks held. **/ static void lpfc_adisc_done(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; /* * For NPIV, cmpl_reg_vpi will set port_state to READY, * and continue discovery. */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { /* * If link is down, clear_la and reg_vpi will be done after * flogi following a link up event */ if (!lpfc_is_link_up(phba)) return; /* The ADISCs are complete. Doesn't matter if they * succeeded or failed because the ADISC completion * routine guarantees to call the state machine and * the RPI is either unregistered (failed ADISC response) * or the RPI is still valid and the node is marked * mapped for a target. The exchanges should be in the * correct state. This code is specific to SLI3. */ lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } /* * For SLI2, we need to set port_state to READY * and continue discovery. */ if (vport->port_state < LPFC_VPORT_READY) { /* If we get here, there is nothing to ADISC */ lpfc_issue_clear_la(phba, vport); if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { vport->num_disc_nodes = 0; /* go thru NPR list, issue ELS PLOGIs */ if (vport->fc_npr_cnt) lpfc_els_disc_plogi(vport); if (!vport->num_disc_nodes) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); lpfc_end_rscn(vport); } } vport->port_state = LPFC_VPORT_READY; } else lpfc_rscn_disc(vport); } /** * lpfc_more_adisc - Issue more adisc as needed * @vport: pointer to a host virtual N_Port data structure. * * This routine determines whether there are more ndlps on a @vport * node list need to have Address Discover (ADISC) issued. If so, it will * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's * remaining nodes which need to have ADISC sent. **/ void lpfc_more_adisc(struct lpfc_vport *vport) { if (vport->num_disc_nodes) vport->num_disc_nodes--; /* Continue discovery with <num_disc_nodes> ADISCs to go */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0210 Continue discovery with %d ADISCs to go " "Data: x%x x%x x%x\n", vport->num_disc_nodes, vport->fc_adisc_cnt, vport->fc_flag, vport->port_state); /* Check to see if there are more ADISCs to be sent */ if (vport->fc_flag & FC_NLP_MORE) { lpfc_set_disctmo(vport); /* go thru NPR nodes and issue any remaining ELS ADISCs */ lpfc_els_disc_adisc(vport); } if (!vport->num_disc_nodes) lpfc_adisc_done(vport); return; } /** * lpfc_cmpl_els_adisc - Completion callback function for adisc * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion function for issuing the Address Discover * (ADISC) command. It first checks to see whether link went down during * the discovery process. If so, the node will be marked as node port * recovery for issuing discover IOCB by the link attention handler and * exit. Otherwise, the response status is checked. If error was reported * in the response status, the ADISC command shall be retried by invoking * the lpfc_els_retry() routine. Otherwise, if no error was reported in * the response status, the state machine is invoked to set transition * with respect to NLP_EVT_CMPL_ADISC event. 
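 *
 * After the state machine has run, and provided this node was part of
 * discovery, lpfc_more_adisc() is called so the ADISC phase keeps
 * draining; lpfc_adisc_done() is reached from there once the vport's
 * num_disc_nodes count drops to zero.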
**/ static void lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; int disc; u32 ulp_status, ulp_word4, tmo; bool release_node = false; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; ndlp = cmdiocb->ndlp; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(cmdiocb); } else { irsp = &rspiocb->iocb; tmo = irsp->ulpTimeout; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "ADISC cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, ndlp->nlp_DID); /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. */ spin_lock_irq(&ndlp->lock); disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); spin_unlock_irq(&ndlp->lock); /* ADISC completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0104 ADISC completes to NPort x%x " "Data: x%x x%x x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, disc, vport->num_disc_nodes); /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); goto out; } if (ulp_status) { /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); lpfc_set_disctmo(vport); } goto out; } /* ADISC failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2755 ADISC failure DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); /* As long as this node is not registered with the SCSI or NVMe * transport, it is no longer an active node. Otherwise * devloss handles the final cleanup. */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) release_node = true; } spin_unlock_irq(&ndlp->lock); if (release_node) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } else /* Good status, call state machine */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); /* Check to see if there are more ADISCs to be sent */ if (disc && vport->num_disc_nodes) lpfc_more_adisc(vport); out: lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } /** * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport * @vport: pointer to a virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @retry: number of retries to the command IOCB. * * This routine issues an Address Discover (ADISC) for an @ndlp on a * @vport. It prepares the payload of the ADISC ELS command, updates the * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine * to issue the ADISC ELS command. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the ADISC ELS command. 
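 *
 * Minimal usage sketch (illustrative; the revert on failure is an
 * assumption, not a quote of the discovery code):
 *
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *	if (lpfc_issue_els_adisc(vport, ndlp, 0))
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);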
* * Return code * 0 - successfully issued adisc * 1 - failed to issue adisc **/ int lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { int rc = 0; struct lpfc_hba *phba = vport->phba; ADISC *ap; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_ADISC); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; /* For ADISC request, remainder of payload is service parameters */ *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; pcmd += sizeof(uint32_t); /* Fill in ADISC payload */ ap = (ADISC *) pcmd; ap->hardAL_PA = phba->fc_pref_ALPA; memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); ap->DID = be32_to_cpu(vport->fc_myDID); phba->fc_stat.elsXmitADISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_ADISC_SND; spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto err; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue ADISC: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); goto err; } return 0; err: spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_ADISC_SND; spin_unlock_irq(&ndlp->lock); return 1; } /** * lpfc_cmpl_els_logo - Completion callback function for logo * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion function for issuing the ELS Logout (LOGO) * command. If no error status was reported from the LOGO response, the * state machine of the associated ndlp shall be invoked for transition with * respect to NLP_EVT_CMPL_LOGO event. **/ static void lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp->vport; IOCB_t *irsp; unsigned long flags; uint32_t skip_recovery = 0; int wake_up_waiter = 0; u32 ulp_status; u32 ulp_word4; u32 tmo; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(cmdiocb); } else { irsp = &rspiocb->iocb; tmo = irsp->ulpTimeout; } spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { wake_up_waiter = 1; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; } spin_unlock_irq(&ndlp->lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "LOGO cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, ndlp->nlp_DID); /* LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, ulp_status, ulp_word4, tmo, vport->num_disc_nodes); if (lpfc_els_chk_latt(vport)) { skip_recovery = 1; goto out; } /* The LOGO will not be retried on failure. A LOGO was * issued to the remote rport and a ACC or RJT or no Answer are * all acceptable. Note the failure and move forward with * discovery. 
The PLOGI will retry. */ if (ulp_status) { /* LOGO failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2756 LOGO failure, No Retry DID:%06X " "Status:x%x/x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4); if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) skip_recovery = 1; } /* Call state machine. This will unregister the rpi if needed. */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); if (skip_recovery) goto out; /* The driver sets this flag for an NPIV instance that doesn't want to * log into the remote port. */ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { spin_lock_irq(&ndlp->lock); if (phba->sli_rev == LPFC_SLI_REV4) ndlp->nlp_flag |= NLP_RELEASE_RPI; ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); goto out_rsrc_free; } out: /* At this point, the LOGO processing is complete. NOTE: For a * pt2pt topology, we are assuming the NPortID will only change * on link up processing. For a LOGO / PLOGI initiated by the * Initiator, we are assuming the NPortID is not going to change. */ if (wake_up_waiter && ndlp->logo_waitq) wake_up(ndlp->logo_waitq); /* * If the node is a target, the handling attempts to recover the port. * For any other port type, the rpi is unregistered as an implicit * LOGO. */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irqrestore(&ndlp->lock, flags); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3187 LOGO completes to NPort x%x: Start " "Recovery Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes); lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); lpfc_disc_start(vport); return; } /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the * driver sends a LOGO to the rport to cleanup. For fabric and * initiator ports cleanup the node as long as it the node is not * register with the transport. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } out_rsrc_free: /* Driver is done with the I/O. */ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_issue_els_logo - Issue a logo to an node on a vport * @vport: pointer to a virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @retry: number of retries to the command IOCB. * * This routine constructs and issues an ELS Logout (LOGO) iocb command * to a remote node, referred by an @ndlp on a @vport. It constructs the * payload of the IOCB, properly sets up the @ndlp state, and invokes the * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the LOGO ELS command. 
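 *
 * Sketch of a synchronous caller (illustrative; the 30 second timeout
 * is an arbitrary example value): a path that must block until the
 * LOGO completes can set NLP_WAIT_FOR_LOGO and sleep on the node's
 * logo_waitq, which lpfc_cmpl_els_logo() wakes:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *
 *	ndlp->logo_waitq = &wq;
 *	spin_lock_irq(&ndlp->lock);
 *	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
 *	spin_unlock_irq(&ndlp->lock);
 *	if (!lpfc_issue_els_logo(vport, ndlp, 0))
 *		wait_event_timeout(wq,
 *			!(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
 *			msecs_to_jiffies(30000));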
* * Callers of this routine are expected to unregister the RPI first * * Return code * 0 - successfully issued logo * 1 - failed to issue logo **/ int lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; int rc; spin_lock_irq(&ndlp->lock); if (ndlp->nlp_flag & NLP_LOGO_SND) { spin_unlock_irq(&ndlp->lock); return 0; } spin_unlock_irq(&ndlp->lock); cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_LOGO); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; pcmd += sizeof(uint32_t); /* Fill in LOGO payload */ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); pcmd += sizeof(uint32_t); memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); phba->fc_stat.elsXmitLOGO++; elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_LOGO_SND; ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto err; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue LOGO: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); goto err; } spin_lock_irq(&ndlp->lock); ndlp->nlp_prev_state = ndlp->nlp_state; spin_unlock_irq(&ndlp->lock); lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); return 0; err: spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; spin_unlock_irq(&ndlp->lock); return 1; } /** * lpfc_cmpl_els_cmd - Completion callback function for generic els command * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is a generic completion callback function for ELS commands. * Specifically, it is the callback function which does not need to perform * any command specific operations. It is currently used by the ELS command * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). * Other than certain debug loggings, this callback function simply invokes the * lpfc_els_chk_latt() routine to check whether link went down during the * discovery process. 
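 *
 * A new ELS request only needs its own completion handler when it has
 * command-specific state to unwind; anything that just needs the iocb
 * freed and the ndlp reference dropped can point its completion here,
 * for example (illustrative):
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;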
**/ static void lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_nodelist *free_ndlp; IOCB_t *irsp; u32 ulp_status, ulp_word4, tmo, did, iotag; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); did = get_job_els_rsp64_did(phba, cmdiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(cmdiocb); iotag = get_wqe_reqtag(cmdiocb); } else { irsp = &rspiocb->iocb; tmo = irsp->ulpTimeout; iotag = irsp->ulpIoTag; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "ELS cmd cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", iotag, ulp_status, ulp_word4, tmo); /* Check to see if link went down during discovery */ lpfc_els_chk_latt(vport); free_ndlp = cmdiocb->ndlp; lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); } /** * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. * @vport: pointer to lpfc_vport data structure. * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. * * This routine registers the rpi assigned to the fabric controller * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED * state triggering a registration with the SCSI transport. * * This routine is single out because the fabric controller node * does not receive a PLOGI. This routine is consumed by the * SCR and RDF ELS commands. Callers are expected to qualify * with SLI4 first. **/ static int lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) { int rc = 0; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ns_ndlp; LPFC_MBOXQ_t *mbox; if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) return rc; ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ns_ndlp) return -ENODEV; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, ns_ndlp->nlp_state); if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) return -ENODEV; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0936 %s: no memory for reg_login " "Data: x%x x%x x%x x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return -ENOMEM; } rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); if (rc) { rc = -EACCES; goto out; } fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); if (!mbox->ctx_ndlp) { rc = -ENOMEM; goto out; } mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = -ENODEV; lpfc_nlp_put(fc_ndlp); goto out; } /* Success path. Exit. */ lpfc_nlp_set_state(vport, fc_ndlp, NLP_STE_REG_LOGIN_ISSUE); return 0; out: lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0938 %s: failed to format reg_login " "Data: x%x x%x x%x x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return rc; } /** * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. 
* * This routine is a generic completion callback function for Discovery ELS cmd. * Currently used by the ELS command issuing routines for the ELS State Change * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). * These commands will be retried once only for ELS timeout errors. **/ static void lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_els_rdf_rsp *prdf; struct lpfc_dmabuf *pcmd, *prsp; u32 *pdata; u32 cmd; struct lpfc_nodelist *ndlp = cmdiocb->ndlp; u32 ulp_status, ulp_word4, tmo, did, iotag; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); did = get_job_els_rsp64_did(phba, cmdiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(cmdiocb); iotag = get_wqe_reqtag(cmdiocb); } else { irsp = &rspiocb->iocb; tmo = irsp->ulpTimeout; iotag = irsp->ulpIoTag; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "ELS cmd cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); pcmd = cmdiocb->cmd_dmabuf; if (!pcmd) goto out; pdata = (u32 *)pcmd->virt; if (!pdata) goto out; cmd = *pdata; /* Only 1 retry for ELS Timeout only */ if (ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_SEQUENCE_TIMEOUT)) { cmdiocb->retry++; if (cmdiocb->retry <= 1) { switch (cmd) { case ELS_CMD_SCR: lpfc_issue_els_scr(vport, cmdiocb->retry); break; case ELS_CMD_EDC: lpfc_issue_els_edc(vport, cmdiocb->retry); break; case ELS_CMD_RDF: lpfc_issue_els_rdf(vport, cmdiocb->retry); break; } goto out; } phba->fc_stat.elsRetryExceeded++; } if (cmd == ELS_CMD_EDC) { /* must be called before checking uplStatus and returning */ lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); return; } if (ulp_status) { /* ELS discovery cmd completes with error */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, "4203 ELS cmd x%x error: x%x x%X\n", cmd, ulp_status, ulp_word4); goto out; } /* The RDF response doesn't have any impact on the running driver * but the notification descriptors are dumped here for support. */ if (cmd == ELS_CMD_RDF) { int i; prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (!prsp) goto out; prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; if (!prdf) goto out; for (i = 0; i < ELS_RDF_REG_TAG_CNT && i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, "4677 Fabric RDF Notification Grant " "Data: 0x%08x Reg: %x %x\n", be32_to_cpu( prdf->reg_d1.desc_tags[i]), phba->cgn_reg_signal, phba->cgn_reg_fpin); } out: /* Check to see if link went down during discovery */ lpfc_els_chk_latt(vport); lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } /** * lpfc_issue_els_scr - Issue a scr to an node on a vport * @vport: pointer to a host virtual N_Port data structure. * @retry: retry counter for the command IOCB. * * This routine issues a State Change Request (SCR) to a fabric node * on a @vport. The remote node is Fabric Controller (0xfffffd). It * first search the @vport node list to find the matching ndlp. If no such * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() * routine is invoked to send the SCR IOCB. 
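 *
 * Illustrative call sketch (hypothetical; typically issued once fabric
 * login completes so RSCN events will be delivered to this port):
 *
 *	if (lpfc_issue_els_scr(vport, 0))
 *		return;		/* SCR could not be issued */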
* * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the SCR ELS command. * * Return code * 0 - Successfully issued scr command * 1 - Failed to issue scr command **/ int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) { int rc = 0; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; struct lpfc_nodelist *ndlp; cmdsize = (sizeof(uint32_t) + sizeof(SCR)); ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); if (!ndlp) { ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); if (!ndlp) return 1; lpfc_enqueue_node(vport, ndlp); } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_SCR); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { rc = lpfc_reg_fab_ctrl_node(vport, ndlp); if (rc) { lpfc_els_free_iocb(phba, elsiocb); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0937 %s: Failed to reg fc node, rc %d\n", __func__, rc); return 1; } } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_SCR; pcmd += sizeof(uint32_t); /* For SCR, remainder of payload is SCR parameter page */ memset(pcmd, 0, sizeof(SCR)); ((SCR *) pcmd)->Function = SCR_FUNC_FULL; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue SCR: did:x%x", ndlp->nlp_DID, 0, 0); phba->fc_stat.elsXmitSCR++; elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue SCR: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) * or the other nport (pt2pt). * @vport: pointer to a host virtual N_Port data structure. * @retry: number of retries to the command IOCB. * * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) * when connected to a fabric, or to the remote port when connected * in point-to-point mode. When sent to the Fabric Controller, it will * replay the RSCN to registered recipients. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the RSCN ELS command. 
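 *
 * Illustrative call sketch (hypothetical; e.g. after a local change that
 * remote registered ports should learn about):
 *
 *	if (lpfc_issue_els_rscn(vport, 0))
 *		return;		/* RSCN could not be issued */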
* * Return code * 0 - Successfully issued RSCN command * 1 - Failed to issue RSCN command **/ int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) { int rc = 0; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp; struct { struct fc_els_rscn rscn; struct fc_els_rscn_page portid; } *event; uint32_t nportid; uint16_t cmdsize = sizeof(*event); /* Not supported for private loop */ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && !(vport->fc_flag & FC_PUBLIC_LOOP)) return 1; if (vport->fc_flag & FC_PT2PT) { /* find any mapped nport - that would be the other nport */ ndlp = lpfc_findnode_mapped(vport); if (!ndlp) return 1; } else { nportid = FC_FID_FCTRL; /* find the fabric controller node */ ndlp = lpfc_findnode_did(vport, nportid); if (!ndlp) { /* if one didn't exist, make one */ ndlp = lpfc_nlp_init(vport, nportid); if (!ndlp) return 1; lpfc_enqueue_node(vport, ndlp); } } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_RSCN_XMT); if (!elsiocb) return 1; event = elsiocb->cmd_dmabuf->virt; event->rscn.rscn_cmd = ELS_RSCN; event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); event->rscn.rscn_plen = cpu_to_be16(cmdsize); nportid = vport->fc_myDID; /* appears that page flags must be 0 for fabric to broadcast RSCN */ event->portid.rscn_page_flags = 0; event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; event->portid.rscn_fid[2] = nportid & 0x000000FF; phba->fc_stat.elsXmitRSCN++; elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue RSCN: did:x%x", ndlp->nlp_DID, 0, 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_issue_els_farpr - Issue a farp to an node on a vport * @vport: pointer to a host virtual N_Port data structure. * @nportid: N_Port identifier to the remote node. * @retry: number of retries to the command IOCB. * * This routine issues a Fibre Channel Address Resolution Response * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) * is passed into the function. It first search the @vport node list to find * the matching ndlp. If no such ndlp is found, a new ndlp shall be created * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the FARPR ELS command. 
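 *
 * Illustrative call sketch (hypothetical; @nportid is the N_Port ID being
 * resolved):
 *
 *	if (lpfc_issue_els_farpr(vport, nportid, 0))
 *		return;		/* FARPR could not be issued */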
* * Return code * 0 - Successfully issued farpr command * 1 - Failed to issue farpr command **/ static int lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) { int rc = 0; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; FARP *fp; uint8_t *pcmd; uint32_t *lp; uint16_t cmdsize; struct lpfc_nodelist *ondlp; struct lpfc_nodelist *ndlp; cmdsize = (sizeof(uint32_t) + sizeof(FARP)); ndlp = lpfc_findnode_did(vport, nportid); if (!ndlp) { ndlp = lpfc_nlp_init(vport, nportid); if (!ndlp) return 1; lpfc_enqueue_node(vport, ndlp); } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_FARPR); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; pcmd += sizeof(uint32_t); /* Fill in FARPR payload */ fp = (FARP *) (pcmd); memset(fp, 0, sizeof(FARP)); lp = (uint32_t *) pcmd; *lp++ = be32_to_cpu(nportid); *lp++ = be32_to_cpu(vport->fc_myDID); fp->Rflags = 0; fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); ondlp = lpfc_findnode_did(vport, nportid); if (ondlp) { memcpy(&fp->OportName, &ondlp->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fp->OnodeName, &ondlp->nlp_nodename, sizeof(struct lpfc_name)); } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue FARPR: did:x%x", ndlp->nlp_DID, 0, 0); phba->fc_stat.elsXmitFARPR++; elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the release of * the node. */ lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } /* This will cause the callback-function lpfc_cmpl_els_cmd to * trigger the release of the node. */ /* Don't release reference count as RDF is likely outstanding */ return 0; } /** * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. * @vport: pointer to a host virtual N_Port data structure. * @retry: retry counter for the command IOCB. * * This routine issues an ELS RDF to the Fabric Controller to register * for diagnostic functions. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the RDF ELS command. * * Return code * 0 - Successfully issued rdf command * 1 - Failed to issue rdf command **/ int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; struct lpfc_els_rdf_req *prdf; struct lpfc_nodelist *ndlp; uint16_t cmdsize; int rc; cmdsize = sizeof(*prdf); ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); if (!ndlp) { ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); if (!ndlp) return -ENODEV; lpfc_enqueue_node(vport, ndlp); } /* RDF ELS is not required on an NPIV VN_Port. */ if (vport->port_type == LPFC_NPIV_PORT) return -EACCES; elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_RDF); if (!elsiocb) return -ENOMEM; /* Configure the payload for the supported FPIN events. 
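	 * The registration descriptor asks for all four FPIN notification
	 * types programmed into desc_tags[] below: link integrity, delivery,
	 * peer congestion and congestion.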
	 */
	prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
	memset(prdf, 0, cmdsize);
	prdf->rdf.fpin_cmd = ELS_RDF;
	prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
					 sizeof(struct fc_els_rdf));
	prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
	prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
				FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
	prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
	prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
	prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
	prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
	prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
			 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
			 ndlp->nlp_DID, phba->cgn_reg_signal,
			 phba->cgn_reg_fpin);

	phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return -EIO;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue RDF: did:x%x refcnt %d",
			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * A received RDF implies a possible change to fabric supported diagnostic
 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
 * RDF request to reregister for supported diagnostic functions.
 *
 * Return code
 *   0 - Success
 *   -EIO - Failed to process received RDF
 **/
static int
lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	/* Send LS_ACC */
	if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
				 "1623 Failed to RDF_ACC from x%x for x%x\n",
				 ndlp->nlp_DID, vport->fc_myDID);
		return -EIO;
	}

	/* Issue new RDF for reregistering */
	if (lpfc_issue_els_rdf(vport, 0)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
				 "2623 Failed to re register RDF for x%x\n",
				 vport->fc_myDID);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_least_capable_settings - helper function for EDC rsp processing
 * @phba: pointer to lpfc hba data structure.
 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
 *
 * This helper routine determines the least capable setting for
 * congestion signals, signal freq, including scale, from the
 * congestion detection descriptor in the EDC rsp. The routine
 * sets @phba values in preparation for a set_features mailbox.
 **/
static void
lpfc_least_capable_settings(struct lpfc_hba *phba,
			    struct fc_diag_cg_sig_desc *pcgd)
{
	u32 rsp_sig_cap = 0, drv_sig_cap = 0;
	u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;

	/* Get rsp signal and frequency capabilities. */
	rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
	rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
	rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);

	/* If the Fport does not support signals, set FPIN only */
	if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
		goto out_no_support;

	/* Apply the xmt scale to the xmt cycle to get the correct frequency.
	 * Adapter default is 100 milliseconds.
Convert all xmt cycle values * to milliSeconds. */ switch (rsp_sig_freq_scale) { case EDC_CG_SIGFREQ_SEC: rsp_sig_freq_cyc *= MSEC_PER_SEC; break; case EDC_CG_SIGFREQ_MSEC: rsp_sig_freq_cyc = 1; break; default: goto out_no_support; } /* Convenient shorthand. */ drv_sig_cap = phba->cgn_reg_signal; /* Choose the least capable frequency. */ if (rsp_sig_freq_cyc > phba->cgn_sig_freq) phba->cgn_sig_freq = rsp_sig_freq_cyc; /* Should be some common signals support. Settle on least capable * signal and adjust FPIN values. Initialize defaults to ease the * decision. */ phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; } if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; } if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; } } /* We are NOT recording signal frequency in congestion info buffer */ return; out_no_support: phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; phba->cgn_sig_freq = 0; phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; } DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, FC_LS_TLV_DTAG_INIT); /** * lpfc_cmpl_els_edc - Completion callback function for EDC * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion callback function for issuing the Exchange * Diagnostic Capabilities (EDC) command. The driver issues an EDC to * notify the FPort of its Congestion and Link Fault capabilities. This * routine parses the FPort's response and decides on the least common * values applicable to both FPort and NPort for Warnings and Alarms that * are communicated via hardware signals. **/ static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { IOCB_t *irsp_iocb; struct fc_els_edc_resp *edc_rsp; struct fc_tlv_desc *tlv; struct fc_diag_cg_sig_desc *pcgd; struct fc_diag_lnkflt_desc *plnkflt; struct lpfc_dmabuf *pcmd, *prsp; const char *dtag_nm; u32 *pdata, dtag; int desc_cnt = 0, bytes_remain; bool rcv_cap_desc = false; struct lpfc_nodelist *ndlp; u32 ulp_status, ulp_word4, tmo, did, iotag; ndlp = cmdiocb->ndlp; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); did = get_job_els_rsp64_did(phba, rspiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(rspiocb); iotag = get_wqe_reqtag(rspiocb); } else { irsp_iocb = &rspiocb->iocb; tmo = irsp_iocb->ulpTimeout; iotag = irsp_iocb->ulpIoTag; } lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, "EDC cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", iotag, ulp_status, ulp_word4, tmo); pcmd = cmdiocb->cmd_dmabuf; if (!pcmd) goto out; pdata = (u32 *)pcmd->virt; if (!pdata) goto out; /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ if (ulp_status) goto out; prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (!prsp) goto out; edc_rsp = prsp->virt; if (!edc_rsp) goto out; /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, "4676 Fabric EDC Rsp: " "0x%02x, 0x%08x\n", edc_rsp->acc_hdr.la_cmd, be32_to_cpu(edc_rsp->desc_list_len)); /* * Payload length in bytes is the response descriptor list * length minus the 12 bytes of Link Service Request * Information descriptor in the reply. */ bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - sizeof(struct fc_els_lsri_desc); if (bytes_remain <= 0) goto out; tlv = edc_rsp->desc; /* * cycle through EDC diagnostic descriptors to find the * congestion signaling capability descriptor */ while (bytes_remain) { if (bytes_remain < FC_TLV_DESC_HDR_SZ) { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "6461 Truncated TLV hdr on " "Diagnostic descriptor[%d]\n", desc_cnt); goto out; } dtag = be32_to_cpu(tlv->desc_tag); switch (dtag) { case ELS_DTAG_LNK_FAULT_CAP: if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != sizeof(struct fc_diag_lnkflt_desc)) { lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, "6462 Truncated Link Fault Diagnostic " "descriptor[%d]: %d vs 0x%zx 0x%zx\n", desc_cnt, bytes_remain, FC_TLV_DESC_SZ_FROM_LENGTH(tlv), sizeof(struct fc_diag_lnkflt_desc)); goto out; } plnkflt = (struct fc_diag_lnkflt_desc *)tlv; lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_LDS_EVENT, "4617 Link Fault Desc Data: 0x%08x 0x%08x " "0x%08x 0x%08x 0x%08x\n", be32_to_cpu(plnkflt->desc_tag), be32_to_cpu(plnkflt->desc_len), be32_to_cpu( plnkflt->degrade_activate_threshold), be32_to_cpu( plnkflt->degrade_deactivate_threshold), be32_to_cpu(plnkflt->fec_degrade_interval)); break; case ELS_DTAG_CG_SIGNAL_CAP: if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != sizeof(struct fc_diag_cg_sig_desc)) { lpfc_printf_log( phba, KERN_WARNING, LOG_CGN_MGMT, "6463 Truncated Cgn Signal Diagnostic " "descriptor[%d]: %d vs 0x%zx 0x%zx\n", desc_cnt, bytes_remain, FC_TLV_DESC_SZ_FROM_LENGTH(tlv), sizeof(struct fc_diag_cg_sig_desc)); goto out; } pcgd = (struct fc_diag_cg_sig_desc *)tlv; lpfc_printf_log( phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, "4616 CGN Desc Data: 0x%08x 0x%08x " "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", be32_to_cpu(pcgd->desc_tag), be32_to_cpu(pcgd->desc_len), be32_to_cpu(pcgd->xmt_signal_capability), be16_to_cpu(pcgd->xmt_signal_frequency.count), be16_to_cpu(pcgd->xmt_signal_frequency.units), be32_to_cpu(pcgd->rcv_signal_capability), be16_to_cpu(pcgd->rcv_signal_frequency.count), be16_to_cpu(pcgd->rcv_signal_frequency.units)); /* Compare driver and Fport capabilities and choose * least common. 
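			 * For example (see lpfc_least_capable_settings()), a
			 * driver capability of WARN_ALARM against a fabric
			 * response of WARN_ONLY settles on WARN_ONLY signals
			 * and keeps FPIN registration for alarms.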
*/ lpfc_least_capable_settings(phba, pcgd); rcv_cap_desc = true; break; default: dtag_nm = lpfc_get_tlv_dtag_nm(dtag); lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "4919 unknown Diagnostic " "Descriptor[%d]: tag x%x (%s)\n", desc_cnt, dtag, dtag_nm); } bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); tlv = fc_tlv_next_desc(tlv); desc_cnt++; } out: if (!rcv_cap_desc) { phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; phba->cgn_sig_freq = 0; lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, "4202 EDC rsp error - sending RDF " "for FPIN only.\n"); } lpfc_config_cgn_signal(phba); /* Check to see if link went down during discovery */ lpfc_els_chk_latt(phba->pport); lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, "EDC Cmpl: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } static void lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); lft->desc_len = cpu_to_be32( FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); lft->degrade_activate_threshold = cpu_to_be32(phba->degrade_activate_threshold); lft->degrade_deactivate_threshold = cpu_to_be32(phba->degrade_deactivate_threshold); lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); } static void lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; /* We are assuming cgd was zero'ed before calling this routine */ /* Configure the congestion detection capability */ cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); /* Descriptor len doesn't include the tag or len fields. */ cgd->desc_len = cpu_to_be32( FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. * xmt_signal_frequency.count already set to 0. * xmt_signal_frequency.units already set to 0. */ if (phba->cmf_active_mode == LPFC_CFG_OFF) { /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. * rcv_signal_frequency.count already set to 0. * rcv_signal_frequency.units already set to 0. */ phba->cgn_sig_freq = 0; return; } switch (phba->cgn_reg_signal) { case EDC_CG_SIG_WARN_ONLY: cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); break; case EDC_CG_SIG_WARN_ALARM: cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); break; default: /* rcv_signal_capability left 0 thus no support */ break; } /* We start negotiation with lpfc_fabric_cgn_frequency, after * the completion we settle on the higher frequency. */ cgd->rcv_signal_frequency.count = cpu_to_be16(lpfc_fabric_cgn_frequency); cgd->rcv_signal_frequency.units = cpu_to_be16(EDC_CG_SIGFREQ_MSEC); } static bool lpfc_link_is_lds_capable(struct lpfc_hba *phba) { if (!(phba->lmt & LMT_64Gb)) return false; if (phba->sli_rev != LPFC_SLI_REV4) return false; if (phba->sli4_hba.conf_trunk) { if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) return true; } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { return true; } return false; } /** * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. * @vport: pointer to a host virtual N_Port data structure. * @retry: retry counter for the command iocb. 
* * This routine issues an ELS EDC to the F-Port Controller to communicate * this N_Port's support of hardware signals in its Congestion * Capabilities Descriptor. * * Note: This routine does not check if one or more signals are * set in the cgn_reg_signal parameter. The caller makes the * decision to enforce cgn_reg_signal as nonzero or zero depending * on the conditions. During Fabric requests, the driver * requires cgn_reg_signals to be nonzero. But a dynamic request * to set the congestion mode to OFF from Monitor or Manage * would correctly issue an EDC with no signals enabled to * turn off switch functionality and then update the FW. * * Return code * 0 - Successfully issued edc command * 1 - Failed to issue edc command **/ int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; struct fc_els_edc *edc_req; struct fc_tlv_desc *tlv; u16 cmdsize; struct lpfc_nodelist *ndlp; u8 *pcmd = NULL; u32 cgn_desc_size, lft_desc_size; int rc; if (vport->port_type == LPFC_NPIV_PORT) return -EACCES; ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) return -ENODEV; cgn_desc_size = (phba->cgn_init_reg_signal) ? sizeof(struct fc_diag_cg_sig_desc) : 0; lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? sizeof(struct fc_diag_lnkflt_desc) : 0; cmdsize = cgn_desc_size + lft_desc_size; /* Skip EDC if no applicable descriptors */ if (!cmdsize) goto try_rdf; cmdsize += sizeof(struct fc_els_edc); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_EDC); if (!elsiocb) goto try_rdf; /* Configure the payload for the supported Diagnostics capabilities. */ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; memset(pcmd, 0, cmdsize); edc_req = (struct fc_els_edc *)pcmd; edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size); edc_req->edc_cmd = ELS_EDC; tlv = edc_req->desc; if (cgn_desc_size) { lpfc_format_edc_cgn_desc(phba, tlv); phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; tlv = fc_tlv_next_desc(tlv); } if (lft_desc_size) lpfc_format_edc_lft_desc(phba, tlv); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, "4623 Xmit EDC to remote " "NPORT x%x reg_sig x%x reg_fpin:x%x\n", ndlp->nlp_DID, phba->cgn_reg_signal, phba->cgn_reg_fpin); elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return -EIO; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue EDC: did:x%x refcnt %d", ndlp->nlp_DID, kref_read(&ndlp->kref), 0); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the rlease of * the node. */ lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); goto try_rdf; } return 0; try_rdf: phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; rc = lpfc_issue_els_rdf(vport, 0); return rc; } /** * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry * @vport: pointer to a host virtual N_Port data structure. * @nlp: pointer to a node-list data structure. * * This routine cancels the timer with a delayed IOCB-command retry for * a @vport's @ndlp. It stops the timer for the delayed function retrial and * removes the ELS retry event if it presents. 
In addition, if the * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB * commands are sent for the @vport's nodes that require issuing discovery * ADISC. **/ void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_work_evt *evtp; if (!(nlp->nlp_flag & NLP_DELAY_TMO)) return; spin_lock_irq(&nlp->lock); nlp->nlp_flag &= ~NLP_DELAY_TMO; spin_unlock_irq(&nlp->lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; if (!list_empty(&nlp->els_retry_evt.evt_listp)) { list_del_init(&nlp->els_retry_evt.evt_listp); /* Decrement nlp reference count held for the delayed retry */ evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } if (nlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(&nlp->lock); nlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&nlp->lock); if (vport->num_disc_nodes) { if (vport->port_state < LPFC_VPORT_READY) { /* Check if there are more ADISCs to be sent */ lpfc_more_adisc(vport); } else { /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); lpfc_end_rscn(vport); } } } } return; } /** * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer * @t: pointer to the timer function associated data (ndlp). * * This routine is invoked by the ndlp delayed-function timer to check * whether there is any pending ELS retry event(s) with the node. If not, it * simply returns. Otherwise, if there is at least one ELS delayed event, it * adds the delayed events to the HBA work list and invokes the * lpfc_worker_wake_up() routine to wake up worker thread to process the * event. Note that lpfc_nlp_get() is called before posting the event to * the work list to hold reference count of ndlp so that it guarantees the * reference to ndlp will still be available when the worker thread gets * to the event associated with the ndlp. **/ void lpfc_els_retry_delay(struct timer_list *t) { struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); struct lpfc_vport *vport = ndlp->vport; struct lpfc_hba *phba = vport->phba; unsigned long flags; struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; spin_lock_irqsave(&phba->hbalock, flags); if (!list_empty(&evtp->evt_listp)) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } /* We need to hold the node by incrementing the reference * count until the queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_ELS_RETRY; list_add_tail(&evtp->evt_listp, &phba->work_list); lpfc_worker_wake_up(phba); } spin_unlock_irqrestore(&phba->hbalock, flags); return; } /** * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function * @ndlp: pointer to a node-list data structure. * * This routine is the worker-thread handler for processing the @ndlp delayed * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves * the last ELS command from the associated ndlp and invokes the proper ELS * function according to the delayed ELS command to retry the command. 
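 *
 * Condensed sketch of the flow that ends here (see lpfc_els_retry_delay()
 * above and lpfc_els_retry() later in this file for the authoritative
 * steps; @delay is whatever that retry logic selected):
 *
 *	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay));
 *	ndlp->nlp_flag |= NLP_DELAY_TMO;
 *	ndlp->nlp_last_elscmd = cmd;
 *	... the timer fires, lpfc_els_retry_delay() queues an
 *	LPFC_EVT_ELS_RETRY work item, and this handler re-issues
 *	ndlp->nlp_last_elscmd ...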
**/ void lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) { struct lpfc_vport *vport = ndlp->vport; uint32_t cmd, retry; spin_lock_irq(&ndlp->lock); cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { spin_unlock_irq(&ndlp->lock); return; } ndlp->nlp_flag &= ~NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the * nlp_delayfunc. */ del_timer_sync(&ndlp->nlp_delayfunc); retry = ndlp->nlp_retry; ndlp->nlp_retry = 0; switch (cmd) { case ELS_CMD_FLOGI: lpfc_issue_els_flogi(vport, ndlp, retry); break; case ELS_CMD_PLOGI: if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); } break; case ELS_CMD_ADISC: if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); } break; case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: if (!lpfc_issue_els_prli(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); } break; case ELS_CMD_LOGO: if (!lpfc_issue_els_logo(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); } break; case ELS_CMD_FDISC: if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) lpfc_issue_els_fdisc(vport, ndlp, retry); break; } return; } /** * lpfc_link_reset - Issue link reset * @vport: pointer to a virtual N_Port data structure. * * This routine performs link reset by sending INIT_LINK mailbox command. * For SLI-3 adapter, link attention interrupt is enabled before issuing * INIT_LINK mailbox command. * * Return code * 0 - Link reset initiated successfully * 1 - Failed to initiate link reset **/ int lpfc_link_reset(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; uint32_t control; int rc; lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "2851 Attempt link reset\n"); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2852 Failed to allocate mbox memory"); return 1; } /* Enable Link attention interrupts */ if (phba->sli_rev <= LPFC_SLI_REV3) { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); } lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2853 Failed to issue INIT_LINK " "mbox command, rc:x%x\n", rc); mempool_free(mbox, phba->mbox_mem_pool); return 1; } return 0; } /** * lpfc_els_retry - Make retry decision on an els command iocb * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine makes a retry decision on an ELS command IOCB, which has * failed. The following ELS IOCBs use this function for retrying the command * when previously issued command responsed with error status: FLOGI, PLOGI, * PRLI, ADISC and FDISC. 
Based on the ELS command type and the
 * returned error status, it makes the decision whether a retry shall be
 * issued for the command, and whether a retry shall be made immediately or
 * delayed. In the former case, the corresponding ELS command issuing-function
 * is called to retry the command. In the latter case, the ELS command shall
 * be posted to the ndlp delayed event and the delayed function timer set to
 * the ndlp for the delayed command issuing.
 *
 * Return code
 *   0 - No retry of els command is made
 *   1 - Immediate or delayed retry of els command is made
 **/
static int
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	union lpfc_wqe128 *irsp = &rspiocb->wqe;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
	uint32_t *elscmd;
	struct ls_rjt stat;
	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
	int logerr = 0;
	uint32_t cmd = 0;
	uint32_t did;
	int link_reset = 0, rc;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	/* Note: cmd_dmabuf may be 0 for internal driver abort
	 * of a delayed ELS command.
	 */
	if (pcmd && pcmd->virt) {
		elscmd = (uint32_t *) (pcmd->virt);
		cmd = *elscmd++;
	}

	if (ndlp)
		did = ndlp->nlp_DID;
	else {
		/* We should only hit this case for retrying PLOGI */
		did = get_job_els_rsp64_did(phba, rspiocb);
		ndlp = lpfc_findnode_did(vport, did);
		if (!ndlp && (cmd != ELS_CMD_PLOGI))
			return 0;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Retry ELS: wd7:x%x wd4:x%x did:x%x",
		*(((uint32_t *)irsp) + 7), ulp_word4, did);

	switch (ulp_status) {
	case IOSTAT_FCP_RSP_ERROR:
		break;
	case IOSTAT_REMOTE_STOP:
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* This IO was aborted by the target, we don't
			 * know the rxid and because we did not send the
			 * ABTS we cannot generate an RRQ.
			 */
			lpfc_set_rrq_active(phba, ndlp,
					    cmdiocb->sli4_lxritag, 0, 0);
		}
		break;
	case IOSTAT_LOCAL_REJECT:
		switch ((ulp_word4 & IOERR_PARAM_MASK)) {
		case IOERR_LOOP_OPEN_FAILURE:
			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
				delay = 1000;
			retry = 1;
			break;

		case IOERR_ILLEGAL_COMMAND:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0124 Retry illegal cmd x%x "
					 "retry:x%x delay:x%x\n",
					 cmd, cmdiocb->retry, delay);
			retry = 1;
			/* All command's retry policy */
			maxretry = 8;
			if (cmdiocb->retry > 2)
				delay = 1000;
			break;

		case IOERR_NO_RESOURCES:
			logerr = 1; /* HBA out of resources */
			retry = 1;
			if (cmdiocb->retry > 100)
				delay = 100;
			maxretry = 250;
			break;

		case IOERR_ILLEGAL_FRAME:
			delay = 100;
			retry = 1;
			break;

		case IOERR_INVALID_RPI:
			if (cmd == ELS_CMD_PLOGI &&
			    did == NameServer_DID) {
				/* Continue forever if plogi to */
				/* the nameserver fails */
				maxretry = 0;
				delay = 100;
			} else if (cmd == ELS_CMD_PRLI &&
				   ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
				/* State-command disagreement. The PRLI was
				 * failed with an invalid rpi meaning there
				 * was some unexpected state change. Don't
				 * retry.
				 */
				maxretry = 0;
				retry = 0;
				break;
			}
			retry = 1;
			break;

		case IOERR_SEQUENCE_TIMEOUT:
			if (cmd == ELS_CMD_PLOGI &&
			    did == NameServer_DID &&
			    (cmdiocb->retry + 1) == maxretry) {
				/* Reset the Link */
				link_reset = 1;
				break;
			}
			retry = 1;
			delay = 100;
			break;
		case IOERR_SLI_ABORTED:
			/* Retry ELS PLOGI command?
			 * Possibly the rport just wasn't ready.
*/ if (cmd == ELS_CMD_PLOGI) { /* No retry if state change */ if (ndlp && ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) goto out_retry; retry = 1; maxretry = 2; } break; } break; case IOSTAT_NPORT_RJT: case IOSTAT_FABRIC_RJT: if (ulp_word4 & RJT_UNAVAIL_TEMP) { retry = 1; break; } break; case IOSTAT_NPORT_BSY: case IOSTAT_FABRIC_BSY: logerr = 1; /* Fabric / Remote NPort out of resources */ retry = 1; break; case IOSTAT_LS_RJT: stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); /* Added for Vendor specifc support * Just keep retrying for these Rsn / Exp codes */ if ((vport->fc_flag & FC_PT2PT) && cmd == ELS_CMD_NVMEPRLI) { switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: case LSRJT_INVALID_CMD: case LSRJT_LOGICAL_ERR: case LSRJT_CMD_UNSUPPORTED: lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, "0168 NVME PRLI LS_RJT " "reason %x port doesn't " "support NVME, disabling NVME\n", stat.un.b.lsRjtRsnCode); retry = 0; vport->fc_flag |= FC_PT2PT_NO_NVME; goto out_retry; } } switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: /* Special case for PRLI LS_RJTs. Recall that lpfc * uses a single routine to issue both PRLI FC4 types. * If the PRLI is rejected because that FC4 type * isn't really supported, don't retry and cause * multiple transport registrations. Otherwise, parse * the reason code/reason code explanation and take the * appropriate action. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS | LOG_NODE, "0153 ELS cmd x%x LS_RJT by x%x. " "RsnCode x%x RsnCodeExp x%x\n", cmd, did, stat.un.b.lsRjtRsnCode, stat.un.b.lsRjtRsnCodeExp); switch (stat.un.b.lsRjtRsnCodeExp) { case LSEXP_CANT_GIVE_DATA: case LSEXP_CMD_IN_PROGRESS: if (cmd == ELS_CMD_PLOGI) { delay = 1000; maxretry = 48; } retry = 1; break; case LSEXP_REQ_UNSUPPORTED: case LSEXP_NO_RSRC_ASSIGN: /* These explanation codes get no retry. */ if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) break; fallthrough; default: /* Limit the delay and retry action to a limited * cmd set. There are other ELS commands where * a retry is not expected. */ if (cmd == ELS_CMD_PLOGI || cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; } break; } if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0125 FDISC Failed (x%x). " "Fabric out of resources\n", stat.un.lsRjtError); lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_RSCS); } break; case LSRJT_LOGICAL_BSY: if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_PRLI) || (cmd == ELS_CMD_NVMEPRLI)) { delay = 1000; maxretry = 48; } else if (cmd == ELS_CMD_FDISC) { /* FDISC retry policy */ maxretry = 48; if (cmdiocb->retry >= 32) delay = 1000; } retry = 1; break; case LSRJT_LOGICAL_ERR: /* There are some cases where switches return this * error when they are not ready and should be returning * Logical Busy. We should delay every time. */ if (cmd == ELS_CMD_FDISC && stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { maxretry = 3; delay = 1000; retry = 1; } else if (cmd == ELS_CMD_FLOGI && stat.un.b.lsRjtRsnCodeExp == LSEXP_NOTHING_MORE) { vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; retry = 1; lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0820 FLOGI Failed (x%x). 
" "BBCredit Not Supported\n", stat.un.lsRjtError); } break; case LSRJT_PROTOCOL_ERR: if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) ) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0122 FDISC Failed (x%x). " "Fabric Detected Bad WWN\n", stat.un.lsRjtError); lpfc_vport_set_state(vport, FC_VPORT_FABRIC_REJ_WWN); } break; case LSRJT_VENDOR_UNIQUE: if ((stat.un.b.vendorUnique == 0x45) && (cmd == ELS_CMD_FLOGI)) { goto out_retry; } break; case LSRJT_CMD_UNSUPPORTED: /* lpfc nvmet returns this type of LS_RJT when it * receives an FCP PRLI because lpfc nvmet only * support NVME. ELS request is terminated for FCP4 * on this rport. */ if (stat.un.b.lsRjtRsnCodeExp == LSEXP_REQ_UNSUPPORTED) { if (cmd == ELS_CMD_PRLI) goto out_retry; } break; } break; case IOSTAT_INTERMED_RSP: case IOSTAT_BA_RJT: break; default: break; } if (link_reset) { rc = lpfc_link_reset(vport); if (rc) { /* Do not give up. Retry PLOGI one more time and attempt * link reset if PLOGI fails again. */ retry = 1; delay = 100; goto out_retry; } return 1; } if (did == FDMI_DID) retry = 1; if ((cmd == ELS_CMD_FLOGI) && (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { /* FLOGI retry policy */ retry = 1; /* retry FLOGI forever */ if (phba->link_flag != LS_LOOPBACK_MODE) maxretry = 0; else maxretry = 2; if (cmdiocb->retry >= 100) delay = 5000; else if (cmdiocb->retry >= 32) delay = 1000; } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { /* retry FDISCs every second up to devloss */ retry = 1; maxretry = vport->cfg_devloss_tmo; delay = 1000; } cmdiocb->retry++; if (maxretry && (cmdiocb->retry >= maxretry)) { phba->fc_stat.elsRetryExceeded++; retry = 0; } if ((vport->load_flag & FC_UNLOADING) != 0) retry = 0; out_retry: if (retry) { if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { /* Stop retrying PLOGI and FDISC if in FCF discovery */ if (phba->fcf.fcf_flag & FCF_DISCOVERY) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2849 Stop retry ELS command " "x%x to remote NPORT x%x, " "Data: x%x x%x\n", cmd, did, cmdiocb->retry, delay); return 0; } } /* Retry ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0107 Retry ELS command x%x to remote " "NPORT x%x Data: x%x x%x\n", cmd, did, cmdiocb->retry, delay); if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && ((ulp_status != IOSTAT_LOCAL_REJECT) || ((ulp_word4 & IOERR_PARAM_MASK) != IOERR_NO_RESOURCES))) { /* Don't reset timer for no resources */ /* If discovery / RSCN timer is running, reset it */ if (timer_pending(&vport->fc_disctmo) || (vport->fc_flag & FC_RSCN_MODE)) lpfc_set_disctmo(vport); } phba->fc_stat.elsXmitRetry++; if (ndlp && delay) { phba->fc_stat.elsDelayRetry++; ndlp->nlp_retry = cmdiocb->retry; /* delay is specified in milliseconds */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_prev_state = ndlp->nlp_state; if ((cmd == ELS_CMD_PRLI) || (cmd == ELS_CMD_NVMEPRLI)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); else if (cmd != ELS_CMD_ADISC) lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); ndlp->nlp_last_elscmd = cmd; return 1; } switch (cmd) { case ELS_CMD_FLOGI: lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_FDISC: 
lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_PLOGI: if (ndlp) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); } lpfc_issue_els_plogi(vport, did, cmdiocb->retry); return 1; case ELS_CMD_ADISC: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_LOGO: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); return 1; } } /* No retry ELS command <elsCmd> to remote NPORT <did> */ if (logerr) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0137 No retry ELS command x%x to remote " "NPORT x%x: Out of Resources: Error:x%x/%x\n", cmd, did, ulp_status, ulp_word4); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x\n", cmd, did, cmdiocb->retry, ulp_status, ulp_word4); } return 0; } /** * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb * @phba: pointer to lpfc hba data structure. * @buf_ptr1: pointer to the lpfc DMA buffer data structure. * * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) * associated with a command IOCB back to the lpfc DMA buffer pool. It first * checks to see whether there is a lpfc DMA buffer associated with the * response of the command IOCB. If so, it will be released before releasing * the lpfc DMA buffer associated with the IOCB itself. * * Return code * 0 - Successfully released lpfc DMA buffer (currently, always return 0) **/ static int lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) { struct lpfc_dmabuf *buf_ptr; /* Free the response before processing the command. */ if (!list_empty(&buf_ptr1->list)) { list_remove_head(&buf_ptr1->list, buf_ptr, struct lpfc_dmabuf, list); lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); } lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); kfree(buf_ptr1); return 0; } /** * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl * @phba: pointer to lpfc hba data structure. * @buf_ptr: pointer to the lpfc dma buffer data structure. * * This routine releases the lpfc Direct Memory Access (DMA) buffer * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer * pool. * * Return code * 0 - Successfully released lpfc DMA buffer (currently, always return 0) **/ static int lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) { lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); return 0; } /** * lpfc_els_free_iocb - Free a command iocb and its associated resources * @phba: pointer to lpfc hba data structure. * @elsiocb: pointer to lpfc els command iocb data structure. * * This routine frees a command IOCB and its associated resources. The * command IOCB data structure contains the reference to various associated * resources, these fields must be set to NULL if the associated reference * not present: * cmd_dmabuf - reference to cmd. * cmd_dmabuf->next - reference to rsp * rsp_dmabuf - unused * bpl_dmabuf - reference to bpl * * It first properly decrements the reference count held on ndlp for the * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not * set, it invokes the lpfc_els_free_data() routine to release the Direct * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it * adds the DMA buffer the @phba data structure for the delayed release. * If reference to the Buffer Pointer List (BPL) is present, the * lpfc_els_free_bpl() routine is invoked to release the DMA memory * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is * invoked to release the IOCB data structure back to @phba IOCBQ list. * * Return code * 0 - Success (currently, always return 0) **/ int lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) { struct lpfc_dmabuf *buf_ptr, *buf_ptr1; /* The I/O iocb is complete. Clear the node and first dmbuf */ elsiocb->ndlp = NULL; /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ if (elsiocb->cmd_dmabuf) { if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { /* Firmware could still be in progress of DMAing * payload, so don't free data buffer till after * a hbeat. */ elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; buf_ptr = elsiocb->cmd_dmabuf; elsiocb->cmd_dmabuf = NULL; if (buf_ptr) { buf_ptr1 = NULL; spin_lock_irq(&phba->hbalock); if (!list_empty(&buf_ptr->list)) { list_remove_head(&buf_ptr->list, buf_ptr1, struct lpfc_dmabuf, list); INIT_LIST_HEAD(&buf_ptr1->list); list_add_tail(&buf_ptr1->list, &phba->elsbuf); phba->elsbuf_cnt++; } INIT_LIST_HEAD(&buf_ptr->list); list_add_tail(&buf_ptr->list, &phba->elsbuf); phba->elsbuf_cnt++; spin_unlock_irq(&phba->hbalock); } } else { buf_ptr1 = elsiocb->cmd_dmabuf; lpfc_els_free_data(phba, buf_ptr1); elsiocb->cmd_dmabuf = NULL; } } if (elsiocb->bpl_dmabuf) { buf_ptr = elsiocb->bpl_dmabuf; lpfc_els_free_bpl(phba, buf_ptr); elsiocb->bpl_dmabuf = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; } /** * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion callback function to the Logout (LOGO) * Accept (ACC) Response ELS command. This routine is invoked to indicate * the completion of the LOGO process. If the node has transitioned to NPR, * this routine unregisters the RPI if it is still registered. The * lpfc_els_free_iocb() is invoked to release the IOCB data structure. **/ static void lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = cmdiocb->vport; u32 ulp_status, ulp_word4; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "ACC LOGO cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, ndlp->nlp_DID); /* ACC to LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " "Data: x%x x%x x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); /* This clause allows the LOGO ACC to complete and free resources * for the Fabric Domain Controller. It does deliberately skip * the unreg_rpi and release rpi because some fabrics send RDP * requests after logging out from the initiator. 
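	 * The check below therefore routes such fabric-type nodes (those
	 * failing the WELL_KNOWN_DID_MASK comparison) straight to the
	 * resource release without touching the rpi.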
*/ if (ndlp->nlp_type & NLP_FABRIC && ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { /* If PLOGI is being retried, PLOGI completion will cleanup the * node. The NLP_NPR_2B_DISC flag needs to be retained to make * progress on nodes discovered from last RSCN. */ if ((ndlp->nlp_flag & NLP_DELAY_TMO) && (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) goto out; if (ndlp->nlp_flag & NLP_RPI_REGISTERED) lpfc_unreg_rpi(vport, ndlp); } out: /* * The driver received a LOGO from the rport and has ACK'd it. * At this point, the driver is done so release the IOCB */ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This routine is the completion callback function for unregister default * RPI (Remote Port Index) mailbox command to the @phba. It simply releases * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and * decrements the ndlp reference count held for this completion callback * function. After that, it invokes the lpfc_drop_node to check * whether it is appropriate to release the node. **/ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; u32 mbx_flag = pmb->mbox_flag; u32 mbx_cmd = pmb->u.mb.mbxCommand; if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "0006 rpi x%x DID:%x flg:%x %d x%px " "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp, mbx_cmd, mbx_flag, pmb); /* This ends the default/temporary RPI cleanup logic for this * ndlp and the node and rpi needs to be released. Free the rpi * first on an UNREG_LOGIN and then release the final * references. */ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; if (mbx_cmd == MBX_UNREG_LOGIN) ndlp->nlp_flag &= ~NLP_UNREG_INP; spin_unlock_irq(&ndlp->lock); lpfc_nlp_put(ndlp); lpfc_drop_node(ndlp->vport, ndlp); } lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); } /** * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion callback function for ELS Response IOCB * command. In normal case, this callback function just properly sets the * nlp_flag bitmap in the ndlp data structure, if the mbox command reference * field in the command IOCB is not NULL, the referred mailbox command will * be send out, and then invokes the lpfc_els_free_iocb() routine to release * the IOCB. **/ static void lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; IOCB_t *irsp; LPFC_MBOXQ_t *mbox = NULL; u32 ulp_status, ulp_word4, tmo, did, iotag; if (!vport) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3177 ELS response failed\n"); goto out; } if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); did = get_job_els_rsp64_did(phba, cmdiocb); if (phba->sli_rev == LPFC_SLI_REV4) { tmo = get_wqe_tmo(cmdiocb); iotag = get_wqe_reqtag(cmdiocb); } else { irsp = &rspiocb->iocb; tmo = irsp->ulpTimeout; iotag = irsp->ulpIoTag; } /* Check to see if link went down during discovery */ if (!ndlp || lpfc_els_chk_latt(vport)) { if (mbox) lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); goto out; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "ELS rsp cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* ELS response tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", iotag, ulp_status, ulp_word4, tmo, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); if (mbox) { if (ulp_status == 0 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { if (!lpfc_unreg_rpi(vport, ndlp) && (!(vport->fc_flag & FC_PT2PT))) { if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0314 PLOGI recov " "DID x%x " "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); goto out_free_mbox; } } /* Increment reference count to ndlp to hold the * reference to ndlp for the callback function. */ mbox->ctx_ndlp = lpfc_nlp_get(ndlp); if (!mbox->ctx_ndlp) goto out_free_mbox; mbox->vport = vport; if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; } else { mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; /* Decrement the ndlp reference count we * set for this failed mailbox command. */ lpfc_nlp_put(ndlp); ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; /* ELS rsp: Cannot issue reg_login for <NPortid> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } out_free_mbox: lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); } out: if (ndlp && shost) { spin_lock_irq(&ndlp->lock); if (mbox) ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; spin_unlock_irq(&ndlp->lock); } /* An SLI4 NPIV instance wants to drop the node at this point under * these conditions and release the RPI. 
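 * "These conditions" are spelled out by the checks that follow: an SLI4
 * NPIV port whose node is not registered with the SCSI transport
 * (SCSI_XPT_REGD clear).  The RPI itself is only freed when no PLOGI or
 * REG_LOGIN is still being issued, and the drop on the
 * non-NLP_RELEASE_RPI path is further skipped while a PLOGI, REG_LOGIN
 * or PRLI is outstanding.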
*/ if (phba->sli_rev == LPFC_SLI_REV4 && vport && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { if (ndlp->nlp_flag & NLP_RELEASE_RPI) { if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); spin_lock_irq(&ndlp->lock); ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; ndlp->nlp_flag &= ~NLP_RELEASE_RPI; spin_unlock_irq(&ndlp->lock); } lpfc_drop_node(vport, ndlp); } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { /* Drop ndlp if there is no planned or outstanding * issued PRLI. * * In cases when the ndlp is acting as both an initiator * and target function, let our issued PRLI determine * the final ndlp kref drop. */ lpfc_drop_node(vport, ndlp); } } /* Release the originating I/O reference. */ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } /** * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command * @vport: pointer to a host virtual N_Port data structure. * @flag: the els command code to be accepted. * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * @mbox: pointer to the driver internal queue element for mailbox command. * * This routine prepares and issues an Accept (ACC) response IOCB * command. It uses the @flag to properly set up the IOCB field for the * specific ACC response command to be issued and invokes the * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a * @mbox pointer is passed in, it will be put into the context_un.mbox * field of the IOCB for the completion callback function to issue the * mailbox command to the HBA later when callback is invoked. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the corresponding * response ELS IOCB command. 
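 *
 * Illustrative only (a hypothetical caller, not taken from this code):
 * accepting a received ELS with no follow-on mailbox command would look
 * roughly like
 *
 *	rc = lpfc_els_rsp_acc(vport, ELS_CMD_ACC, oldiocb, ndlp, NULL);
 *	if (rc)
 *		return;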
* * Return code * 0 - Successfully issued acc response * 1 - Failed to issue acc response **/ int lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; IOCB_t *oldcmd; union lpfc_wqe128 *wqe; union lpfc_wqe128 *oldwqe = &oldiocb->wqe; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; struct serv_parm *sp; uint16_t cmdsize; int rc; ELS_PKT *els_pkt_ptr; struct fc_els_rdf_resp *rdf_resp; switch (flag) { case ELS_CMD_ACC: cmdsize = sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_LOGO_ACC; spin_unlock_irq(&ndlp->lock); return 1; } if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* XRI / rx_id */ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_ctxt_tag, &oldwqe->xmit_els_rsp.wqe_com)); /* oxid */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_rcvoxid, &oldwqe->xmit_els_rsp.wqe_com)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } pcmd = elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_FLOGI: case ELS_CMD_PLOGI: cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* XRI / rx_id */ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_ctxt_tag, &oldwqe->xmit_els_rsp.wqe_com)); /* oxid */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_rcvoxid, &oldwqe->xmit_els_rsp.wqe_com)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; if (mbox) elsiocb->context_un.mbox = mbox; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); sp = (struct serv_parm *)pcmd; if (flag == ELS_CMD_FLOGI) { /* Copy the received service parameters back */ memcpy(sp, &phba->fc_fabparam, sizeof(struct serv_parm)); /* Clear the F_Port bit */ sp->cmn.fPort = 0; /* Mark all class service parameters as invalid */ sp->cls1.classValid = 0; sp->cls2.classValid = 0; sp->cls3.classValid = 0; sp->cls4.classValid = 0; /* Copy our worldwide names */ memcpy(&sp->portName, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); } else { memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); sp->cmn.bbRcvSizeMsb &= 0xF; /* If our firmware supports this feature, convey that * info to the target using the vendor specific field. 
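 * ("This feature" is the SLI suppress-response optimization: the ACC
 * advertises it through the vendor-specific version fields set just
 * below, LPFC_VV_EMLX_ID and LPFC_VV_SUPPRESS_RSP.)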
*/ if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { sp->cmn.valid_vendor_ver_level = 1; sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); } } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: cmdsize = sizeof(uint32_t) + sizeof(PRLO); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* XRI / rx_id */ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_ctxt_tag, &oldwqe->xmit_els_rsp.wqe_com)); /* oxid */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_rcvoxid, &oldwqe->xmit_els_rsp.wqe_com)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; memcpy(pcmd, oldiocb->cmd_dmabuf->virt, sizeof(uint32_t) + sizeof(PRLO)); *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; els_pkt_ptr = (ELS_PKT *) pcmd; els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC PRLO: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_RDF: cmdsize = sizeof(*rdf_resp); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* XRI / rx_id */ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_ctxt_tag, &oldwqe->xmit_els_rsp.wqe_com)); /* oxid */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, bf_get(wqe_rcvoxid, &oldwqe->xmit_els_rsp.wqe_com)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; rdf_resp = (struct fc_els_rdf_resp *)pcmd; memset(rdf_resp, 0, sizeof(*rdf_resp)); rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; /* FC-LS-5 specifies desc_list_len shall be set to 12 */ rdf_resp->desc_list_len = cpu_to_be32(12); /* FC-LS-5 specifies LS REQ Information descriptor */ rdf_resp->lsri.desc_tag = cpu_to_be32(1); rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; break; default: return 1; } if (ndlp->nlp_flag & NLP_LOGO_ACC) { spin_lock_irq(&ndlp->lock); if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) ndlp->nlp_flag &= ~NLP_LOGO_ACC; spin_unlock_irq(&ndlp->lock); elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; } else { elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; } phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " "RPI: x%x, fc_flag x%x refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); return 0; } /** * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command * @vport: pointer to a virtual 
N_Port data structure. * @rejectError: reject response to issue * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * @mbox: pointer to the driver internal queue element for mailbox command. * * This routine prepares and issue an Reject (RJT) response IOCB * command. If a @mbox pointer is passed in, it will be put into the * context_un.mbox field of the IOCB for the completion callback function * to issue to the HBA later. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the reject response * ELS IOCB command. * * Return code * 0 - Successfully issued reject response * 1 - Failed to issue reject response **/ int lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { int rc; struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; IOCB_t *oldcmd; union lpfc_wqe128 *wqe; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; cmdsize = 2 * sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, oldiocb)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; pcmd += sizeof(uint32_t); *((uint32_t *) (pcmd)) = rejectError; if (mbox) elsiocb->context_un.mbox = mbox; /* Xmit ELS RJT <err> response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0129 Xmit ELS RJT x%x response tag x%x " "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " "rpi x%x\n", rejectError, elsiocb->iotag, get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue LS_RJT: did:x%x flg:x%x err:x%x", ndlp->nlp_DID, ndlp->nlp_flag, rejectError); phba->fc_stat.elsXmitLSRJT++; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } /* The NPIV instance is rejecting this unsolicited ELS. Make sure the * node's assigned RPI gets released provided this node is not already * registered with the transport. */ if (phba->sli_rev == LPFC_SLI_REV4 && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_RELEASE_RPI; spin_unlock_irq(&ndlp->lock); } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to the original lpfc command iocb data structure. * @ndlp: NPort to where rsp is directed * * This routine issues an EDC ACC RSP to the F-Port Controller to communicate * this N_Port's support of hardware signals in its Congestion * Capabilities Descriptor. 
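 *
 * When the link is LDS capable, the response also appends a link fault
 * capability descriptor (struct fc_diag_lnkflt_desc) after the
 * congestion-signaling descriptor; see the lft_desc_size handling below.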
* * Return code * 0 - Successfully issued edc rsp command * 1 - Failed to issue edc rsp command **/ static int lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct fc_els_edc_resp *edc_rsp; struct fc_tlv_desc *tlv; struct lpfc_iocbq *elsiocb; IOCB_t *icmd, *cmd; union lpfc_wqe128 *wqe; u32 cgn_desc_size, lft_desc_size; u16 cmdsize; uint8_t *pcmd; int rc; cmdsize = sizeof(struct fc_els_edc_resp); cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? sizeof(struct fc_diag_lnkflt_desc) : 0; cmdsize += cgn_desc_size + lft_desc_size; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, cmdiocb)); } else { icmd = &elsiocb->iocb; cmd = &cmdiocb->iocb; icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; } pcmd = elsiocb->cmd_dmabuf->virt; memset(pcmd, 0, cmdsize); edc_rsp = (struct fc_els_edc_resp *)pcmd; edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + cgn_desc_size + lft_desc_size); edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); edc_rsp->lsri.desc_len = cpu_to_be32( FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; tlv = edc_rsp->desc; lpfc_format_edc_cgn_desc(phba, tlv); tlv = fc_tlv_next_desc(tlv); if (lft_desc_size) lpfc_format_edc_lft_desc(phba, tlv); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue EDC ACC: did:x%x flg:x%x refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " "RPI: x%x, fc_flag x%x\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, vport->fc_flag); return 0; } /** * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd * @vport: pointer to a virtual N_Port data structure. * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine prepares and issues an Accept (ACC) response to Address * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB * and invokes the lpfc_sli_issue_iocb() routine to send out the command. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the ADISC Accept response * ELS IOCB command. 
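 *
 * Illustrative only (hypothetical caller): a received ADISC that passes
 * the usual node checks would typically be answered with
 *
 *	lpfc_els_rsp_adisc_acc(vport, oldiocb, ndlp);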
* * Return code * 0 - Successfully issued acc adisc response * 1 - Failed to issue adisc acc response **/ int lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; ADISC *ap; IOCB_t *icmd, *oldcmd; union lpfc_wqe128 *wqe; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; int rc; u32 ulp_context; cmdsize = sizeof(uint32_t) + sizeof(ADISC); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* XRI / rx_id */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, oldiocb)); ulp_context = get_job_ulpcontext(phba, elsiocb); /* oxid */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, oldiocb)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ ulp_context = elsiocb->iocb.ulpContext; icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } /* Xmit ADISC ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0130 Xmit ADISC ACC response iotag x%x xri: " "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); ap = (ADISC *) (pcmd); ap->hardAL_PA = phba->fc_pref_ALPA; memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); ap->DID = be32_to_cpu(vport->fc_myDID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd * @vport: pointer to a virtual N_Port data structure. * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine prepares and issues an Accept (ACC) response to Process * Login (PRLI) ELS command. It simply prepares the payload of the IOCB * and invokes the lpfc_sli_issue_iocb() routine to send out the command. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the PRLI Accept response * ELS IOCB command. * * Return code * 0 - Successfully issued acc prli response * 1 - Failed to issue acc prli response **/ int lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; PRLI *npr; struct lpfc_nvme_prli *npr_nvme; lpfc_vpd_t *vpd; IOCB_t *icmd; IOCB_t *oldcmd; union lpfc_wqe128 *wqe; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; uint32_t prli_fc4_req, *req_payload; struct lpfc_dmabuf *req_buf; int rc; u32 elsrspcmd, ulp_context; /* Need the incoming PRLI payload to determine if the ACC is for an * FC4 or NVME PRLI type. 
The PRLI type is at word 1. */ req_buf = oldiocb->cmd_dmabuf; req_payload = (((uint32_t *)req_buf->virt) + 1); /* PRLI type payload is at byte 3 for FCP or NVME. */ prli_fc4_req = be32_to_cpu(*req_payload); prli_fc4_req = (prli_fc4_req >> 24) & 0xff; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", prli_fc4_req, *((uint32_t *)req_payload)); if (prli_fc4_req == PRLI_FCP_TYPE) { cmdsize = sizeof(uint32_t) + sizeof(PRLI); elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); } else if (prli_fc4_req == PRLI_NVME_TYPE) { cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); } else { return 1; } elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, elsrspcmd); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ ulp_context = get_job_ulpcontext(phba, elsiocb); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, oldiocb)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ ulp_context = elsiocb->iocb.ulpContext; icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; memset(pcmd, 0, cmdsize); *((uint32_t *)(pcmd)) = elsrspcmd; pcmd += sizeof(uint32_t); /* For PRLI, remainder of payload is PRLI parameter page */ vpd = &phba->vpd; if (prli_fc4_req == PRLI_FCP_TYPE) { /* * If the remote port is a target and our firmware version * is 3.20 or later, set the following bits for FC-TAPE * support. */ npr = (PRLI *) pcmd; if ((ndlp->nlp_type & NLP_FCP_TARGET) && (vpd->rev.feaLevelHigh >= 0x02)) { npr->ConfmComplAllowed = 1; npr->Retry = 1; npr->TaskRetryIdReq = 1; } npr->acceptRspCode = PRLI_REQ_EXECUTED; /* Set image pair for complementary pairs only. */ if (ndlp->nlp_type & NLP_FCP_TARGET) npr->estabImagePair = 1; else npr->estabImagePair = 0; npr->readXferRdyDis = 1; npr->ConfmComplAllowed = 1; npr->prliType = PRLI_FCP_TYPE; npr->initiatorFunc = 1; /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "6014 FCP issue PRLI ACC imgpair %d " "retry %d task %d\n", npr->estabImagePair, npr->Retry, npr->TaskRetryIdReq); } else if (prli_fc4_req == PRLI_NVME_TYPE) { /* Respond with an NVME PRLI Type */ npr_nvme = (struct lpfc_nvme_prli *) pcmd; bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); if (phba->nvmet_support) { bf_set(prli_tgt, npr_nvme, 1); bf_set(prli_disc, npr_nvme, 1); if (phba->cfg_nvme_enable_fb) { bf_set(prli_fba, npr_nvme, 1); /* TBD. Target mode needs to post buffers * that support the configured first burst * byte size. 
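 * (The size advertised below via prli_fb_sz comes from the
 * cfg_nvmet_fb_size configuration value.)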
*/ bf_set(prli_fb_sz, npr_nvme, phba->cfg_nvmet_fb_size); } } else { bf_set(prli_init, npr_nvme, 1); } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6015 NVME issue PRLI ACC word1 x%08x " "word4 x%08x word5 x%08x flag x%x, " "fcp_info x%x nlp_type x%x\n", npr_nvme->word1, npr_nvme->word4, npr_nvme->word5, ndlp->nlp_flag, ndlp->nlp_fcp_info, ndlp->nlp_type); npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); } else lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", prli_fc4_req, ndlp->nlp_fc4_type, ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC PRLI: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command * @vport: pointer to a virtual N_Port data structure. * @format: rnid command format. * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine issues a Request Node Identification Data (RNID) Accept * (ACC) response. It constructs the RNID ACC response command according to * the proper @format and then calls the lpfc_sli_issue_iocb() routine to * issue the response. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function. 
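 *
 * A zero @format returns only the common identification block; when
 * @format is RNID_TOPOLOGY_DISC an RNID_TOP_DISC topology block is
 * appended as well, and any other non-zero value reports zero common
 * and specific lengths.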
* * Return code * 0 - Successfully issued acc rnid response * 1 - Failed to issue acc rnid response **/ static int lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; RNID *rn; IOCB_t *icmd, *oldcmd; union lpfc_wqe128 *wqe; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; int rc; u32 ulp_context; cmdsize = sizeof(uint32_t) + sizeof(uint32_t) + (2 * sizeof(struct lpfc_name)); if (format) cmdsize += sizeof(RNID_TOP_DISC); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ ulp_context = get_job_ulpcontext(phba, elsiocb); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, oldiocb)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ ulp_context = elsiocb->iocb.ulpContext; icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } /* Xmit RNID ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0132 Xmit RNID ACC response tag x%x xri x%x\n", elsiocb->iotag, ulp_context); pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); memset(pcmd, 0, sizeof(RNID)); rn = (RNID *) (pcmd); rn->Format = format; rn->CommonLen = (2 * sizeof(struct lpfc_name)); memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); switch (format) { case 0: rn->SpecificLen = 0; break; case RNID_TOPOLOGY_DISC: rn->SpecificLen = sizeof(RNID_TOP_DISC); memcpy(&rn->un.topologyDisc.portName, &vport->fc_portname, sizeof(struct lpfc_name)); rn->un.topologyDisc.unitType = RNID_HBA; rn->un.topologyDisc.physPort = 0; rn->un.topologyDisc.attachedNodes = 0; break; default: rn->CommonLen = 0; rn->SpecificLen = 0; break; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC RNID: did:x%x flg:x%x refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_els_clear_rrq - Clear the rq that this rrq describes. * @vport: pointer to a virtual N_Port data structure. * @iocb: pointer to the lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. 
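 *
 * This routine parses the RRQ payload of the received ELS, selects the
 * exchange id to clear (the OX_ID when the S_ID carried in the RRQ
 * matches vport->fc_myDID, otherwise the RX_ID), looks up the matching
 * active RRQ for @ndlp and clears it.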
* * Return **/ static void lpfc_els_clear_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; uint8_t *pcmd; struct RRQ *rrq; uint16_t rxid; uint16_t xri; struct lpfc_node_rrq *prrq; pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; pcmd += sizeof(uint32_t); rrq = (struct RRQ *)pcmd; rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); rxid = bf_get(rrq_rxid, rrq); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" " x%x x%x\n", be32_to_cpu(bf_get(rrq_did, rrq)), bf_get(rrq_oxid, rrq), rxid, get_wqe_reqtag(iocb), get_job_ulpcontext(phba, iocb)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) xri = bf_get(rrq_oxid, rrq); else xri = rxid; prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); if (prrq) lpfc_clr_rrq_active(phba, xri, prrq); return; } /** * lpfc_els_rsp_echo_acc - Issue echo acc response * @vport: pointer to a virtual N_Port data structure. * @data: pointer to echo data to return in the accept. * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * Return code * 0 - Successfully issued acc echo response * 1 - Failed to issue acc echo response **/ static int lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; IOCB_t *icmd, *oldcmd; union lpfc_wqe128 *wqe; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; int rc; u32 ulp_context; if (phba->sli_rev == LPFC_SLI_REV4) cmdsize = oldiocb->wcqe_cmpl.total_data_placed; else cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; /* The accumulated length can exceed the BPL_SIZE. For * now, use this as the limit */ if (cmdsize > LPFC_BPL_SIZE) cmdsize = LPFC_BPL_SIZE; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ ulp_context = get_job_ulpcontext(phba, elsiocb); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, oldiocb)); } else { icmd = &elsiocb->iocb; oldcmd = &oldiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ ulp_context = elsiocb->iocb.ulpContext; icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; } /* Xmit ECHO ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2876 Xmit ECHO ACC response tag x%x xri x%x\n", elsiocb->iotag, ulp_context); pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport * @vport: pointer to a host virtual N_Port data structure. 
* * This routine issues Address Discover (ADISC) ELS commands to those * N_Ports which are in node port recovery state and ADISC has not been issued * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the * lpfc_issue_els_adisc() routine, the per @vport number of discover count * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC * IOCBs quit for later pick up. On the other hand, after walking through * all the ndlps with the @vport and there is none ADISC IOCB issued, the * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is * no more ADISC need to be sent. * * Return code * The number of N_Ports with adisc issued. **/ int lpfc_els_disc_adisc(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; int sentadisc = 0; /* go thru NPR nodes and issue any remaining ELS ADISCs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE || !(ndlp->nlp_flag & NLP_NPR_ADISC)) continue; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { /* This node was marked for ADISC but was not picked * for discovery. This is possible if the node was * missing in gidft response. * * At time of marking node for ADISC, we skipped unreg * from backend */ lpfc_nlp_unreg_node(vport, ndlp); lpfc_unreg_rpi(vport, ndlp); continue; } ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); lpfc_issue_els_adisc(vport, ndlp, 0); sentadisc++; vport->num_disc_nodes++; if (vport->num_disc_nodes >= vport->cfg_discovery_threads) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_NLP_MORE; spin_unlock_irq(shost->host_lock); break; } } if (sentadisc == 0) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NLP_MORE; spin_unlock_irq(shost->host_lock); } return sentadisc; } /** * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc * @vport: pointer to a host virtual N_Port data structure. * * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports * which are in node port recovery state, with a @vport. Each time an ELS * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, * the per @vport number of discover count (num_disc_nodes) shall be * incremented. If the num_disc_nodes reaches a pre-configured threshold * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for * later pick up. On the other hand, after walking through all the ndlps with * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC * PLOGI need to be sent. * * Return code * The number of N_Ports with plogi issued. 
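 *
 * Note: when at least one PLOGI was sent the discovery timeout is
 * (re)started via lpfc_set_disctmo(); otherwise the FC_NLP_MORE flag is
 * cleared.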
**/ int lpfc_els_disc_plogi(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; int sentplogi = 0; /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); sentplogi++; vport->num_disc_nodes++; if (vport->num_disc_nodes >= vport->cfg_discovery_threads) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_NLP_MORE; spin_unlock_irq(shost->host_lock); break; } } } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6452 Discover PLOGI %d flag x%x\n", sentplogi, vport->fc_flag); if (sentplogi) { lpfc_set_disctmo(vport); } else { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NLP_MORE; spin_unlock_irq(shost->host_lock); } return sentplogi; } static uint32_t lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, uint32_t word0) { desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); desc->payload.els_req = word0; desc->length = cpu_to_be32(sizeof(desc->payload)); return sizeof(struct fc_rdp_link_service_desc); } static uint32_t lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, uint8_t *page_a0, uint8_t *page_a2) { uint16_t wavelength; uint16_t temperature; uint16_t rx_power; uint16_t tx_bias; uint16_t tx_power; uint16_t vcc; uint16_t flag = 0; struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) &page_a0[SSF_TRANSCEIVER_CODE_B4]; trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) &page_a0[SSF_TRANSCEIVER_CODE_B5]; if ((trasn_code_byte4->fc_sw_laser) || (trasn_code_byte5->fc_sw_laser_sl) || (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); } else if (trasn_code_byte4->fc_lw_laser) { wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | page_a0[SSF_WAVELENGTH_B0]; if (wavelength == SFP_WAVELENGTH_LC1310) flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; if (wavelength == SFP_WAVELENGTH_LL1550) flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; } /* check if its SFP+ */ flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) << SFP_FLAG_CT_SHIFT; /* check if its OPTICAL */ flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
SFP_FLAG_IS_OPTICAL_PORT : 0) << SFP_FLAG_IS_OPTICAL_SHIFT; temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | page_a2[SFF_TEMPERATURE_B0]); vcc = (page_a2[SFF_VCC_B1] << 8 | page_a2[SFF_VCC_B0]); tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | page_a2[SFF_TXPOWER_B0]); tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | page_a2[SFF_TX_BIAS_CURRENT_B0]); rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | page_a2[SFF_RXPOWER_B0]); desc->sfp_info.temperature = cpu_to_be16(temperature); desc->sfp_info.rx_power = cpu_to_be16(rx_power); desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); desc->sfp_info.tx_power = cpu_to_be16(tx_power); desc->sfp_info.vcc = cpu_to_be16(vcc); desc->sfp_info.flags = cpu_to_be16(flag); desc->length = cpu_to_be32(sizeof(desc->sfp_info)); return sizeof(struct fc_rdp_sfp_desc); } static uint32_t lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, READ_LNK_VAR *stat) { uint32_t type; desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; desc->info.port_type = cpu_to_be32(type); desc->info.link_status.link_failure_cnt = cpu_to_be32(stat->linkFailureCnt); desc->info.link_status.loss_of_synch_cnt = cpu_to_be32(stat->lossSyncCnt); desc->info.link_status.loss_of_signal_cnt = cpu_to_be32(stat->lossSignalCnt); desc->info.link_status.primitive_seq_proto_err = cpu_to_be32(stat->primSeqErrCnt); desc->info.link_status.invalid_trans_word = cpu_to_be32(stat->invalidXmitWord); desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); desc->length = cpu_to_be32(sizeof(desc->info)); return sizeof(struct fc_rdp_link_error_status_desc); } static uint32_t lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, struct lpfc_vport *vport) { uint32_t bbCredit; desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); bbCredit = vport->fc_sparam.cmn.bbCreditLsb | (vport->fc_sparam.cmn.bbCreditMsb << 8); desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); } else { desc->bbc_info.attached_port_bbc = 0; } desc->bbc_info.rtt = 0; desc->length = cpu_to_be32(sizeof(desc->bbc_info)); return sizeof(struct fc_rdp_bbc_desc); } static uint32_t lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) flags |= RDP_OET_HIGH_ALARM; if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) flags |= RDP_OET_LOW_ALARM; if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) flags |= RDP_OET_HIGH_WARNING; if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) flags |= RDP_OET_LOW_WARNING; flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); return sizeof(struct fc_rdp_oed_sfp_desc); } static uint32_t lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 
desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) flags |= RDP_OET_HIGH_ALARM; if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) flags |= RDP_OET_LOW_ALARM; if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) flags |= RDP_OET_HIGH_WARNING; if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) flags |= RDP_OET_LOW_WARNING; flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); return sizeof(struct fc_rdp_oed_sfp_desc); } static uint32_t lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) flags |= RDP_OET_HIGH_ALARM; if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) flags |= RDP_OET_LOW_ALARM; if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) flags |= RDP_OET_HIGH_WARNING; if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) flags |= RDP_OET_LOW_WARNING; flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); return sizeof(struct fc_rdp_oed_sfp_desc); } static uint32_t lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) flags |= RDP_OET_HIGH_ALARM; if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) flags |= RDP_OET_LOW_ALARM; if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) flags |= RDP_OET_HIGH_WARNING; if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) flags |= RDP_OET_LOW_WARNING; flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); return sizeof(struct fc_rdp_oed_sfp_desc); } static uint32_t lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) flags |= RDP_OET_HIGH_ALARM; if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) flags |= RDP_OET_LOW_ALARM; if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) flags |= RDP_OET_HIGH_WARNING; if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) flags |= RDP_OET_LOW_WARNING; flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); return sizeof(struct fc_rdp_oed_sfp_desc); } static uint32_t lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, uint8_t *page_a0, struct lpfc_vport *vport) { desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); desc->length = cpu_to_be32(sizeof(desc->opd_info)); return sizeof(struct fc_rdp_opd_sfp_desc); } static uint32_t lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) { if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) return 0; desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); desc->info.CorrectedBlocks = cpu_to_be32(stat->fecCorrBlkCount); desc->info.UncorrectableBlocks = cpu_to_be32(stat->fecUncorrBlkCount); desc->length = cpu_to_be32(sizeof(desc->info)); return sizeof(struct fc_fec_rdp_desc); } static uint32_t lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) { uint16_t rdp_cap = 0; uint16_t rdp_speed; desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); switch (phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: rdp_speed = RDP_PS_1GB; break; case LPFC_LINK_SPEED_2GHZ: rdp_speed = RDP_PS_2GB; break; case LPFC_LINK_SPEED_4GHZ: rdp_speed = RDP_PS_4GB; break; case LPFC_LINK_SPEED_8GHZ: rdp_speed = RDP_PS_8GB; break; case LPFC_LINK_SPEED_10GHZ: rdp_speed = RDP_PS_10GB; break; case LPFC_LINK_SPEED_16GHZ: rdp_speed = RDP_PS_16GB; break; case LPFC_LINK_SPEED_32GHZ: rdp_speed = RDP_PS_32GB; break; case LPFC_LINK_SPEED_64GHZ: rdp_speed = RDP_PS_64GB; break; case LPFC_LINK_SPEED_128GHZ: rdp_speed = RDP_PS_128GB; break; case LPFC_LINK_SPEED_256GHZ: rdp_speed = RDP_PS_256GB; break; default: rdp_speed = RDP_PS_UNKNOWN; break; } desc->info.port_speed.speed = cpu_to_be16(rdp_speed); if (phba->lmt & LMT_256Gb) rdp_cap |= RDP_PS_256GB; if (phba->lmt & LMT_128Gb) rdp_cap |= RDP_PS_128GB; if (phba->lmt & LMT_64Gb) rdp_cap |= RDP_PS_64GB; if (phba->lmt & LMT_32Gb) rdp_cap |= RDP_PS_32GB; if (phba->lmt & LMT_16Gb) rdp_cap |= RDP_PS_16GB; if (phba->lmt & LMT_10Gb) rdp_cap |= RDP_PS_10GB; if (phba->lmt & LMT_8Gb) rdp_cap |= RDP_PS_8GB; if (phba->lmt & LMT_4Gb) rdp_cap |= RDP_PS_4GB; if (phba->lmt & LMT_2Gb) rdp_cap |= RDP_PS_2GB; if (phba->lmt & LMT_1Gb) rdp_cap |= RDP_PS_1GB; if (rdp_cap == 0) rdp_cap = RDP_CAP_UNKNOWN; if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) rdp_cap |= RDP_CAP_USER_CONFIGURED; desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); desc->length = cpu_to_be32(sizeof(desc->info)); return sizeof(struct fc_rdp_port_speed_desc); } static uint32_t lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_vport *vport) { desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); memcpy(desc->port_names.wwnn, &vport->fc_nodename, sizeof(desc->port_names.wwnn)); memcpy(desc->port_names.wwpn, &vport->fc_portname, sizeof(desc->port_names.wwpn)); desc->length = cpu_to_be32(sizeof(desc->port_names)); return sizeof(struct fc_rdp_port_name_desc); } static uint32_t lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); if (vport->fc_flag & FC_FABRIC) { memcpy(desc->port_names.wwnn, &vport->fabric_nodename, sizeof(desc->port_names.wwnn)); memcpy(desc->port_names.wwpn, 
&vport->fabric_portname, sizeof(desc->port_names.wwpn)); } else { /* Point to Point */ memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, sizeof(desc->port_names.wwnn)); memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, sizeof(desc->port_names.wwpn)); } desc->length = cpu_to_be32(sizeof(desc->port_names)); return sizeof(struct fc_rdp_port_name_desc); } static void lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, int status) { struct lpfc_nodelist *ndlp = rdp_context->ndlp; struct lpfc_vport *vport = ndlp->vport; struct lpfc_iocbq *elsiocb; struct ulp_bde64 *bpl; IOCB_t *icmd; union lpfc_wqe128 *wqe; uint8_t *pcmd; struct ls_rjt *stat; struct fc_rdp_res_frame *rdp_res; uint32_t cmdsize, len; uint16_t *flag_ptr; int rc; u32 ulp_context; if (status != SUCCESS) goto error; /* This will change once we know the true size of the RDP payload */ cmdsize = sizeof(struct fc_rdp_res_frame); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, rdp_context->ndlp, rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) goto free_rdp_context; ulp_context = get_job_ulpcontext(phba, elsiocb); if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* ox-id of the frame */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, rdp_context->ox_id); bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, rdp_context->rx_id); } else { icmd = &elsiocb->iocb; icmd->ulpContext = rdp_context->rx_id; icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2171 Xmit RDP response tag x%x xri x%x, " "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; /* Update Alarm and Warning */ flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); phba->sfp_alarm |= *flag_ptr; flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); phba->sfp_warning |= *flag_ptr; /* For RDP payload */ len = 8; len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) (len + pcmd), ELS_CMD_RDP); len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), rdp_context->page_a0, rdp_context->page_a2); len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), phba); len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) (len + pcmd), &rdp_context->link_stat); len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) (len + pcmd), vport); len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) (len + pcmd), vport, ndlp); len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), &rdp_context->link_stat); len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), &rdp_context->link_stat, vport); len += lpfc_rdp_res_oed_temp_desc(phba, (struct fc_rdp_oed_sfp_desc *)(len + pcmd), rdp_context->page_a2); len += lpfc_rdp_res_oed_voltage_desc(phba, (struct fc_rdp_oed_sfp_desc *)(len + pcmd), rdp_context->page_a2); len += lpfc_rdp_res_oed_txbias_desc(phba, (struct fc_rdp_oed_sfp_desc *)(len + pcmd), rdp_context->page_a2); len += lpfc_rdp_res_oed_txpower_desc(phba, (struct fc_rdp_oed_sfp_desc *)(len + pcmd), rdp_context->page_a2); len += lpfc_rdp_res_oed_rxpower_desc(phba, (struct fc_rdp_oed_sfp_desc *)(len + pcmd), rdp_context->page_a2); len += lpfc_rdp_res_opd_desc((struct 
fc_rdp_opd_sfp_desc *)(len + pcmd), rdp_context->page_a0, vport); rdp_res->length = cpu_to_be32(len - 8); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; /* Now that we know the true size of the payload, update the BPL */ bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; bpl->tus.f.bdeSize = len; bpl->tus.f.bdeFlags = 0; bpl->tus.w = le32_to_cpu(bpl->tus.w); phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto free_rdp_context; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); } goto free_rdp_context; error: cmdsize = 2 * sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); if (!elsiocb) goto free_rdp_context; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* ox-id of the frame */ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, rdp_context->ox_id); bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, rdp_context->rx_id); } else { icmd = &elsiocb->iocb; icmd->ulpContext = rdp_context->rx_id; icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; phba->fc_stat.elsXmitLSRJT++; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto free_rdp_context; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); } free_rdp_context: /* This reference put is for the original unsolicited RDP. If the * prep failed, there is no reference to remove. 
*/ lpfc_nlp_put(ndlp); kfree(rdp_context); } static int lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) { LPFC_MBOXQ_t *mbox = NULL; int rc; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, "7105 failed to allocate mailbox memory"); return 1; } if (lpfc_sli4_dump_page_a0(phba, mbox)) goto rdp_fail; mbox->vport = rdp_context->ndlp->vport; mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); return 1; } return 0; rdp_fail: mempool_free(mbox, phba->mbox_mem_pool); return 1; } int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) { LPFC_MBOXQ_t *mbox = NULL; int rc; struct lpfc_dmabuf *mp; struct lpfc_dmabuf *mpsave; void *virt; MAILBOX_t *mb; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, "7205 failed to allocate mailbox memory"); return 1; } if (lpfc_sli4_dump_page_a0(phba, mbox)) goto sfp_fail; mp = mbox->ctx_buf; mpsave = mp; virt = mp->virt; if (phba->sli_rev < LPFC_SLI_REV4) { mb = &mbox->u.mb; mb->un.varDmp.cv = 1; mb->un.varDmp.co = 1; mb->un.varWords[2] = 0; mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; mb->un.varWords[4] = 0; mb->un.varWords[5] = 0; mb->un.varWords[6] = 0; mb->un.varWords[7] = 0; mb->un.varWords[8] = 0; mb->un.varWords[9] = 0; mb->un.varWords[10] = 0; mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; mbox->mbox_offset_word = 5; mbox->ctx_buf = virt; } else { bf_set(lpfc_mbx_memory_dump_type3_length, &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); } mbox->vport = phba->pport; mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); if (rc == MBX_NOT_FINISHED) { rc = 1; goto error; } if (phba->sli_rev == LPFC_SLI_REV4) mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); else mp = mpsave; if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { rc = 1; goto error; } lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, DMP_SFF_PAGE_A0_SIZE); memset(mbox, 0, sizeof(*mbox)); memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); INIT_LIST_HEAD(&mp->list); /* save address for completion */ mbox->ctx_buf = mp; mbox->vport = phba->pport; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); bf_set(lpfc_mbx_memory_dump_type3_type, &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); bf_set(lpfc_mbx_memory_dump_type3_link, &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_memory_dump_type3_page_no, &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); if (phba->sli_rev < LPFC_SLI_REV4) { mb = &mbox->u.mb; mb->un.varDmp.cv = 1; mb->un.varDmp.co = 1; mb->un.varWords[2] = 0; mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; mb->un.varWords[4] = 0; mb->un.varWords[5] = 0; mb->un.varWords[6] = 0; mb->un.varWords[7] = 0; mb->un.varWords[8] = 0; mb->un.varWords[9] = 0; mb->un.varWords[10] = 0; mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; mbox->mbox_offset_word = 5; mbox->ctx_buf = virt; } else { bf_set(lpfc_mbx_memory_dump_type3_length, &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); mbox->u.mqe.un.mem_dump_type3.addr_lo = 
putPaddrLow(mp->phys); mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); } mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { rc = 1; goto error; } rc = 0; lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, DMP_SFF_PAGE_A2_SIZE); error: mbox->ctx_buf = mpsave; lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); return rc; sfp_fail: mempool_free(mbox, phba->mbox_mem_pool); return 1; } /* * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes an unsolicited RDP(Read Diagnostic Parameters) * IOCB. First, the payload of the unsolicited RDP is checked. * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl * gather all data and send RDP response. * * Return code * 0 - Sent the acc response * 1 - Sent the reject response. */ static int lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; struct fc_rdp_req_frame *rdp_req; struct lpfc_rdp_context *rdp_context; union lpfc_wqe128 *cmd = NULL; struct ls_rjt stat; if (phba->sli_rev < LPFC_SLI_REV4 || bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) { rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; goto error; } if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; goto error; } pcmd = cmdiocb->cmd_dmabuf; rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2422 ELS RDP Request " "dec len %d tag x%x port_id %d len %d\n", be32_to_cpu(rdp_req->rdp_des_length), be32_to_cpu(rdp_req->nport_id_desc.tag), be32_to_cpu(rdp_req->nport_id_desc.nport_id), be32_to_cpu(rdp_req->nport_id_desc.length)); if (sizeof(struct fc_rdp_nport_desc) != be32_to_cpu(rdp_req->rdp_des_length)) goto rjt_logerr; if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) goto rjt_logerr; if (RDP_NPORT_ID_SIZE != be32_to_cpu(rdp_req->nport_id_desc.length)) goto rjt_logerr; rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); if (!rdp_context) { rjt_err = LSRJT_UNABLE_TPC; goto error; } cmd = &cmdiocb->wqe; rdp_context->ndlp = lpfc_nlp_get(ndlp); if (!rdp_context->ndlp) { kfree(rdp_context); rjt_err = LSRJT_UNABLE_TPC; goto error; } rdp_context->ox_id = bf_get(wqe_rcvoxid, &cmd->xmit_els_rsp.wqe_com); rdp_context->rx_id = bf_get(wqe_ctxt_tag, &cmd->xmit_els_rsp.wqe_com); rdp_context->cmpl = lpfc_els_rdp_cmpl; if (lpfc_get_rdp_info(phba, rdp_context)) { lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, "2423 Unable to send mailbox"); kfree(rdp_context); rjt_err = LSRJT_UNABLE_TPC; lpfc_nlp_put(ndlp); goto error; } return 0; rjt_logerr: rjt_err = LSRJT_LOGICAL_ERR; error: memset(&stat, 0, sizeof(stat)); stat.un.b.lsRjtRsnCode = rjt_err; stat.un.b.lsRjtRsnCodeExp = rjt_expl; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 1; } static void lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; IOCB_t *icmd; union lpfc_wqe128 *wqe; 
uint8_t *pcmd; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp; struct ls_rjt *stat; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_lcb_context *lcb_context; struct fc_lcb_res_frame *lcb_res; uint32_t cmdsize, shdr_status, shdr_add_status; int rc; mb = &pmb->u.mb; lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; ndlp = lcb_context->ndlp; pmb->ctx_ndlp = NULL; pmb->ctx_buf = NULL; shdr = (union lpfc_sli4_cfg_shdr *) &pmb->u.mqe.un.beacon_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, "0194 SET_BEACON_CONFIG mailbox " "completed with status x%x add_status x%x," " mbx status x%x\n", shdr_status, shdr_add_status, mb->mbxStatus); if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { mempool_free(pmb, phba->mbox_mem_pool); goto error; } mempool_free(pmb, phba->mbox_mem_pool); cmdsize = sizeof(struct fc_lcb_res_frame); elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); /* Decrement the ndlp reference count from previous mbox command */ lpfc_nlp_put(ndlp); if (!elsiocb) goto free_lcb_context; lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, lcb_context->ox_id); } else { icmd = &elsiocb->iocb; icmd->ulpContext = lcb_context->rx_id; icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *)(pcmd)) = ELS_CMD_ACC; lcb_res->lcb_sub_command = lcb_context->sub_command; lcb_res->lcb_type = lcb_context->type; lcb_res->capability = lcb_context->capability; lcb_res->lcb_frequency = lcb_context->frequency; lcb_res->lcb_duration = lcb_context->duration; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto out; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); } out: kfree(lcb_context); return; error: cmdsize = sizeof(struct fc_lcb_res_frame); elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); lpfc_nlp_put(ndlp); if (!elsiocb) goto free_lcb_context; if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, lcb_context->ox_id); } else { icmd = &elsiocb->iocb; icmd->ulpContext = lcb_context->rx_id; icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitLSRJT++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto free_lcb_context; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); 
lpfc_nlp_put(ndlp); } free_lcb_context: kfree(lcb_context); } static int lpfc_sli4_set_beacon(struct lpfc_vport *vport, struct lpfc_lcb_context *lcb_context, uint32_t beacon_state) { struct lpfc_hba *phba = vport->phba; union lpfc_sli4_cfg_shdr *cfg_shdr; LPFC_MBOXQ_t *mbox = NULL; uint32_t len; int rc; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return 1; cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; len = sizeof(struct lpfc_mbx_set_beacon_config) - sizeof(struct lpfc_sli4_cfg_mhdr); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, LPFC_SLI4_MBX_EMBED); mbox->ctx_ndlp = (void *)lcb_context; mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_els_lcb_rsp; bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, beacon_state); mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ /* * Check bv1s bit before issuing the mailbox * if bv1s == 1, LCB V1 supported * else, LCB V0 supported */ if (phba->sli4_hba.pc_sli4_params.bv1s) { /* COMMON_SET_BEACON_CONFIG_V1 */ cfg_shdr->request.word9 = BEACON_VERSION_V1; lcb_context->capability |= LCB_CAPABILITY_DURATION; bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 0); bf_set(lpfc_mbx_set_beacon_duration_v1, &mbox->u.mqe.un.beacon_config, be16_to_cpu(lcb_context->duration)); } else { /* COMMON_SET_BEACON_CONFIG_V0 */ if (be16_to_cpu(lcb_context->duration) != 0) { mempool_free(mbox, phba->mbox_mem_pool); return 1; } cfg_shdr->request.word9 = BEACON_VERSION_V0; lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, beacon_state); bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1); bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, be16_to_cpu(lcb_context->duration)); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); return 1; } return 0; } /** * lpfc_els_rcv_lcb - Process an unsolicited LCB * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. * First, the payload of the unsolicited LCB is checked. * Then based on Subcommand beacon will either turn on or off. * * Return code * 0 - Sent the acc response * 1 - Sent the reject response. 
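 *
 * The beacon itself is programmed by lpfc_sli4_set_beacon() above; a rough
 * sketch of its version selection (see that routine for the real logic):
 *
 *	if (phba->sli4_hba.pc_sli4_params.bv1s)
 *		use COMMON_SET_BEACON_CONFIG_V1 (beacon duration supported)
 *	else if (lcb_context->duration != 0)
 *		fail the request - V0 does not support a duration
 *	else
 *		use COMMON_SET_BEACON_CONFIG_V0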
**/ static int lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint8_t *lp; struct fc_lcb_request_frame *beacon; struct lpfc_lcb_context *lcb_context; u8 state, rjt_err = 0; struct ls_rjt stat; pcmd = cmdiocb->cmd_dmabuf; lp = (uint8_t *)pcmd->virt; beacon = (struct fc_lcb_request_frame *)pcmd->virt; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " "type x%x frequency %x duration x%x\n", lp[0], lp[1], lp[2], beacon->lcb_command, beacon->lcb_sub_command, beacon->lcb_type, beacon->lcb_frequency, be16_to_cpu(beacon->lcb_duration)); if (beacon->lcb_sub_command != LPFC_LCB_ON && beacon->lcb_sub_command != LPFC_LCB_OFF) { rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } if (phba->sli_rev < LPFC_SLI_REV4 || phba->hba_flag & HBA_FCOE_MODE || (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2)) { rjt_err = LSRJT_CMD_UNSUPPORTED; goto rjt; } lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); if (!lcb_context) { rjt_err = LSRJT_UNABLE_TPC; goto rjt; } state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; lcb_context->sub_command = beacon->lcb_sub_command; lcb_context->capability = 0; lcb_context->type = beacon->lcb_type; lcb_context->frequency = beacon->lcb_frequency; lcb_context->duration = beacon->lcb_duration; lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); lcb_context->ndlp = lpfc_nlp_get(ndlp); if (!lcb_context->ndlp) { rjt_err = LSRJT_UNABLE_TPC; goto rjt_free; } if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, "0193 failed to send mail box"); lpfc_nlp_put(ndlp); rjt_err = LSRJT_UNABLE_TPC; goto rjt_free; } return 0; rjt_free: kfree(lcb_context); rjt: memset(&stat, 0, sizeof(stat)); stat.un.b.lsRjtRsnCode = rjt_err; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 1; } /** * lpfc_els_flush_rscn - Clean up any rscn activities with a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine cleans up any Registration State Change Notification * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the * @vport together with the host_lock is used to prevent multiple thread * trying to access the RSCN array on a same @vport at the same time. **/ void lpfc_els_flush_rscn(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; int i; spin_lock_irq(shost->host_lock); if (vport->fc_rscn_flush) { /* Another thread is walking fc_rscn_id_list on this vport */ spin_unlock_irq(shost->host_lock); return; } /* Indicate we are walking lpfc_els_flush_rscn on this vport */ vport->fc_rscn_flush = 1; spin_unlock_irq(shost->host_lock); for (i = 0; i < vport->fc_rscn_id_cnt; i++) { lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); vport->fc_rscn_id_list[i] = NULL; } spin_lock_irq(shost->host_lock); vport->fc_rscn_id_cnt = 0; vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); /* Indicate we are done walking this fc_rscn_id_list */ vport->fc_rscn_flush = 0; } /** * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did * @vport: pointer to a host virtual N_Port data structure. * @did: remote destination port identifier. 
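 *
 * The RSCN affected-address formats are compared as follows (condensed
 * summary of the switch in the routine body; domain/area/id are the three
 * bytes of the D_ID):
 *
 *	RSCN_ADDRESS_FORMAT_PORT   - domain, area and id must all match
 *	RSCN_ADDRESS_FORMAT_AREA   - domain and area must match
 *	RSCN_ADDRESS_FORMAT_DOMAIN - domain must match
 *	RSCN_ADDRESS_FORMAT_FABRIC - matches any did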
* * This routine checks whether there is any pending Registration State * Configuration Notification (RSCN) to a @did on @vport. * * Return code * None zero - The @did matched with a pending rscn * 0 - not able to match @did with a pending rscn **/ int lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) { D_ID ns_did; D_ID rscn_did; uint32_t *lp; uint32_t payload_len, i; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ns_did.un.word = did; /* Never match fabric nodes for RSCNs */ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) return 0; /* If we are doing a FULL RSCN rediscovery, match everything */ if (vport->fc_flag & FC_RSCN_DISCOVERY) return did; spin_lock_irq(shost->host_lock); if (vport->fc_rscn_flush) { /* Another thread is walking fc_rscn_id_list on this vport */ spin_unlock_irq(shost->host_lock); return 0; } /* Indicate we are walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 1; spin_unlock_irq(shost->host_lock); for (i = 0; i < vport->fc_rscn_id_cnt; i++) { lp = vport->fc_rscn_id_list[i]->virt; payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); payload_len -= sizeof(uint32_t); /* take off word 0 */ while (payload_len) { rscn_did.un.word = be32_to_cpu(*lp++); payload_len -= sizeof(uint32_t); switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { case RSCN_ADDRESS_FORMAT_PORT: if ((ns_did.un.b.domain == rscn_did.un.b.domain) && (ns_did.un.b.area == rscn_did.un.b.area) && (ns_did.un.b.id == rscn_did.un.b.id)) goto return_did_out; break; case RSCN_ADDRESS_FORMAT_AREA: if ((ns_did.un.b.domain == rscn_did.un.b.domain) && (ns_did.un.b.area == rscn_did.un.b.area)) goto return_did_out; break; case RSCN_ADDRESS_FORMAT_DOMAIN: if (ns_did.un.b.domain == rscn_did.un.b.domain) goto return_did_out; break; case RSCN_ADDRESS_FORMAT_FABRIC: goto return_did_out; } } } /* Indicate we are done with walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 0; return 0; return_did_out: /* Indicate we are done with walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 0; return did; } /** * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn * @vport: pointer to a host virtual N_Port data structure. * * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the * state machine for a @vport's nodes that are with pending RSCN (Registration * State Change Notification). * * Return code * 0 - Successful (currently alway return 0) **/ static int lpfc_rscn_recovery_check(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp = NULL, *n; /* Move all affected nodes by pending RSCNs to NPR state. */ list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) continue; /* NVME Target mode does not do RSCN Recovery. */ if (vport->phba->nvmet_support) continue; /* If we are in the process of doing discovery on this * NPort, let it continue on its own. */ switch (ndlp->nlp_state) { case NLP_STE_PLOGI_ISSUE: case NLP_STE_ADISC_ISSUE: case NLP_STE_REG_LOGIN_ISSUE: case NLP_STE_PRLI_ISSUE: case NLP_STE_LOGO_ISSUE: continue; } lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_cancel_retry_delay_tmo(vport, ndlp); } return 0; } /** * lpfc_send_rscn_event - Send an RSCN event to management application * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * * lpfc_send_rscn_event sends an RSCN netlink event to management * applications. 
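 *
 * The buffer handed to fc_host_post_vendor_event() is laid out roughly as
 * (sketch):
 *
 *	struct lpfc_rscn_event_header	// event_type = FC_REG_RSCN_EVENT,
 *					// payload_length = RSCN payload len
 *	u8 rscn_payload[payload_length]	// raw RSCN words from the frame
 *
 * and is posted with vendor id LPFC_NL_VENDOR_ID.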
*/ static void lpfc_send_rscn_event(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb) { struct lpfc_dmabuf *pcmd; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); uint32_t *payload_ptr; uint32_t payload_len; struct lpfc_rscn_event_header *rscn_event_data; pcmd = cmdiocb->cmd_dmabuf; payload_ptr = (uint32_t *) pcmd->virt; payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + payload_len, GFP_KERNEL); if (!rscn_event_data) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0147 Failed to allocate memory for RSCN event\n"); return; } rscn_event_data->event_type = FC_REG_RSCN_EVENT; rscn_event_data->payload_length = payload_len; memcpy(rscn_event_data->rscn_payload, payload_ptr, payload_len); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(struct lpfc_rscn_event_header) + payload_len, (char *)rscn_event_data, LPFC_NL_VENDOR_ID); kfree(rscn_event_data); } /** * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes an unsolicited RSCN (Registration State Change * Notification) IOCB. First, the payload of the unsolicited RSCN is walked * to invoke fc_host_post_event() routine to the FC transport layer. If the * discover state machine is about to begin discovery, it just accepts the * RSCN and the discovery process will satisfy the RSCN. If this RSCN only * contains N_Port IDs for other vports on this HBA, it just accepts the * RSCN and ignore processing it. If the state machine is in the recovery * state, the fc_rscn_id_list of this @vport is walked and the * lpfc_rscn_recovery_check() routine is invoked to send recovery event for * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() * routine is invoked to handle the RSCN event. * * Return code * 0 - Just sent the acc response * 1 - Sent the acc response and waited for name server completion **/ static int lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint32_t *lp, *datap; uint32_t payload_len, length, nportid, *cmd; int rscn_cnt; int rscn_id = 0, hba_id = 0; int i, tmo; pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); payload_len -= sizeof(uint32_t); /* take off word 0 */ /* RSCN received */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0214 RSCN received Data: x%x x%x x%x x%x\n", vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); /* Send an RSCN event to the management application */ lpfc_send_rscn_event(vport, cmdiocb); for (i = 0; i < payload_len/sizeof(uint32_t); i++) fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_RSCN, lp[i]); /* Check if RSCN is coming from a direct-connected remote NPort */ if (vport->fc_flag & FC_PT2PT) { /* If so, just ACC it, no other action needed for now */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2024 pt2pt RSCN %08x Data: x%x x%x\n", *lp, vport->fc_flag, payload_len); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); /* Check to see if we need to NVME rescan this target * remoteport. 
*/ if (ndlp->nlp_fc4_type & NLP_FC4_NVME && ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) lpfc_nvme_rescan_port(vport, ndlp); return 0; } /* If we are about to begin discovery, just ACC the RSCN. * Discovery processing will satisfy it. */ if (vport->port_state <= LPFC_NS_QRY) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return 0; } /* If this RSCN just contains NPortIDs for other vports on this HBA, * just ACC and ignore it. */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->cfg_peer_port_login)) { i = payload_len; datap = lp; while (i > 0) { nportid = *datap++; nportid = ((be32_to_cpu(nportid)) & Mask_DID); i -= sizeof(uint32_t); rscn_id++; if (lpfc_find_vport_by_did(phba, nportid)) hba_id++; } if (rscn_id == hba_id) { /* ALL NPortIDs in RSCN are on HBA */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0219 Ignore RSCN " "Data: x%x x%x x%x x%x\n", vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); /* Restart disctmo if its already running */ if (vport->fc_flag & FC_DISC_TMO) { tmo = ((phba->fc_ratov * 3) + 3); mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); } return 0; } } spin_lock_irq(shost->host_lock); if (vport->fc_rscn_flush) { /* Another thread is walking fc_rscn_id_list on this vport */ vport->fc_flag |= FC_RSCN_DISCOVERY; spin_unlock_irq(shost->host_lock); /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return 0; } /* Indicate we are walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 1; spin_unlock_irq(shost->host_lock); /* Get the array count after successfully have the token */ rscn_cnt = vport->fc_rscn_id_cnt; /* If we are already processing an RSCN, save the received * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. */ if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DEFERRED; /* Restart disctmo if its already running */ if (vport->fc_flag & FC_DISC_TMO) { tmo = ((phba->fc_ratov * 3) + 3); mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); } if ((rscn_cnt < FC_MAX_HOLD_RSCN) && !(vport->fc_flag & FC_RSCN_DISCOVERY)) { vport->fc_flag |= FC_RSCN_MODE; spin_unlock_irq(shost->host_lock); if (rscn_cnt) { cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); } if ((rscn_cnt) && (payload_len + length <= LPFC_BPL_SIZE)) { *cmd &= ELS_CMD_MASK; *cmd |= cpu_to_be32(payload_len + length); memcpy(((uint8_t *)cmd) + length, lp, payload_len); } else { vport->fc_rscn_id_list[rscn_cnt] = pcmd; vport->fc_rscn_id_cnt++; /* If we zero, cmdiocb->cmd_dmabuf, the calling * routine will not try to free it. 
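				 * Ownership of the dmabuf moves to
				 * vport->fc_rscn_id_list[] here, so clearing
				 * cmd_dmabuf below keeps the unsolicited ELS
				 * completion path from freeing the buffer a
				 * second time.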
*/ cmdiocb->cmd_dmabuf = NULL; } /* Deferred RSCN */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0235 Deferred RSCN " "Data: x%x x%x x%x\n", vport->fc_rscn_id_cnt, vport->fc_flag, vport->port_state); } else { vport->fc_flag |= FC_RSCN_DISCOVERY; spin_unlock_irq(shost->host_lock); /* ReDiscovery RSCN */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0234 ReDiscovery RSCN " "Data: x%x x%x x%x\n", vport->fc_rscn_id_cnt, vport->fc_flag, vport->port_state); } /* Indicate we are done walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 0; /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); /* send RECOVERY event for ALL nodes that match RSCN payload */ lpfc_rscn_recovery_check(vport); return 0; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RSCN: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_MODE; spin_unlock_irq(shost->host_lock); vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; /* Indicate we are done walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 0; /* * If we zero, cmdiocb->cmd_dmabuf, the calling routine will * not try to free it. */ cmdiocb->cmd_dmabuf = NULL; lpfc_set_disctmo(vport); /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); /* send RECOVERY event for ALL nodes that match RSCN payload */ lpfc_rscn_recovery_check(vport); return lpfc_els_handle_rscn(vport); } /** * lpfc_els_handle_rscn - Handle rscn for a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine handles the Registration State Configuration Notification * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, * if the ndlp to NameServer exists, a Common Transport (CT) command to the * NameServer shall be issued. If CT command to the NameServer fails to be * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any * RSCN activities with the @vport. * * Return code * 0 - Cleaned up rscn on the @vport * 1 - Wait for plogi to name server before proceed **/ int lpfc_els_handle_rscn(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; struct lpfc_hba *phba = vport->phba; /* Ignore RSCN if the port is being torn down. */ if (vport->load_flag & FC_UNLOADING) { lpfc_els_flush_rscn(vport); return 0; } /* Start timer for RSCN processing */ lpfc_set_disctmo(vport); /* RSCN processed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", vport->fc_flag, 0, vport->fc_rscn_id_cnt, vport->port_state, vport->num_disc_nodes, vport->gidft_inp); /* To process RSCN, first compare RSCN data with NameServer */ vport->fc_ns_retry = 0; vport->num_disc_nodes = 0; ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Good ndlp, issue CT Request to NameServer. Need to * know how many gidfts were issued. If none, then just * flush the RSCN. Otherwise, the outstanding requests * need to complete. */ if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { if (lpfc_issue_gidft(vport) > 0) return 1; } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { if (lpfc_issue_gidpt(vport) > 0) return 1; } else { return 1; } } else { /* Nameserver login in question. Revalidate. 
*/ if (ndlp) { ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); } else { ndlp = lpfc_nlp_init(vport, NameServer_DID); if (!ndlp) { lpfc_els_flush_rscn(vport); return 0; } ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); } ndlp->nlp_type |= NLP_FABRIC; lpfc_issue_els_plogi(vport, NameServer_DID, 0); /* Wait for NameServer login cmpl before we can * continue */ return 1; } lpfc_els_flush_rscn(vport); return 0; } /** * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes Fabric Login (FLOGI) IOCB received as an ELS * unsolicited event. An unsolicited FLOGI can be received in a point-to- * point topology. As an unsolicited FLOGI should not be received in a loop * mode, any unsolicited FLOGI received in loop mode shall be ignored. The * lpfc_check_sparm() routine is invoked to check the parameters in the * unsolicited FLOGI. If parameters validation failed, the routine * lpfc_els_rsp_reject() shall be called with reject reason code set to * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the * FLOGI shall be compared with the Port WWN of the @vport to determine who * will initiate PLOGI. The higher lexicographical value party shall has * higher priority (as the winning port) and will initiate PLOGI and * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. * * Return code * 0 - Successfully processed the unsolicited flogi * 1 - Failed to process the unsolicited flogi **/ static int lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; uint32_t *lp = (uint32_t *) pcmd->virt; union lpfc_wqe128 *wqe = &cmdiocb->wqe; struct serv_parm *sp; LPFC_MBOXQ_t *mbox; uint32_t cmd, did; int rc; uint32_t fc_flag = 0; uint32_t port_state = 0; /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; cmd = *lp++; sp = (struct serv_parm *) lp; /* FLOGI received */ lpfc_set_disctmo(vport); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { /* We should never receive a FLOGI in loop mode, ignore it */ did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); /* An FLOGI ELS command <elsCmd> was received from DID <did> in Loop Mode */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0113 An FLOGI ELS command x%x was " "received from DID x%x in Loop Mode\n", cmd, did); return 1; } (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); /* * If our portname is greater than the remote portname, * then we initiate Nport login. 
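	 *
	 * The memcmp() result below is interpreted as (sketch):
	 *
	 *	rc == 0 - we received our own FLOGI back, i.e. an external
	 *		  loopback plug (pre-SLI4: re-initialize the link)
	 *	rc >  0 - our WWPN is higher: set FC_PT2PT_PLOGI and use
	 *		  PT2PT_LocalID as fc_myDID
	 *	rc <  0 - remote WWPN is higher: use PT2PT_RemoteID and wait
	 *		  for its PLOGI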
*/ rc = memcmp(&vport->fc_portname, &sp->portName, sizeof(struct lpfc_name)); if (!rc) { if (phba->sli_rev < LPFC_SLI_REV4) { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return 1; lpfc_linkdown(phba); lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); lpfc_set_loopback_flag(phba); if (rc == MBX_NOT_FINISHED) mempool_free(mbox, phba->mbox_mem_pool); return 1; } /* External loopback plug insertion detected */ phba->link_flag |= LS_EXTERNAL_LOOPBACK; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, "1119 External Loopback plug detected\n"); /* abort the flogi coming back to ourselves * due to external loopback on the port. */ lpfc_els_abort_flogi(phba); return 0; } else if (rc > 0) { /* greater than */ spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT_PLOGI; spin_unlock_irq(shost->host_lock); /* If we have the high WWPN we can assign our own * myDID; otherwise, we have to WAIT for a PLOGI * from the remote NPort to find out what it * will be. */ vport->fc_myDID = PT2PT_LocalID; } else { vport->fc_myDID = PT2PT_RemoteID; } /* * The vport state should go to LPFC_FLOGI only * AFTER we issue a FLOGI, not receive one. */ spin_lock_irq(shost->host_lock); fc_flag = vport->fc_flag; port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); /* Acking an unsol FLOGI. Count 1 for link bounce * work-around. */ vport->rcv_flogi_cnt++; spin_unlock_irq(shost->host_lock); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3311 Rcv Flogi PS x%x new PS x%x " "fc_flag x%x new fc_flag x%x\n", port_state, vport->port_state, fc_flag, vport->fc_flag); /* * We temporarily set fc_myDID to make it look like we are * a Fabric. This is done just so we end up with the right * did / sid on the FLOGI ACC rsp. */ did = vport->fc_myDID; vport->fc_myDID = Fabric_DID; memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); /* Defer ACC response until AFTER we issue a FLOGI */ if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com); phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com); vport->fc_myDID = did; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3344 Deferring FLOGI ACC: rx_id: x%x," " ox_id: x%x, hba_flag x%x\n", phba->defer_flogi_acc_rx_id, phba->defer_flogi_acc_ox_id, phba->hba_flag); phba->defer_flogi_acc_flag = true; return 0; } /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); /* Now lets put fc_myDID back to what its supposed to be */ vport->fc_myDID = did; return 0; } /** * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes Request Node Identification Data (RNID) IOCB * received as an ELS unsolicited event. Only when the RNID specified format * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to * Accept (ACC) the RNID ELS command. All the other RNID formats are * rejected by invoking the lpfc_els_rsp_reject() routine. 
* * Return code * 0 - Successfully processed rnid iocb (currently always return 0) **/ static int lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; RNID *rn; struct ls_rjt stat; pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; lp++; rn = (RNID *) lp; /* RNID received */ switch (rn->Format) { case 0: case RNID_TOPOLOGY_DISC: /* Send back ACC */ lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); break; default: /* Reject this request because format not supported */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); } return 0; } /** * lpfc_els_rcv_echo - Process an unsolicited echo iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * Return code * 0 - Successfully processed echo iocb (currently always return 0) **/ static int lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { uint8_t *pcmd; pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; /* skip over first word of echo command to find echo data */ pcmd += sizeof(uint32_t); lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); return 0; } /** * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes a Link Incident Report Registration(LIRR) IOCB * received as an ELS unsolicited event. Currently, this function just invokes * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. * * Return code * 0 - Successfully processed lirr iocb (currently always return 0) **/ static int lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct ls_rjt stat; /* For now, unconditionally reject this command */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } /** * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB * received as an ELS unsolicited event. A request to RRQ shall only * be accepted if the Originator Nx_Port N_Port_ID or the Responder * Nx_Port N_Port_ID of the target Exchange is the same as the * N_Port_ID of the Nx_Port that makes the request. If the RRQ is * not accepted, an LS_RJT with reason code "Unable to perform * command request" and reason code explanation "Invalid Originator * S_ID" shall be returned. For now, we just unconditionally accept * RRQ from the target. **/ static void lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if (vport->phba->sli_rev == LPFC_SLI_REV4) lpfc_els_clear_rrq(vport, cmdiocb, ndlp); } /** * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd * @phba: pointer to lpfc hba data structure. 
* @pmb: pointer to the driver internal queue element for mailbox command. * * This routine is the completion callback function for the MBX_READ_LNK_STAT * mailbox command. This callback function is to actually send the Accept * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It * collects the link statistics from the completion of the MBX_READ_LNK_STAT * mailbox command, constructs the RLS response with the link statistics * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC * response to the RLS. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the RLS Accept Response * ELS IOCB command. * **/ static void lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { int rc = 0; MAILBOX_t *mb; IOCB_t *icmd; union lpfc_wqe128 *wqe; struct RLS_RSP *rls_rsp; uint8_t *pcmd; struct lpfc_iocbq *elsiocb; struct lpfc_nodelist *ndlp; uint16_t oxid; uint16_t rxid; uint32_t cmdsize; u32 ulp_context; mb = &pmb->u.mb; ndlp = pmb->ctx_ndlp; rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); pmb->ctx_buf = NULL; pmb->ctx_ndlp = NULL; if (mb->mbxStatus) { mempool_free(pmb, phba->mbox_mem_pool); return; } cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); /* Decrement the ndlp reference count from previous mbox command */ lpfc_nlp_put(ndlp); if (!elsiocb) { mempool_free(pmb, phba->mbox_mem_pool); return; } ulp_context = get_job_ulpcontext(phba, elsiocb); if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* Xri / rx_id */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); } else { icmd = &elsiocb->iocb; icmd->ulpContext = rxid; icmd->unsli3.rcvsli3.ox_id = oxid; } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); /* Skip past command */ rls_rsp = (struct RLS_RSP *)pcmd; rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); mempool_free(pmb, phba->mbox_mem_pool); /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); } return; } /** * lpfc_els_rcv_rls - Process an unsolicited rls iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. 
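 *
 * The ox_id and rx_id of the unsolicited RLS are packed into the mailbox
 * ctx_buf pointer as a scalar; a sketch of the encoding used in
 * lpfc_els_rcv_rls() and decoded again here:
 *
 *	mbox->ctx_buf = (void *)((unsigned long)(ox_id << 16 | rx_id));
 *	rx_id = (unsigned long)pmb->ctx_buf & 0xffff;
 *	ox_id = ((unsigned long)pmb->ctx_buf >> 16) & 0xffff;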
 *
 * This routine processes Read Link Status (RLS) IOCB received as an
 * ELS unsolicited event. It first checks the remote port state. If the
 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
 * for reading the HBA link statistics. The callback function,
 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
 * actually sends out the RLS Accept (ACC) response.
 *
 * Return codes
 *   0 - Successfully processed rls iocb (currently always returns 0)
 **/
static int
lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	u32 ctx = get_job_ulpcontext(phba, cmdiocb);
	u32 ox_id = get_job_rcvoxid(phba, cmdiocb);

	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
		/* reject the unsolicited RLS request and done with it */
		goto reject_out;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
	if (mbox) {
		lpfc_read_lnk_stat(phba, mbox);
		mbox->ctx_buf = (void *)((unsigned long)
					 (ox_id << 16 | ctx));
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		if (!mbox->ctx_ndlp)
			goto node_err;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
			!= MBX_NOT_FINISHED)
			/* Mbox completion will send ELS Response */
			return 0;
		/* Decrement reference count used for the failed mbox
		 * command.
		 */
		lpfc_nlp_put(ndlp);
node_err:
		mempool_free(mbox, phba->mbox_mem_pool);
	}
reject_out:
	/* issue rejection response */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/**
 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes Read Timeout Value (RTV) IOCB received as an
 * ELS unsolicited event. It first checks the remote port state. If the
 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 * response. Otherwise, it sends the Accept (ACC) response to the Read
 * Timeout Value (RTV) unsolicited IOCB event.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the RTV Accept Response
 * ELS IOCB command.
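 *
 * The ACC payload built below carries (sketch, values taken from this HBA):
 *
 *	ratov - phba->fc_ratov * 1000 (reported in milliseconds)
 *	edtov - phba->fc_edtov
 *	qtov  - qtov_edtovres / qtov_rttov flag bits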
* * Return codes * 0 - Successfully processed rtv iocb (currently always return 0) **/ static int lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { int rc = 0; IOCB_t *icmd; union lpfc_wqe128 *wqe; struct lpfc_hba *phba = vport->phba; struct ls_rjt stat; struct RTV_RSP *rtv_rsp; uint8_t *pcmd; struct lpfc_iocbq *elsiocb; uint32_t cmdsize; u32 ulp_context; if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) /* reject the unsolicited RTV request and done with it */ goto reject_out; cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, lpfc_max_els_tries, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); /* Skip past command */ ulp_context = get_job_ulpcontext(phba, elsiocb); /* use the command's xri in the response */ if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, cmdiocb)); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, cmdiocb)); } else { icmd = &elsiocb->iocb; icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); } rtv_rsp = (struct RTV_RSP *)pcmd; /* populate RTV payload */ rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " "Data: x%x x%x x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 0; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); } return 0; reject_out: /* issue rejection response */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } /* lpfc_issue_els_rrq - Process an unsolicited rrq iocb * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @did: DID of the target. * @rrq: Pointer to the rrq struct. * * Build a ELS RRQ command and send it to the target. If the issue_iocb is * successful, the completion handler will clear the RRQ. * * Return codes * 0 - Successfully sent rrq els iocb. * 1 - Failed to send rrq els iocb. 
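 *
 * Typical call path (see lpfc_send_rrq() below): an active rrq for the xri
 * is confirmed with lpfc_test_rrq_active() and then this routine fills the
 * payload roughly as (sketch):
 *
 *	rrq_oxid = phba->sli4_hba.xri_ids[rrq->xritag];
 *	rrq_rxid = rrq->rxid;
 *	rrq_did  = vport->fc_myDID;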
**/ static int lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t did, struct lpfc_node_rrq *rrq) { struct lpfc_hba *phba = vport->phba; struct RRQ *els_rrq; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; int ret; if (!ndlp) return 1; /* If ndlp is not NULL, we will bump the reference count on it */ cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, ELS_CMD_RRQ); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; /* For RRQ request, remainder of payload is Exchange IDs */ *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; pcmd += sizeof(uint32_t); els_rrq = (struct RRQ *) pcmd; bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); bf_set(rrq_rxid, els_rrq, rrq->rxid); bf_set(rrq_did, els_rrq, vport->fc_myDID); els_rrq->rrq = cpu_to_be32(els_rrq->rrq); els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue RRQ: did:x%x", did, rrq->xritag, rrq->rxid); elsiocb->context_un.rrq = rrq; elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) goto io_err; ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (ret == IOCB_ERROR) { lpfc_nlp_put(ndlp); goto io_err; } return 0; io_err: lpfc_els_free_iocb(phba, elsiocb); return 1; } /** * lpfc_send_rrq - Sends ELS RRQ if needed. * @phba: pointer to lpfc hba data structure. * @rrq: pointer to the active rrq. * * This routine will call the lpfc_issue_els_rrq if the rrq is * still active for the xri. If this function returns a failure then * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. * * Returns 0 Success. * 1 Failure. **/ int lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) { struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); if (!ndlp) return 1; if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) return lpfc_issue_els_rrq(rrq->vport, ndlp, rrq->nlp_DID, rrq); else return 1; } /** * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command * @vport: pointer to a host virtual N_Port data structure. * @cmdsize: size of the ELS command. * @oldiocb: pointer to the original lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the RPL Accept Response * ELS command. 
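 *
 * The single-port RPL_RSP payload is filled in roughly as (sketch):
 *
 *	listLen  = 1
 *	index    = 0
 *	port_num_blk.portNum  = 0
 *	port_num_blk.portID   = vport->fc_myDID
 *	port_num_blk.portName = vport->fc_portname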
* * Return code * 0 - Successfully issued ACC RPL ELS command * 1 - Failed to issue ACC RPL ELS command **/ static int lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { int rc = 0; struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; union lpfc_wqe128 *wqe; RPL_RSP rpl_rsp; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; u32 ulp_context; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; ulp_context = get_job_ulpcontext(phba, elsiocb); if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; /* Xri / rx_id */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, get_job_ulpcontext(phba, oldiocb)); bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, get_job_rcvoxid(phba, oldiocb)); } else { icmd = &elsiocb->iocb; icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); } pcmd = elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint16_t); *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); pcmd += sizeof(uint16_t); /* Setup the RPL ACC payload */ rpl_rsp.listLen = be32_to_cpu(1); rpl_rsp.index = 0; rpl_rsp.port_num_blk.portNum = 0; rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0120 Xmit ELS RPL ACC response tag x%x " "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " "rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); return 1; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } return 0; } /** * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes Read Port List (RPL) IOCB received as an ELS * unsolicited event. It first checks the remote port state. If the remote * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it * invokes the lpfc_els_rsp_reject() routine to send reject response. * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine * to accept the RPL. 
* * Return code * 0 - Successfully processed rpl iocb (currently always return 0) **/ static int lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; uint32_t maxsize; uint16_t cmdsize; RPL *rpl; struct ls_rjt stat; if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { /* issue rejection response */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* rejected the unsolicited RPL request and done with it */ return 0; } pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; rpl = (RPL *) (lp + 1); maxsize = be32_to_cpu(rpl->maxsize); /* We support only one port */ if ((rpl->index == 0) && ((maxsize == 0) || ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); } else { cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); } lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); return 0; } /** * lpfc_els_rcv_farp - Process an unsolicited farp request els command * @vport: pointer to a virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes Fibre Channel Address Resolution Protocol * (FARP) Request IOCB received as an ELS unsolicited event. Currently, * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the * remote PortName is compared against the FC PortName stored in the @vport * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is * compared against the FC NodeName stored in the @vport data structure. * If any of these matches and the FARP_REQUEST_FARPR flag is set in the * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is * invoked to send out FARP Response to the remote node. Before sending the * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() * routine is invoked to log into the remote port first. 
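 *
 * A condensed sketch of the flag handling in the routine body:
 *
 *	if (Mflags & FARP_MATCH_PORT)  match RportName against fc_portname
 *	if (Mflags & FARP_MATCH_NODE)  match RnodeName against fc_nodename
 *	on a match, for an UNMAPPED or MAPPED node:
 *		if (Rflags & FARP_REQUEST_PLOGI)  lpfc_issue_els_plogi()
 *		if (Rflags & FARP_REQUEST_FARPR)  lpfc_issue_els_farpr()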
* * Return code * 0 - Either the FARP Match Mode not supported or successfully processed **/ static int lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; FARP *fp; uint32_t cnt, did; did = get_job_els_rsp64_did(vport->phba, cmdiocb); pcmd = cmdiocb->cmd_dmabuf; lp = (uint32_t *) pcmd->virt; lp++; fp = (FARP *) lp; /* FARP-REQ received from DID <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0601 FARP-REQ received from DID x%x\n", did); /* We will only support match on WWPN or WWNN */ if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { return 0; } cnt = 0; /* If this FARP command is searching for my portname */ if (fp->Mflags & FARP_MATCH_PORT) { if (memcmp(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)) == 0) cnt = 1; } /* If this FARP command is searching for my nodename */ if (fp->Mflags & FARP_MATCH_NODE) { if (memcmp(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)) == 0) cnt = 1; } if (cnt) { if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { /* Log back into the node before sending the FARP. */ if (fp->Rflags & FARP_REQUEST_PLOGI) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } /* Send a FARP response to that node */ if (fp->Rflags & FARP_REQUEST_FARPR) lpfc_issue_els_farpr(vport, did, 0); } } return 0; } /** * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * This routine processes Fibre Channel Address Resolution Protocol * Response (FARPR) IOCB received as an ELS unsolicited event. It simply * invokes the lpfc_els_rsp_acc() routine to the remote node to accept * the FARP response request. * * Return code * 0 - Successfully processed FARPR IOCB (currently always return 0) **/ static int lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { uint32_t did; did = get_job_els_rsp64_did(vport->phba, cmdiocb); /* FARP-RSP received from DID <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0600 FARP-RSP received from DID x%x\n", did); /* ACCEPT the Farp resp request */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return 0; } /** * lpfc_els_rcv_fan - Process an unsolicited fan iocb command * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @fan_ndlp: pointer to a node-list data structure. * * This routine processes a Fabric Address Notification (FAN) IOCB * command received as an ELS unsolicited event. The FAN ELS command will * only be processed on a physical port (i.e., the @vport represents the * physical port). The fabric NodeName and PortName from the FAN IOCB are * compared against those in the phba data structure. If any of those is * different, the lpfc_initial_flogi() routine is invoked to initialize * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, * if both of those are identical, the lpfc_issue_fabric_reglogin() routine * is invoked to register login to the fabric. * * Return code * 0 - Successfully processed fan iocb (currently always return 0). 
**/ static int lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *fan_ndlp) { struct lpfc_hba *phba = vport->phba; uint32_t *lp; FAN *fp; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; fp = (FAN *) ++lp; /* FAN received; Fan does not have a reply sequence */ if ((vport == phba->pport) && (vport->port_state == LPFC_LOCAL_CFG_LINK)) { if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, sizeof(struct lpfc_name))) || (memcmp(&phba->fc_fabparam.portName, &fp->FportName, sizeof(struct lpfc_name)))) { /* This port has switched fabrics. FLOGI is required */ lpfc_issue_init_vfi(vport); } else { /* FAN verified - skip FLOGI */ vport->fc_myDID = vport->fc_prevDID; if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); else { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3138 Need register VFI: (x%x/%x)\n", vport->fc_prevDID, vport->fc_myDID); lpfc_issue_reg_vfi(vport); } } } return 0; } /** * lpfc_els_rcv_edc - Process an unsolicited EDC iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * * Return code * 0 - Successfully processed echo iocb (currently always return 0) **/ static int lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct fc_els_edc *edc_req; struct fc_tlv_desc *tlv; uint8_t *payload; uint32_t *ptr, dtag; const char *dtag_nm; int desc_cnt = 0, bytes_remain; struct fc_diag_lnkflt_desc *plnkflt; payload = cmdiocb->cmd_dmabuf->virt; edc_req = (struct fc_els_edc *)payload; bytes_remain = be32_to_cpu(edc_req->desc_len); ptr = (uint32_t *)payload; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, "3319 Rcv EDC payload len %d: x%x x%x x%x\n", bytes_remain, be32_to_cpu(*ptr), be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); /* No signal support unless there is a congestion descriptor */ phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; phba->cgn_sig_freq = 0; phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; if (bytes_remain <= 0) goto out; tlv = edc_req->desc; /* * cycle through EDC diagnostic descriptors to find the * congestion signaling capability descriptor */ while (bytes_remain) { if (bytes_remain < FC_TLV_DESC_HDR_SZ) { lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, "6464 Truncated TLV hdr on " "Diagnostic descriptor[%d]\n", desc_cnt); goto out; } dtag = be32_to_cpu(tlv->desc_tag); switch (dtag) { case ELS_DTAG_LNK_FAULT_CAP: if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != sizeof(struct fc_diag_lnkflt_desc)) { lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, "6465 Truncated Link Fault Diagnostic " "descriptor[%d]: %d vs 0x%zx 0x%zx\n", desc_cnt, bytes_remain, FC_TLV_DESC_SZ_FROM_LENGTH(tlv), sizeof(struct fc_diag_lnkflt_desc)); goto out; } plnkflt = (struct fc_diag_lnkflt_desc *)tlv; lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_LDS_EVENT, "4626 Link Fault Desc Data: x%08x len x%x " "da x%x dd x%x interval x%x\n", be32_to_cpu(plnkflt->desc_tag), be32_to_cpu(plnkflt->desc_len), be32_to_cpu( plnkflt->degrade_activate_threshold), be32_to_cpu( plnkflt->degrade_deactivate_threshold), be32_to_cpu(plnkflt->fec_degrade_interval)); break; case ELS_DTAG_CG_SIGNAL_CAP: if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 
		    FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
			sizeof(struct fc_diag_cg_sig_desc)) {
				lpfc_printf_log(
					phba, KERN_WARNING, LOG_CGN_MGMT,
					"6466 Truncated cgn signal Diagnostic "
					"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
					desc_cnt, bytes_remain,
					FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
					sizeof(struct fc_diag_cg_sig_desc));
				goto out;
			}

			phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
			phba->cgn_reg_signal = phba->cgn_init_reg_signal;

			/* We start negotiation with lpfc_fabric_cgn_frequency.
			 * When we process the EDC, we will settle on the
			 * higher frequency.
			 */
			phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;

			lpfc_least_capable_settings(
				phba, (struct fc_diag_cg_sig_desc *)tlv);
			break;
		default:
			dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
			lpfc_printf_log(phba, KERN_WARNING,
					LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
					"6467 unknown Diagnostic "
					"Descriptor[%d]: tag x%x (%s)\n",
					desc_cnt, dtag, dtag_nm);
		}

		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
		desc_cnt++;
	}

out:
	/* Need to send back an ACC */
	lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);

	lpfc_config_cgn_signal(phba);
	return 0;
}

/**
 * lpfc_els_timeout - Handler function for the ELS timer
 * @t: timer context used to obtain the vport.
 *
 * This routine is invoked by the ELS timer after timeout. It posts the ELS
 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
 * up the worker thread. The worker thread then invokes the routine
 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
 **/
void
lpfc_els_timeout(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
		vport->work_port_events |= WORKER_ELS_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_els_timeout_handler - Process an els timeout event
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine is the actual handler function that processes an ELS timeout
 * event. It walks the ELS ring to get and abort all the IOCBs (except the
 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
 * invoking the lpfc_sli_issue_abort_iotag() routine.
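 * IOCBs whose drvrTimeout has not yet expired are not aborted; their
 * remaining timeout is simply decremented and they are skipped. If the ELS
 * txcmplq is still not empty when the scan completes and the port is not
 * unloading, the ELS timer is rearmed for another interval.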
**/ void lpfc_els_timeout_handler(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; struct lpfc_dmabuf *pcmd; uint32_t els_command = 0; uint32_t timeout; uint32_t remote_ID = 0xffffffff; LIST_HEAD(abort_list); u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; timeout = (uint32_t)(phba->fc_ratov << 1); pring = lpfc_phba_elsring(phba); if (unlikely(!pring)) return; if (phba->pport->load_flag & FC_UNLOADING) return; spin_lock_irq(&phba->hbalock); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { ulp_command = get_job_cmnd(phba, piocb); ulp_context = get_job_ulpcontext(phba, piocb); did = get_job_els_rsp64_did(phba, piocb); if (phba->sli_rev == LPFC_SLI_REV4) { iotag = get_wqe_reqtag(piocb); } else { cmd = &piocb->iocb; iotag = cmd->ulpIoTag; } if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || ulp_command == CMD_ABORT_XRI_CX || ulp_command == CMD_ABORT_XRI_CN || ulp_command == CMD_CLOSE_XRI_CN) continue; if (piocb->vport != vport) continue; pcmd = piocb->cmd_dmabuf; if (pcmd) els_command = *(uint32_t *) (pcmd->virt); if (els_command == ELS_CMD_FARP || els_command == ELS_CMD_FARPR || els_command == ELS_CMD_FDISC) continue; if (piocb->drvrTimeout > 0) { if (piocb->drvrTimeout >= timeout) piocb->drvrTimeout -= timeout; else piocb->drvrTimeout = 0; continue; } remote_ID = 0xffffffff; if (ulp_command != CMD_GEN_REQUEST64_CR) { remote_ID = did; } else { struct lpfc_nodelist *ndlp; ndlp = __lpfc_findnode_rpi(vport, ulp_context); if (ndlp) remote_ID = ndlp->nlp_DID; } list_add_tail(&piocb->dlist, &abort_list); } if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0127 ELS timeout Data: x%x x%x x%x " "x%x\n", els_command, remote_ID, ulp_command, iotag); spin_lock_irq(&phba->hbalock); list_del_init(&piocb->dlist); lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); spin_unlock_irq(&phba->hbalock); } /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); if (!list_empty(&pring->txcmplq)) if (!(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&vport->els_tmofunc, jiffies + msecs_to_jiffies(1000 * timeout)); } /** * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport * @vport: pointer to a host virtual N_Port data structure. * * This routine is used to clean up all the outstanding ELS commands on a * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() * routine. After that, it walks the ELS transmit queue to remove all the * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For * the IOCBs with a non-NULL completion callback function, the callback * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion * callback function, the IOCB will simply be released. Finally, it walks * the ELS transmit completion queue to issue an abort IOCB to any transmit * completion queue IOCB that is associated with the @vport and is not * an IOCB from libdfc (i.e., the management plane IOCBs that are not * part of the discovery state machine) out to HBA by invoking the * lpfc_sli_issue_abort_iotag() routine. 
Note that this function issues the
 * abort IOCB to any transmit completion queued IOCB; it does not guarantee
 * that the IOCBs are aborted by the time this function returns.
 **/
void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
	LIST_HEAD(abort_list);
	LIST_HEAD(cancel_list);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	u32 ulp_command;
	unsigned long iflags = 0;
	bool mbx_tmo_err;

	lpfc_fabric_abort_vport(vport);

	/*
	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
	 * ultimately grabs the ring_lock, the driver must splice the list into
	 * a working list and release the locks before calling the abort.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pring = lpfc_phba_elsring(phba);

	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
	if (unlikely(!pring)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);

	mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
	/* First we need to issue aborts to outstanding cmds on txcmpl */
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
		if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
			continue;

		if (piocb->vport != vport)
			continue;

		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
			continue;

		/* On the ELS ring we can have ELS_REQUESTs or
		 * GEN_REQUESTs waiting for a response.
		 */
		ulp_command = get_job_cmnd(phba, piocb);
		if (ulp_command == CMD_ELS_REQUEST64_CR) {
			list_add_tail(&piocb->dlist, &abort_list);

			/* If the link is down when flushing ELS commands
			 * the firmware will not complete them till after
			 * the link comes back up. This may confuse
			 * discovery for the new link up, so we need to
			 * change the compl routine to just clean up the iocb
			 * and avoid any retry logic.
			 */
			if (phba->link_state == LPFC_LINK_DOWN)
				piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
		} else if (ulp_command == CMD_GEN_REQUEST64_CR ||
			   mbx_tmo_err)
			list_add_tail(&piocb->dlist, &abort_list);
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Abort each txcmpl iocb on aborted list and remove the dlist links.
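	 * When a mailbox command timeout is being recovered (MBX_TMO_ERR set),
	 * the iocbs are instead moved to a local cancel list and completed
	 * with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED rather than being
	 * aborted to the HBA.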
*/ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { spin_lock_irqsave(&phba->hbalock, iflags); list_del_init(&piocb->dlist); if (mbx_tmo_err) list_move_tail(&piocb->list, &cancel_list); else lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); spin_unlock_irqrestore(&phba->hbalock, iflags); } if (!list_empty(&cancel_list)) lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); else /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); if (!list_empty(&abort_list)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3387 abort list for txq not empty\n"); INIT_LIST_HEAD(&abort_list); spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); /* No need to abort the txq list, * just queue them up for lpfc_sli_cancel_iocbs */ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { ulp_command = get_job_cmnd(phba, piocb); if (piocb->cmd_flag & LPFC_IO_LIBDFC) continue; /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ if (ulp_command == CMD_QUE_RING_BUF_CN || ulp_command == CMD_QUE_RING_BUF64_CN || ulp_command == CMD_CLOSE_XRI_CN || ulp_command == CMD_ABORT_XRI_CN || ulp_command == CMD_ABORT_XRI_CX) continue; if (piocb->vport != vport) continue; list_del_init(&piocb->list); list_add_tail(&piocb->list, &abort_list); } /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ if (vport == phba->pport) { list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list) { list_del_init(&piocb->list); list_add_tail(&piocb->list, &abort_list); } } if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring->ring_lock); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &abort_list, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); return; } /** * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA * @phba: pointer to lpfc hba data structure. * * This routine is used to clean up all the outstanding ELS commands on a * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() * routine. After that, it walks the ELS transmit queue to remove all the * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For * the IOCBs with the completion callback function associated, the callback * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion * callback function associated, the IOCB will simply be released. Finally, * it walks the ELS transmit completion queue to issue an abort IOCB to any * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the * management plane IOCBs that are not part of the discovery state machine) * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. **/ void lpfc_els_flush_all_cmd(struct lpfc_hba *phba) { struct lpfc_vport *vport; spin_lock_irq(&phba->port_list_lock); list_for_each_entry(vport, &phba->port_list, listentry) lpfc_els_flush_cmd(vport); spin_unlock_irq(&phba->port_list_lock); return; } /** * lpfc_send_els_failure_event - Posts an ELS command failure event * @phba: Pointer to hba context object. * @cmdiocbp: Pointer to command iocb which reported error. * @rspiocbp: Pointer to response iocb which reported error. * * This function sends an event when there is an ELS command * failure. 
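 *
 * An LS_RJT completion posts an LPFC_EVENT_LSRJT_RCV event carrying the
 * reason and explanation codes, while an NPort busy or Fabric busy
 * completion posts the corresponding fabric busy event. Both are delivered
 * through fc_host_post_vendor_event() with LPFC_NL_VENDOR_ID.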
**/ void lpfc_send_els_failure_event(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbp, struct lpfc_iocbq *rspiocbp) { struct lpfc_vport *vport = cmdiocbp->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_lsrjt_event lsrjt_event; struct lpfc_fabric_event_header fabric_event; struct ls_rjt stat; struct lpfc_nodelist *ndlp; uint32_t *pcmd; u32 ulp_status, ulp_word4; ndlp = cmdiocbp->ndlp; if (!ndlp) return; ulp_status = get_job_ulpstatus(phba, rspiocbp); ulp_word4 = get_job_word4(phba, rspiocbp); if (ulp_status == IOSTAT_LS_RJT) { lsrjt_event.header.event_type = FC_REG_ELS_EVENT; lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(lsrjt_event), (char *)&lsrjt_event, LPFC_NL_VENDOR_ID); return; } if (ulp_status == IOSTAT_NPORT_BSY || ulp_status == IOSTAT_FABRIC_BSY) { fabric_event.event_type = FC_REG_FABRIC_EVENT; if (ulp_status == IOSTAT_NPORT_BSY) fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; else fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; memcpy(fabric_event.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(fabric_event), (char *)&fabric_event, LPFC_NL_VENDOR_ID); return; } } /** * lpfc_send_els_event - Posts unsolicited els event * @vport: Pointer to vport object. * @ndlp: Pointer FC node object. * @payload: ELS command code type. * * This function posts an event when there is an incoming * unsolicited ELS command. 
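 *
 * Only PLOGI, PRLO, ADISC and LOGO commands generate an event; for a LOGO
 * the remote WWPN from the LOGO payload is included in the event data.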
**/ static void lpfc_send_els_event(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t *payload) { struct lpfc_els_event_header *els_data = NULL; struct lpfc_logo_event *logo_data = NULL; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (*payload == ELS_CMD_LOGO) { logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); if (!logo_data) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0148 Failed to allocate memory " "for LOGO event\n"); return; } els_data = &logo_data->header; } else { els_data = kmalloc(sizeof(struct lpfc_els_event_header), GFP_KERNEL); if (!els_data) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0149 Failed to allocate memory " "for ELS event\n"); return; } } els_data->event_type = FC_REG_ELS_EVENT; switch (*payload) { case ELS_CMD_PLOGI: els_data->subcategory = LPFC_EVENT_PLOGI_RCV; break; case ELS_CMD_PRLO: els_data->subcategory = LPFC_EVENT_PRLO_RCV; break; case ELS_CMD_ADISC: els_data->subcategory = LPFC_EVENT_ADISC_RCV; break; case ELS_CMD_LOGO: els_data->subcategory = LPFC_EVENT_LOGO_RCV; /* Copy the WWPN in the LOGO payload */ memcpy(logo_data->logo_wwpn, &payload[2], sizeof(struct lpfc_name)); break; default: kfree(els_data); return; } memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); if (*payload == ELS_CMD_LOGO) { fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(struct lpfc_logo_event), (char *)logo_data, LPFC_NL_VENDOR_ID); kfree(logo_data); } else { fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(struct lpfc_els_event_header), (char *)els_data, LPFC_NL_VENDOR_ID); kfree(els_data); } return; } DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, FC_FPIN_LI_EVT_TYPES_INIT); DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, FC_FPIN_DELI_EVT_TYPES_INIT); DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, FC_FPIN_CONGN_EVT_TYPES_INIT); DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, fc_fpin_congn_severity_types, FC_FPIN_CONGN_SEVERITY_INIT); /** * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port * @phba: Pointer to phba object. * @wwnlist: Pointer to list of WWPNs in FPIN payload * @cnt: count of WWPNs in FPIN payload * * This routine is called by LI and PC descriptors. * Limit the number of WWPNs displayed to 6 log messages, 6 per log message */ static void lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) { char buf[LPFC_FPIN_WWPN_LINE_SZ]; __be64 wwn; u64 wwpn; int i, len; int line = 0; int wcnt = 0; bool endit = false; len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); for (i = 0; i < cnt; i++) { /* Are we on the last WWPN */ if (i == (cnt - 1)) endit = true; /* Extract the next WWPN from the payload */ wwn = *wwnlist++; wwpn = be64_to_cpu(wwn); len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, " %016llx", wwpn); /* Log a message if we are on the last WWPN * or if we hit the max allowed per message. 
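		 * At most LPFC_FPIN_WWPN_NUM_LINE log messages are emitted per
		 * FPIN; any remaining WWPNs are reported as truncated.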
*/ wcnt++; if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { buf[len] = 0; lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "4686 %s\n", buf); /* Check if we reached the last WWPN */ if (endit) return; /* Limit the number of log message displayed per FPIN */ line++; if (line == LPFC_FPIN_WWPN_NUM_LINE) { lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "4687 %d WWPNs Truncated\n", cnt - i - 1); return; } /* Start over with next log message */ wcnt = 0; len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Additional WWPNs:"); } } } /** * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. * @phba: Pointer to phba object. * @tlv: Pointer to the Link Integrity Notification Descriptor. * * This function processes a Link Integrity FPIN event by logging a message. **/ static void lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; const char *li_evt_str; u32 li_evt, cnt; li_evt = be16_to_cpu(li->event_type); li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); cnt = be32_to_cpu(li->pname_count); lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "4680 FPIN Link Integrity %s (x%x) " "Detecting PN x%016llx Attached PN x%016llx " "Duration %d mSecs Count %d Port Cnt %d\n", li_evt_str, li_evt, be64_to_cpu(li->detecting_wwpn), be64_to_cpu(li->attached_wwpn), be32_to_cpu(li->event_threshold), be32_to_cpu(li->event_count), cnt); lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); } /** * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. * @phba: Pointer to hba object. * @tlv: Pointer to the Delivery Notification Descriptor TLV * * This function processes a Delivery FPIN event by logging a message. **/ static void lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; const char *del_rsn_str; u32 del_rsn; __be32 *frame; del_rsn = be16_to_cpu(del->deli_reason_code); del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); /* Skip over desc_tag/desc_len header to payload */ frame = (__be32 *)(del + 1); lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "4681 FPIN Delivery %s (x%x) " "Detecting PN x%016llx Attached PN x%016llx " "DiscHdr0 x%08x " "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " "DiscHdr4 x%08x DiscHdr5 x%08x\n", del_rsn_str, del_rsn, be64_to_cpu(del->detecting_wwpn), be64_to_cpu(del->attached_wwpn), be32_to_cpu(frame[0]), be32_to_cpu(frame[1]), be32_to_cpu(frame[2]), be32_to_cpu(frame[3]), be32_to_cpu(frame[4]), be32_to_cpu(frame[5])); } /** * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. * @phba: Pointer to hba object. * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV * * This function processes a Peer Congestion FPIN event by logging a message. 
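 * The list of impacted port WWPNs carried in the descriptor is also dumped
 * through lpfc_display_fpin_wwpn().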
**/ static void lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; const char *pc_evt_str; u32 pc_evt, cnt; pc_evt = be16_to_cpu(pc->event_type); pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); cnt = be32_to_cpu(pc->pname_count); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, "4684 FPIN Peer Congestion %s (x%x) " "Duration %d mSecs " "Detecting PN x%016llx Attached PN x%016llx " "Impacted Port Cnt %d\n", pc_evt_str, pc_evt, be32_to_cpu(pc->event_period), be64_to_cpu(pc->detecting_wwpn), be64_to_cpu(pc->attached_wwpn), cnt); lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); } /** * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification * @phba: Pointer to hba object. * @tlv: Pointer to the Congestion Notification Descriptor TLV * * This function processes an FPIN Congestion Notifiction. The notification * could be an Alarm or Warning. This routine feeds that data into driver's * running congestion algorithm. It also processes the FPIN by * logging a message. It returns 1 to indicate deliver this message * to the upper layer or 0 to indicate don't deliver it. **/ static int lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) { struct lpfc_cgn_info *cp; struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; const char *cgn_evt_str; u32 cgn_evt; const char *cgn_sev_str; u32 cgn_sev; uint16_t value; u32 crc; bool nm_log = false; int rc = 1; cgn_evt = be16_to_cpu(cgn->event_type); cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); cgn_sev = cgn->severity; cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); /* The driver only takes action on a Credit Stall or Oversubscription * event type to engage the IO algorithm. The driver prints an * unmaskable message only for Lost Credit and Credit Stall. * TODO: Still need to have definition of host action on clear, * lost credit and device specific event types. */ switch (cgn_evt) { case FPIN_CONGN_LOST_CREDIT: nm_log = true; break; case FPIN_CONGN_CREDIT_STALL: nm_log = true; fallthrough; case FPIN_CONGN_OVERSUBSCRIPTION: if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) nm_log = false; switch (cgn_sev) { case FPIN_CONGN_SEVERITY_ERROR: /* Take action here for an Alarm event */ if (phba->cmf_active_mode != LPFC_CFG_OFF) { if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { /* Track of alarm cnt for SYNC_WQE */ atomic_inc(&phba->cgn_sync_alarm_cnt); } /* Track alarm cnt for cgn_info regardless * of whether CMF is configured for Signals * or FPINs. */ atomic_inc(&phba->cgn_fabric_alarm_cnt); goto cleanup; } break; case FPIN_CONGN_SEVERITY_WARNING: /* Take action here for a Warning event */ if (phba->cmf_active_mode != LPFC_CFG_OFF) { if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { /* Track of warning cnt for SYNC_WQE */ atomic_inc(&phba->cgn_sync_warn_cnt); } /* Track warning cnt and freq for cgn_info * regardless of whether CMF is configured for * Signals or FPINs. */ atomic_inc(&phba->cgn_fabric_warn_cnt); cleanup: /* Save frequency in ms */ phba->cgn_fpin_frequency = be32_to_cpu(cgn->event_period); value = phba->cgn_fpin_frequency; if (phba->cgn_i) { cp = (struct lpfc_cgn_info *) phba->cgn_i->virt; cp->cgn_alarm_freq = cpu_to_le16(value); cp->cgn_warn_freq = cpu_to_le16(value); crc = lpfc_cgn_calc_crc32 (cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(crc); } /* Don't deliver to upper layer since * driver took action on this tlv. 
*/ rc = 0; } break; } break; } /* Change the log level to unmaskable for the following event types. */ lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), LOG_CGN_MGMT | LOG_ELS, "4683 FPIN CONGESTION %s type %s (x%x) Event " "Duration %d mSecs\n", cgn_sev_str, cgn_evt_str, cgn_evt, be32_to_cpu(cgn->event_period)); return rc; } void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) { struct lpfc_hba *phba = vport->phba; struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; const char *dtag_nm; int desc_cnt = 0, bytes_remain, cnt; u32 dtag, deliver = 0; int len; /* FPINs handled only if we are in the right discovery state */ if (vport->port_state < LPFC_DISC_AUTH) return; /* make sure there is the full fpin header */ if (fpin_length < sizeof(struct fc_els_fpin)) return; /* Sanity check descriptor length. The desc_len value does not * include space for the ELS command and the desc_len fields. */ len = be32_to_cpu(fpin->desc_len); if (fpin_length < len + sizeof(struct fc_els_fpin)) { lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "4671 Bad ELS FPIN length %d: %d\n", len, fpin_length); return; } tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; first_tlv = tlv; bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); /* process each descriptor separately */ while (bytes_remain >= FC_TLV_DESC_HDR_SZ && bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { dtag = be32_to_cpu(tlv->desc_tag); switch (dtag) { case ELS_DTAG_LNK_INTEGRITY: lpfc_els_rcv_fpin_li(phba, tlv); deliver = 1; break; case ELS_DTAG_DELIVERY: lpfc_els_rcv_fpin_del(phba, tlv); deliver = 1; break; case ELS_DTAG_PEER_CONGEST: lpfc_els_rcv_fpin_peer_cgn(phba, tlv); deliver = 1; break; case ELS_DTAG_CONGESTION: deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); break; default: dtag_nm = lpfc_get_tlv_dtag_nm(dtag); lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "4678 unknown FPIN descriptor[%d]: " "tag x%x (%s)\n", desc_cnt, dtag, dtag_nm); /* If descriptor is bad, drop the rest of the data */ return; } lpfc_cgn_update_stat(phba, dtag); cnt = be32_to_cpu(tlv->desc_len); /* Sanity check descriptor length. The desc_len value does not * include space for the desc_tag and the desc_len fields. */ len -= (cnt + sizeof(struct fc_tlv_desc)); if (len < 0) { dtag_nm = lpfc_get_tlv_dtag_nm(dtag); lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, "4672 Bad FPIN descriptor TLV length " "%d: %d %d %s\n", cnt, len, fpin_length, dtag_nm); return; } current_tlv = tlv; bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); tlv = fc_tlv_next_desc(tlv); /* Format payload such that the FPIN delivered to the * upper layer is a single descriptor FPIN. */ if (desc_cnt) memcpy(first_tlv, current_tlv, (cnt + sizeof(struct fc_els_fpin))); /* Adjust the length so that it only reflects a * single descriptor FPIN. */ fpin_length = cnt + sizeof(struct fc_els_fpin); fpin->desc_len = cpu_to_be32(fpin_length); fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ /* Send every descriptor individually to the upper layer */ if (deliver) fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length, (char *)fpin, 0); desc_cnt++; } } /** * lpfc_els_unsol_buffer - Process an unsolicited event data buffer * @phba: pointer to lpfc hba data structure. * @pring: pointer to a SLI ring. * @vport: pointer to a host virtual N_Port data structure. * @elsiocb: pointer to lpfc els command iocb data structure. 
* * This routine is used for processing the IOCB associated with a unsolicited * event. It first determines whether there is an existing ndlp that matches * the DID from the unsolicited IOCB. If not, it will create a new one with * the DID from the unsolicited IOCB. The ELS command from the unsolicited * IOCB is then used to invoke the proper routine and to set up proper state * of the discovery state machine. **/ static void lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) { struct lpfc_nodelist *ndlp; struct ls_rjt stat; u32 *payload, payload_len; u32 cmd = 0, did = 0, newnode, status = 0; uint8_t rjt_exp, rjt_err = 0, init_link = 0; struct lpfc_wcqe_complete *wcqe_cmpl = NULL; LPFC_MBOXQ_t *mbox; if (!vport || !elsiocb->cmd_dmabuf) goto dropit; newnode = 0; wcqe_cmpl = &elsiocb->wcqe_cmpl; payload = elsiocb->cmd_dmabuf->virt; if (phba->sli_rev == LPFC_SLI_REV4) payload_len = wcqe_cmpl->total_data_placed; else payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; status = get_job_ulpstatus(phba, elsiocb); cmd = *payload; if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) lpfc_sli3_post_buffer(phba, pring, 1); did = get_job_els_rsp64_did(phba, elsiocb); if (status) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV Unsol ELS: status:x%x/x%x did:x%x", status, get_job_word4(phba, elsiocb), did); goto dropit; } /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) goto dropit; /* Ignore traffic received during vport shutdown. */ if (vport->load_flag & FC_UNLOADING) goto dropit; /* If NPort discovery is delayed drop incoming ELS */ if ((vport->fc_flag & FC_DISC_DELAYED) && (cmd != ELS_CMD_PLOGI)) goto dropit; ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = lpfc_nlp_init(vport, did); if (!ndlp) goto dropit; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); newnode = 1; if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) ndlp->nlp_type |= NLP_FABRIC; } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); newnode = 1; } phba->fc_stat.elsRcvFrame++; /* * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ spin_lock_irq(&ndlp->lock); if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { spin_unlock_irq(&ndlp->lock); if (newnode) lpfc_nlp_put(ndlp); goto dropit; } spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) goto dropit; elsiocb->vport = vport; if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { cmd &= ELS_CMD_MASK; } /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " "refcnt %d Data: x%x x%x x%x x%x\n", cmd, did, kref_read(&ndlp->kref), vport->port_state, vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && (cmd != ELS_CMD_FLOGI) && !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { rjt_err = LSRJT_LOGICAL_BSY; rjt_exp = LSEXP_NOTHING_MORE; goto lsrjt; } switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV PLOGI: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); if (phba->sli_rev == LPFC_SLI_REV4 && (phba->pport->fc_flag & FC_PT2PT)) { vport->fc_prevDID = 
vport->fc_myDID; /* Our DID needs to be updated before registering * the vfi. This is done in lpfc_rcv_plogi but * that is called after the reg_vfi. */ vport->fc_myDID = bf_get(els_rsp64_sid, &elsiocb->wqe.xmit_els_rsp); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3312 Remote port assigned DID x%x " "%x\n", vport->fc_myDID, vport->fc_prevDID); } lpfc_send_els_event(vport, ndlp, payload); /* If Nport discovery is delayed, reject PLOGIs */ if (vport->fc_flag & FC_DISC_DELAYED) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } } spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); break; case ELS_CMD_FLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV FLOGI: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; /* If the driver believes fabric discovery is done and is ready, * bounce the link. There is some descrepancy. */ if (vport->port_state >= LPFC_LOCAL_CFG_LINK && vport->fc_flag & FC_PT2PT && vport->rcv_flogi_cnt >= 1) { rjt_err = LSRJT_LOGICAL_BSY; rjt_exp = LSEXP_NOTHING_MORE; init_link++; goto lsrjt; } lpfc_els_rcv_flogi(vport, elsiocb, ndlp); /* retain node if our response is deferred */ if (phba->defer_flogi_acc_flag) break; if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV LOGO: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLOGO++; lpfc_send_els_event(vport, ndlp, payload); if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV PRLO: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLO++; lpfc_send_els_event(vport, ndlp, payload); if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); break; case ELS_CMD_LCB: phba->fc_stat.elsRcvLCB++; lpfc_els_rcv_lcb(vport, elsiocb, ndlp); break; case ELS_CMD_RDP: phba->fc_stat.elsRcvRDP++; lpfc_els_rcv_rdp(vport, elsiocb, ndlp); break; case ELS_CMD_RSCN: phba->fc_stat.elsRcvRSCN++; lpfc_els_rcv_rscn(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ADISC: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); lpfc_send_els_event(vport, ndlp, payload); phba->fc_stat.elsRcvADISC++; if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_ADISC); break; case ELS_CMD_PDISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV PDISC: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPDISC++; if (vport->port_state < LPFC_DISC_AUTH) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } 
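		/* Hand the PDISC to the discovery state machine */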
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PDISC); break; case ELS_CMD_FARPR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV FARPR: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARPR++; lpfc_els_rcv_farpr(vport, elsiocb, ndlp); break; case ELS_CMD_FARP: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV FARP: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARP++; lpfc_els_rcv_farp(vport, elsiocb, ndlp); break; case ELS_CMD_FAN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV FAN: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFAN++; lpfc_els_rcv_fan(vport, elsiocb, ndlp); break; case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV PRLI: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; if ((vport->port_state < LPFC_DISC_AUTH) && (vport->fc_flag & FC_FABRIC)) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); break; case ELS_CMD_LIRR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV LIRR: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLIRR++; lpfc_els_rcv_lirr(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_RLS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RLS: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRLS++; lpfc_els_rcv_rls(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RPL: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRPL++; lpfc_els_rcv_rpl(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RNID: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRNID++; lpfc_els_rcv_rnid(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_RTV: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RTV: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRTV++; lpfc_els_rcv_rtv(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_RRQ: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RRQ: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRRQ++; lpfc_els_rcv_rrq(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_ECHO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ECHO: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvECHO++; lpfc_els_rcv_echo(vport, elsiocb, ndlp); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; case ELS_CMD_REC: /* receive this due to exchange closed */ rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_INVALID_OX_RX; break; case ELS_CMD_FPIN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV FPIN: did:x%x/ste:x%x flg:x%x", did, vport->port_state, ndlp->nlp_flag); 
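		/* Validate each FPIN descriptor and forward it to the FC
		 * transport one descriptor at a time.
		 */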
lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, payload_len); /* There are no replies, so no rjt codes */ break; case ELS_CMD_EDC: lpfc_els_rcv_edc(vport, elsiocb, ndlp); break; case ELS_CMD_RDF: phba->fc_stat.elsRcvRDF++; /* Accept RDF only from fabric controller */ if (did != Fabric_Cntl_DID) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, "1115 Received RDF from invalid DID " "x%x\n", did); rjt_err = LSRJT_PROTOCOL_ERR; rjt_exp = LSEXP_NOTHING_MORE; goto lsrjt; } lpfc_els_rcv_rdf(vport, elsiocb, ndlp); break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", cmd, did, vport->port_state); /* Unsupported ELS command, reject */ rjt_err = LSRJT_CMD_UNSUPPORTED; rjt_exp = LSEXP_NOTHING_MORE; /* Unknown ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0115 Unknown ELS command x%x " "received from NPORT x%x\n", cmd, did); if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); break; } lsrjt: /* check if need to LS_RJT received ELS cmd */ if (rjt_err) { memset(&stat, 0, sizeof(stat)); stat.un.b.lsRjtRsnCode = rjt_err; stat.un.b.lsRjtRsnCodeExp = rjt_exp; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, NULL); /* Remove the reference from above for new nodes. */ if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } /* Release the reference on this elsiocb, not the ndlp. */ lpfc_nlp_put(elsiocb->ndlp); elsiocb->ndlp = NULL; /* Special case. Driver received an unsolicited command that * unsupportable given the driver's current state. Reset the * link and start over. */ if (init_link) { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return; lpfc_linkdown(phba); lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) mempool_free(mbox, phba->mbox_mem_pool); } return; dropit: if (vport && !(vport->load_flag & FC_UNLOADING)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0111 Dropping received ELS cmd " "Data: x%x x%x x%x x%x\n", cmd, status, get_job_word4(phba, elsiocb), did); phba->fc_stat.elsRcvDrop++; } /** * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring * @phba: pointer to lpfc hba data structure. * @pring: pointer to a SLI ring. * @elsiocb: pointer to lpfc els iocb data structure. * * This routine is used to process an unsolicited event received from a SLI * (Service Level Interface) ring. The actual processing of the data buffer * associated with the unsolicited event is done by invoking the routine * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the * SLI ring on which the unsolicited event was received. 
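 *
 * For HBQ-enabled configurations the receive DMA buffers arrive attached to
 * the IOCB; otherwise they are looked up by their physical address with
 * lpfc_sli_ringpostbuf_get(). Any buffer still attached after the handler
 * returns is freed here.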
**/ void lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *elsiocb) { struct lpfc_vport *vport = elsiocb->vport; u32 ulp_command, status, parameter, bde_count = 0; IOCB_t *icmd; struct lpfc_wcqe_complete *wcqe_cmpl = NULL; struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; dma_addr_t paddr; elsiocb->cmd_dmabuf = NULL; elsiocb->rsp_dmabuf = NULL; elsiocb->bpl_dmabuf = NULL; wcqe_cmpl = &elsiocb->wcqe_cmpl; ulp_command = get_job_cmnd(phba, elsiocb); status = get_job_ulpstatus(phba, elsiocb); parameter = get_job_word4(phba, elsiocb); if (phba->sli_rev == LPFC_SLI_REV4) bde_count = wcqe_cmpl->word3; else bde_count = elsiocb->iocb.ulpBdeCount; if (status == IOSTAT_NEED_BUFFER) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } else if (status == IOSTAT_LOCAL_REJECT && (parameter & IOERR_PARAM_MASK) == IOERR_RCV_BUFFER_WAITING) { phba->fc_stat.NoRcvBuf++; /* Not enough posted buffers; Try posting more buffers */ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) lpfc_sli3_post_buffer(phba, pring, 0); return; } if (phba->sli_rev == LPFC_SLI_REV3) { icmd = &elsiocb->iocb; if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (ulp_command == CMD_IOCB_RCV_ELS64_CX || ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { if (icmd->unsli3.rcvsli3.vpi == 0xffff) vport = phba->pport; else vport = lpfc_find_vport_by_vpid(phba, icmd->unsli3.rcvsli3.vpi); } } /* If there are no BDEs associated * with this IOCB, there is nothing to do. */ if (bde_count == 0) return; /* Account for SLI2 or SLI3 and later unsolicited buffering */ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { elsiocb->cmd_dmabuf = bdeBuf1; if (bde_count == 2) elsiocb->bpl_dmabuf = bdeBuf2; } else { icmd = &elsiocb->iocb; paddr = getPaddr(icmd->un.cont64[0].addrHigh, icmd->un.cont64[0].addrLow); elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, paddr); if (bde_count == 2) { paddr = getPaddr(icmd->un.cont64[1].addrHigh, icmd->un.cont64[1].addrLow); elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, paddr); } } lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); /* * The different unsolicited event handlers would tell us * if they are done with "mp" by setting cmd_dmabuf to NULL. */ if (elsiocb->cmd_dmabuf) { lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); elsiocb->cmd_dmabuf = NULL; } if (elsiocb->bpl_dmabuf) { lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); elsiocb->bpl_dmabuf = NULL; } } static void lpfc_start_fdmi(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; /* If this is the first time, allocate an ndlp and initialize * it. Otherwise, make sure the node is enabled and then do the * login. */ ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp) { ndlp = lpfc_nlp_init(vport, FDMI_DID); if (ndlp) { ndlp->nlp_type |= NLP_FABRIC; } else { return; } } lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } /** * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr * @phba: pointer to lpfc hba data structure. * @vport: pointer to a virtual N_Port data structure. * * This routine issues a Port Login (PLOGI) to the Name Server with * State Change Request (SCR) for a @vport. This routine will create an * ndlp for the Name Server associated to the @vport if such node does * not already exist. The PLOGI to Name Server is issued by invoking the * lpfc_issue_els_plogi() routine. 
If Fabric-Device Management Interface
 * (FDMI) is configured for the @vport, an FDMI node will be created and
 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
 **/
void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * If the lpfc_delay_discovery parameter is set, the clean address bit
	 * is cleared and the FC fabric parameters changed, delay FC NPort
	 * discovery.
	 */
	spin_lock_irq(shost->host_lock);
	if (vport->fc_flag & FC_DISC_DELAYED) {
		spin_unlock_irq(shost->host_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "3334 Delay fc port discovery for %d secs\n",
				 phba->fc_ratov);
		mod_timer(&vport->delayed_disc_tmo,
			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
		return;
	}
	spin_unlock_irq(shost->host_lock);

	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, NameServer_DID);
		if (!ndlp) {
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_disc_start(vport);
				return;
			}
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0251 NameServer login: no memory\n");
			return;
		}
	}

	ndlp->nlp_type |= NLP_FABRIC;

	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0252 Cannot issue NameServer login\n");
		return;
	}

	if ((phba->cfg_enable_SmartSAN ||
	     (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
	    (vport->load_flag & FC_ALLOW_FDMI))
		lpfc_start_fdmi(vport);
}

/**
 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function to the register new vport
 * mailbox command. If the new vport mailbox command completes successfully,
 * the fabric registration login shall be performed on the physical port (the
 * new vport created is actually a physical port, with VPI 0) or the port
 * login to the Name Server for State Change Request (SCR) will be performed
 * on the virtual port (a real virtual port, with VPI greater than 0).
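 *
 * On a mailbox error, an invalid VPI status (0x20) causes the VPI to be
 * re-initialized with an INIT_VPI mailbox command; unsupported feature,
 * max_vpi exceeded and link-event statuses mark the vport FC_VPORT_FAILED;
 * any other error unregisters the VPI and restarts fabric discovery.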
**/ static void lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; MAILBOX_t *mb = &pmb->u.mb; int rc; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0915 Register VPI failed : Status: x%x" " upd bit: x%x \n", mb->mbxStatus, mb->un.varRegVpi.upd); if (phba->sli_rev == LPFC_SLI_REV4 && mb->un.varRegVpi.upd) goto mbox_err_exit ; switch (mb->mbxStatus) { case 0x11: /* unsupported feature */ case 0x9603: /* max_vpi exceeded */ case 0x9602: /* Link event since CLEAR_LA */ /* giving up on vport registration */ lpfc_vport_set_state(vport, FC_VPORT_FAILED); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); break; /* If reg_vpi fail with invalid VPI status, re-init VPI */ case 0x20: spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); lpfc_init_vpi(phba, pmb, vport->vpi); pmb->vport = vport; pmb->mbox_cmpl = lpfc_init_vpi_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2732 Failed to issue INIT_VPI" " mailbox command\n"); } else { lpfc_nlp_put(ndlp); return; } fallthrough; default: /* Try to recover from this error */ if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); if (mb->mbxStatus == MBX_NOT_FINISHED) break; if ((vport->port_type == LPFC_PHYSICAL_PORT) && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { if (phba->sli_rev == LPFC_SLI_REV4) lpfc_issue_init_vfi(vport); else lpfc_initial_flogi(vport); } else { lpfc_initial_fdisc(vport); } break; } } else { spin_lock_irq(shost->host_lock); vport->vpi_state |= LPFC_VPI_REGISTERED; spin_unlock_irq(shost->host_lock); if (vport == phba->pport) { if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); else { /* * If the physical port is instantiated using * FDISC, do not start vport discovery. */ if (vport->port_state != LPFC_FDISC) lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } } else { lpfc_do_scr_ns_plogi(phba, vport); } } mbox_err_exit: /* Now, we decrement the ndlp reference count held for this * callback function */ lpfc_nlp_put(ndlp); mempool_free(pmb, phba->mbox_mem_pool); return; } /** * lpfc_register_new_vport - Register a new vport with a HBA * @phba: pointer to lpfc hba data structure. * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * * This routine registers the @vport as a new virtual port with a HBA. * It is done through a registering vpi mailbox command. 
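 * If the mailbox cannot be allocated or issued, the vport is set to
 * FC_VPORT_FAILED and the FC_VPORT_NEEDS_REG_VPI flag is cleared.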
 **/
void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
			struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_reg_vpi(vport, mbox);
		mbox->vport = vport;
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		if (!mbox->ctx_ndlp) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto mbox_err_exit;
		}

		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			/* mailbox command did not succeed; decrement the ndlp
			 * reference count held for this command
			 */
			lpfc_nlp_put(ndlp);
			mempool_free(mbox, phba->mbox_mem_pool);

			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"0253 Register VPI: Can't send mbox\n");
			goto mbox_err_exit;
		}
	} else {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0254 Register VPI: no memory\n");
		goto mbox_err_exit;
	}
	return;

mbox_err_exit:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	return;
}

/**
 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timers
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine cancels the retry delay timers of all the vports.
 **/
void
lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	uint32_t link_state;
	int i;

	/* Treat this failure as linkdown for all vports */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	vports = lpfc_create_vport_work_array(phba);

	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_els_flush_cmd(vports[i]);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	}
}

/**
 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all pending discovery commands and
 * starts a timer to retry FLOGI for the physical port
 * discovery.
 **/
void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp;

	/* Cancel all the vports' retry delay timers */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	/* If the fabric requires FLOGI, then re-instantiate the physical
	 * login
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp)
		return;

	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(&ndlp->lock);
	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
	phba->pport->port_state = LPFC_FLOGI;
	return;
}

/**
 * lpfc_fabric_login_reqd - Check if FLOGI required.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to FDISC command iocb.
 * @rspiocb: pointer to FDISC response iocb.
 *
 * This routine checks if a FLOGI is required for FDISC
 * to succeed.
 **/
static int
lpfc_fabric_login_reqd(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	if (ulp_status != IOSTAT_FABRIC_RJT ||
	    ulp_word4 != RJT_LOGIN_REQUIRED)
		return 0;
	else
		return 1;
}

/**
 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
* * This routine is the completion callback function to a Fabric Discover * (FDISC) ELS command. Since all the FDISC ELS commands are issued * single threaded, each FDISC completion callback function will reset * the discovery timer for all vports such that the timers will not get * unnecessary timeout. The function checks the FDISC IOCB status. If error * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID * assigned to the vport has been changed with the completion of the FDISC * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) * are unregistered from the HBA, and then the lpfc_register_new_vport() * routine is invoked to register new vport with the HBA. Otherwise, the * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name * Server for State Change Request (SCR). **/ static void lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_nodelist *np; struct lpfc_nodelist *next_np; struct lpfc_iocbq *piocb; struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; struct serv_parm *sp; uint8_t fabric_param_changed; u32 ulp_status, ulp_word4; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0123 FDISC completes. x%x/x%x prevDID: x%x\n", ulp_status, ulp_word4, vport->fc_prevDID); /* Since all FDISCs are being single threaded, we * must reset the discovery timer for ALL vports * waiting to send FDISC when one completes. */ list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { lpfc_set_disctmo(piocb->vport); } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "FDISC cmpl: status:x%x/x%x prevdid:x%x", ulp_status, ulp_word4, vport->fc_prevDID); if (ulp_status) { if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { lpfc_retry_pport_discovery(phba); goto out; } /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; /* FDISC failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0126 FDISC failed. (x%x/x%x)\n", ulp_status, ulp_word4); goto fdisc_failed; } lpfc_check_nlp_post_devloss(vport, ndlp); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_CVL_RCVD; vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; vport->fc_flag |= FC_FABRIC; if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) vport->fc_flag |= FC_PUBLIC_LOOP; spin_unlock_irq(shost->host_lock); vport->fc_myDID = ulp_word4 & Mask_DID; lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); if (!prsp) goto out; sp = prsp->virt + sizeof(uint32_t); fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); memcpy(&vport->fabric_portname, &sp->portName, sizeof(struct lpfc_name)); memcpy(&vport->fabric_nodename, &sp->nodeName, sizeof(struct lpfc_name)); if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all * remaining NPORTs get unreg_login'ed so we can * issue unreg_vpi. 
*/ list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || !(np->nlp_flag & NLP_NPR_ADISC)) continue; spin_lock_irq(&ndlp->lock); np->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; if (phba->sli_rev == LPFC_SLI_REV4) vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; else vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; spin_unlock_irq(shost->host_lock); } else if ((phba->sli_rev == LPFC_SLI_REV4) && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* * Driver needs to re-reg VPI in order for f/w * to update the MAC address. */ lpfc_register_new_vport(phba, vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); goto out; } if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) lpfc_issue_init_vpi(vport); else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) lpfc_register_new_vport(phba, vport, ndlp); else lpfc_do_scr_ns_plogi(phba, vport); /* The FDISC completed successfully. Move the fabric ndlp to * UNMAPPED state and register with the transport. */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); goto out; fdisc_failed: if (vport->fc_vport && (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) lpfc_vport_set_state(vport, FC_VPORT_FAILED); /* Cancel discovery timer */ lpfc_can_disctmo(vport); out: lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_issue_els_fdisc - Issue a fdisc iocb command * @vport: pointer to a virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @retry: number of retries to the command IOCB. * * This routine prepares and issues a Fabric Discover (FDISC) IOCB to * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() * routine to issue the IOCB, which makes sure only one outstanding fabric * IOCB will be sent off HBA at any given time. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the FDISC ELS command. 
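 *
 * For illustration, a discovery path would invoke it roughly as:
 *
 *     rc = lpfc_issue_els_fdisc(vport, ndlp, 0);
 *
 * where a non-zero return means the FDISC could not be sent and the vport
 * has already been moved to FC_VPORT_FAILED.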
* * Return code * 0 - Successfully issued fdisc iocb command * 1 - Failed to issue fdisc iocb command **/ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; union lpfc_wqe128 *wqe = NULL; struct lpfc_iocbq *elsiocb; struct serv_parm *sp; uint8_t *pcmd; uint16_t cmdsize; int did = ndlp->nlp_DID; int rc; vport->port_state = LPFC_FDISC; vport->fc_myDID = 0; cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, ELS_CMD_FDISC); if (!elsiocb) { lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0255 Issue FDISC: no IOCB\n"); return 1; } if (phba->sli_rev == LPFC_SLI_REV4) { wqe = &elsiocb->wqe; bf_set(els_req64_sid, &wqe->els_req, 0); bf_set(els_req64_sp, &wqe->els_req, 1); } else { icmd = &elsiocb->iocb; icmd->un.elsreq64.myID = 0; icmd->un.elsreq64.fl = 1; icmd->ulpCt_h = 1; icmd->ulpCt_l = 0; } pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; pcmd += sizeof(uint32_t); /* CSP Word 1 */ memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); sp = (struct serv_parm *) pcmd; /* Setup CSPs accordingly for Fabric */ sp->cmn.e_d_tov = 0; sp->cmn.w2.r_a_tov = 0; sp->cmn.virtual_fabric_support = 0; sp->cls1.classValid = 0; sp->cls2.seqDelivery = 1; sp->cls3.seqDelivery = 1; pcmd += sizeof(uint32_t); /* CSP Word 2 */ pcmd += sizeof(uint32_t); /* CSP Word 3 */ pcmd += sizeof(uint32_t); /* CSP Word 4 */ pcmd += sizeof(uint32_t); /* Port Name */ memcpy(pcmd, &vport->fc_portname, 8); pcmd += sizeof(uint32_t); /* Node Name */ pcmd += sizeof(uint32_t); /* Node Name */ memcpy(pcmd, &vport->fc_nodename, 8); sp->cmn.valid_vendor_ver_level = 0; memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); lpfc_set_disctmo(vport); phba->fc_stat.elsXmitFDISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue FDISC: did:x%x", did, 0, 0); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) goto err_out; rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { lpfc_nlp_put(ndlp); goto err_out; } lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); return 0; err_out: lpfc_els_free_iocb(phba, elsiocb); lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0256 Issue FDISC: Cannot send IOCB\n"); return 1; } /** * lpfc_cmpl_els_npiv_logo - Completion function with vport logo * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the completion callback function to the issuing of a LOGO * ELS command off a vport. It frees the command IOCB and then decrement the * reference count held on ndlp for this completion function, indicating that * the reference to the ndlp is no long needed. Note that the * lpfc_els_free_iocb() routine decrements the ndlp reference held for this * callback function and an additional explicit ndlp reference decrementation * will trigger the actual release of the ndlp. 
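 * If a vport delete is waiting on this LOGO (NLP_WAIT_FOR_LOGO is set in
 * ndlp->save_flags), the completion also wakes the waiter on
 * ndlp->logo_waitq before the iocb and ndlp references are released.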
**/ static void lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); u32 ulp_status, ulp_word4, did, tmo; ndlp = cmdiocb->ndlp; ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); if (phba->sli_rev == LPFC_SLI_REV4) { did = get_job_els_rsp64_did(phba, cmdiocb); tmo = get_wqe_tmo(cmdiocb); } else { irsp = &rspiocb->iocb; did = get_job_els_rsp64_did(phba, rspiocb); tmo = irsp->ulpTimeout; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "LOGO npiv cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* NPIV LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2928 NPIV LOGO completes to NPort x%x " "Data: x%x x%x x%x x%x x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes, kref_read(&ndlp->kref), ndlp->nlp_flag, ndlp->fc4_xpt_flags); if (ulp_status == IOSTAT_SUCCESS) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; vport->fc_flag &= ~FC_FABRIC; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); } if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { /* Wake up lpfc_vport_delete if waiting...*/ if (ndlp->logo_waitq) wake_up(ndlp->logo_waitq); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); } /* Safe to release resources now. */ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_issue_els_npiv_logo - Issue a logo off a vport * @vport: pointer to a virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * * This routine issues a LOGO ELS command to an @ndlp off a @vport. * * Note that the ndlp reference count will be incremented by 1 for holding the * ndlp and the reference to ndlp will be stored into the ndlp field of * the IOCB for the completion callback function to the LOGO ELS command. 
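 *
 * For illustration, a caller logging the vport out of the fabric would do
 * roughly:
 *
 *     ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *     if (ndlp)
 *             rc = lpfc_issue_els_npiv_logo(vport, ndlp);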
* * Return codes * 0 - Successfully issued logo off the @vport * 1 - Failed to issue logo off the @vport **/ int lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { int rc = 0; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; uint16_t cmdsize; cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, ELS_CMD_LOGO); if (!elsiocb) return 1; pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; pcmd += sizeof(uint32_t); /* Fill in LOGO payload */ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); pcmd += sizeof(uint32_t); memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue LOGO npiv did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_LOGO_SND; spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); goto err; } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); goto err; } return 0; err: spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; spin_unlock_irq(&ndlp->lock); return 1; } /** * lpfc_fabric_block_timeout - Handler function to the fabric block timer * @t: timer context used to obtain the lpfc hba. * * This routine is invoked by the fabric iocb block timer after * timeout. It posts the fabric iocb block timeout event by setting the * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes * lpfc_worker_wake_up() routine to wake up the worker thread. It is for * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the * posted event WORKER_FABRIC_BLOCK_TMO. **/ void lpfc_fabric_block_timeout(struct timer_list *t) { struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); unsigned long iflags; uint32_t tmo_posted; spin_lock_irqsave(&phba->pport->work_port_lock, iflags); tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; if (!tmo_posted) phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); if (!tmo_posted) lpfc_worker_wake_up(phba); return; } /** * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list * @phba: pointer to lpfc hba data structure. * * This routine issues one fabric iocb from the driver internal list to * the HBA. It first checks whether it's ready to issue one fabric iocb to * the HBA (whether there is no outstanding fabric iocb). If so, it shall * remove one pending fabric iocb from the driver internal list and invokes * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
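 * If the issue attempt fails, the iocb is completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the routine loops to try the
 * next pending fabric iocb, if any.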
**/ static void lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) { struct lpfc_iocbq *iocb; unsigned long iflags; int ret; repeat: iocb = NULL; spin_lock_irqsave(&phba->hbalock, iflags); /* Post any pending iocb to the SLI layer */ if (atomic_read(&phba->fabric_iocb_count) == 0) { list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), list); if (iocb) /* Increment fabric iocb count to hold the position */ atomic_inc(&phba->fabric_iocb_count); } spin_unlock_irqrestore(&phba->hbalock, iflags); if (iocb) { iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; iocb->cmd_flag |= LPFC_IO_FABRIC; lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, "Fabric sched1: ste:x%x", iocb->vport->port_state, 0, 0); ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); if (ret == IOCB_ERROR) { iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; iocb->fabric_cmd_cmpl = NULL; iocb->cmd_flag &= ~LPFC_IO_FABRIC; set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; iocb->cmd_cmpl(phba, iocb, iocb); atomic_dec(&phba->fabric_iocb_count); goto repeat; } } } /** * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command * @phba: pointer to lpfc hba data structure. * * This routine unblocks the issuing fabric iocb command. The function * will clear the fabric iocb block bit and then invoke the routine * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb * from the driver internal fabric iocb list. **/ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) { clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); lpfc_resume_fabric_iocbs(phba); return; } /** * lpfc_block_fabric_iocbs - Block issuing fabric iocb command * @phba: pointer to lpfc hba data structure. * * This routine blocks the issuing fabric iocb for a specified amount of * time (currently 100 ms). This is done by set the fabric iocb block bit * and set up a timeout timer for 100ms. When the block bit is set, no more * fabric iocb will be issued out of the HBA. **/ static void lpfc_block_fabric_iocbs(struct lpfc_hba *phba) { int blocked; blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) mod_timer(&phba->fabric_block_timer, jiffies + msecs_to_jiffies(100)); return; } /** * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @rspiocb: pointer to lpfc response iocb data structure. * * This routine is the callback function that is put to the fabric iocb's * callback function pointer (iocb->cmd_cmpl). The original iocb's callback * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback * function first restores and invokes the original iocb's callback function * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next * fabric bound iocb from the driver internal fabric iocb list onto the wire. 
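 * Busy and resource-related completions (N_Port/Fabric busy, certain
 * reject reason codes) additionally call lpfc_block_fabric_iocbs(), which
 * holds off further fabric iocbs for roughly 100 ms.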
**/ static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct ls_rjt stat; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); switch (ulp_status) { case IOSTAT_NPORT_RJT: case IOSTAT_FABRIC_RJT: if (ulp_word4 & RJT_UNAVAIL_TEMP) lpfc_block_fabric_iocbs(phba); break; case IOSTAT_NPORT_BSY: case IOSTAT_FABRIC_BSY: lpfc_block_fabric_iocbs(phba); break; case IOSTAT_LS_RJT: stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) lpfc_block_fabric_iocbs(phba); break; } BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; cmdiocb->fabric_cmd_cmpl = NULL; cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); atomic_dec(&phba->fabric_iocb_count); if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { /* Post any pending iocbs to HBA */ lpfc_resume_fabric_iocbs(phba); } } /** * lpfc_issue_fabric_iocb - Issue a fabric iocb command * @phba: pointer to lpfc hba data structure. * @iocb: pointer to lpfc command iocb data structure. * * This routine is used as the top-level API for issuing a fabric iocb command * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver * function makes sure that only one fabric bound iocb will be outstanding at * any given time. As such, this function will first check to see whether there * is already an outstanding fabric iocb on the wire. If so, it will put the * newly issued iocb onto the driver internal fabric iocb list, waiting to be * issued later. Otherwise, it will issue the iocb on the wire and update the * fabric iocb count to indicate that there is one fabric iocb on the wire. * * Note that this implementation can potentially send fabric IOCBs out of * order: the "ready" check does not include the condition that the internal * fabric IOCB list is empty, so a fabric IOCB issued by this routine may * "jump" ahead of the fabric IOCBs already waiting on the internal list.
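 *
 * For example, the FDISC path above hands its ELS iocb to this routine
 * roughly as:
 *
 *     elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
 *     rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *     if (rc == IOCB_ERROR)
 *             lpfc_els_free_iocb(phba, elsiocb);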
* * Return code * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully * IOCB_ERROR - failed to issue fabric iocb **/ static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) { unsigned long iflags; int ready; int ret; BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); spin_lock_irqsave(&phba->hbalock, iflags); ready = atomic_read(&phba->fabric_iocb_count) == 0 && !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); if (ready) /* Increment fabric iocb count to hold the position */ atomic_inc(&phba->fabric_iocb_count); spin_unlock_irqrestore(&phba->hbalock, iflags); if (ready) { iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; iocb->cmd_flag |= LPFC_IO_FABRIC; lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, "Fabric sched2: ste:x%x", iocb->vport->port_state, 0, 0); ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); if (ret == IOCB_ERROR) { iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; iocb->fabric_cmd_cmpl = NULL; iocb->cmd_flag &= ~LPFC_IO_FABRIC; atomic_dec(&phba->fabric_iocb_count); } } else { spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&iocb->list, &phba->fabric_iocb_list); spin_unlock_irqrestore(&phba->hbalock, iflags); ret = IOCB_SUCCESS; } return ret; } /** * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list * @vport: pointer to a virtual N_Port data structure. * * This routine aborts all the IOCBs associated with a @vport from the * driver internal fabric IOCB list. The list contains fabric IOCBs to be * issued to the ELS IOCB ring. This abort function walks the fabric IOCB * list, removes each IOCB associated with the @vport off the list, set the * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function * associated with the IOCB. **/ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) { LIST_HEAD(completions); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *tmp_iocb, *piocb; spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list) { if (piocb->vport != vport) continue; list_move_tail(&piocb->list, &completions); } spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list * @ndlp: pointer to a node-list data structure. * * This routine aborts all the IOCBs associated with an @ndlp from the * driver internal fabric IOCB list. The list contains fabric IOCBs to be * issued to the ELS IOCB ring. This abort function walks the fabric IOCB * list, removes each IOCB associated with the @ndlp off the list, set the * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function * associated with the IOCB. 
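 * Matching against the @ndlp is done with lpfc_check_sli_ndlp() on the ELS
 * ring, and the removed iocbs are completed via lpfc_sli_cancel_iocbs().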
**/ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); struct lpfc_hba *phba = ndlp->phba; struct lpfc_iocbq *tmp_iocb, *piocb; struct lpfc_sli_ring *pring; pring = lpfc_phba_elsring(phba); if (unlikely(!pring)) return; spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list) { if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { list_move_tail(&piocb->list, &completions); } } spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list * @phba: pointer to lpfc hba data structure. * * This routine aborts all the IOCBs currently on the driver internal * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS * IOCB ring. This function takes the entire IOCB list off the fabric IOCB * list, removes IOCBs off the list, set the status field to * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with * the IOCB. **/ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) { LIST_HEAD(completions); spin_lock_irq(&phba->hbalock); list_splice_init(&phba->fabric_iocb_list, &completions); spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport * @vport: pointer to lpfc vport data structure. * * This routine is invoked by the vport cleanup for deletions and the cleanup * for an ndlp on removal. **/ void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; struct lpfc_nodelist *ndlp = NULL; unsigned long iflag = 0; spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { lpfc_nlp_put(sglq_entry->ndlp); ndlp = sglq_entry->ndlp; sglq_entry->ndlp = NULL; /* If the xri on the abts_els_sgl list is for the Fport * node and the vport is unloading, the xri aborted wcqe * likely isn't coming back. Just release the sgl. */ if ((vport->load_flag & FC_UNLOADING) && ndlp->nlp_DID == Fabric_DID) { list_del(&sglq_entry->list); sglq_entry->state = SGL_FREED; list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_els_sgl_list); } } } spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); return; } /** * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the els xri abort wcqe structure. * * This routine is invoked by the worker thread to process a SLI4 slow-path * ELS aborted xri. 
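 * The routine returns the aborted sglq to the free ELS sgl list, marks an
 * RRQ active for the exchange when the sglq still references a node, drops
 * that node reference and, if the ELS txq has work pending, wakes the
 * worker thread.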
**/ void lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); uint16_t lxri = 0; struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; struct lpfc_nodelist *ndlp; struct lpfc_sli_ring *pring; pring = lpfc_phba_elsring(phba); spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->sli4_xritag == xri) { list_del(&sglq_entry->list); ndlp = sglq_entry->ndlp; sglq_entry->ndlp = NULL; list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_els_sgl_list); sglq_entry->state = SGL_FREED; spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); if (ndlp) { lpfc_set_rrq_active(phba, ndlp, sglq_entry->sli4_lxritag, rxid, 1); lpfc_nlp_put(ndlp); } /* Check if TXQ queue needs to be serviced */ if (pring && !list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; } } spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); lxri = lpfc_sli4_xri_inrange(phba, xri); if (lxri == NO_XRI) return; spin_lock_irqsave(&phba->hbalock, iflag); sglq_entry = __lpfc_get_active_sglq(phba, lxri); if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { spin_unlock_irqrestore(&phba->hbalock, iflag); return; } sglq_entry->state = SGL_XRI_ABORTED; spin_unlock_irqrestore(&phba->hbalock, iflag); return; } /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. * @vport: pointer to virtual port object. * @ndlp: nodelist pointer for the impacted node. * * The driver calls this routine in response to an SLI4 XRI ABORT CQE * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, * the driver is required to send a LOGO to the remote node before it * attempts to recover its login to the remote node. */ void lpfc_sli_abts_recover_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost; struct lpfc_hba *phba; unsigned long flags = 0; shost = lpfc_shost_from_vport(vport); phba = vport->phba; if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3093 No rport recovery needed. " "rport in state 0x%x\n", ndlp->nlp_state); return; } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " "flags 0x%x\n", shost->host_no, ndlp->nlp_DID, vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); /* * The rport is not responding. Remove the FCP-2 flag to prevent * an ADISC in the follow-up recovery code. 
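 * Setting NLP_ISSUE_LOGO below and unregistering the RPI kick off the
 * LOGO-based recovery described in the function header.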
*/ spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(&ndlp->lock, flags); lpfc_unreg_rpi(vport, ndlp); } static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) { bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); } static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) { u32 i; if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) return; for (i = min; i <= max; i++) set_bit(i, vport->vmid_priority_range); } static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) { set_bit(ctcl_vmid, vport->vmid_priority_range); } u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) { u32 i; i = find_first_bit(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); if (i == LPFC_VMID_MAX_PRIORITY_RANGE) return 0; clear_bit(i, vport->vmid_priority_range); return i; } #define MAX_PRIORITY_DESC 255 static void lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct priority_range_desc *desc; struct lpfc_dmabuf *prsp = NULL; struct lpfc_vmid_priority_range *vmid_range = NULL; u32 *data; struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); u8 *pcmd, max_desc; u32 len, i; struct lpfc_nodelist *ndlp = cmdiocb->ndlp; prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); if (!prsp) goto out; pcmd = prsp->virt; data = (u32 *)pcmd; if (data[0] == ELS_CMD_LS_RJT) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, "3277 QFPA LS_RJT x%x x%x\n", data[0], data[1]); goto out; } if (ulp_status) { lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, "6529 QFPA failed with status x%x x%x\n", ulp_status, ulp_word4); goto out; } if (!vport->qfpa_res) { max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), GFP_KERNEL); if (!vport->qfpa_res) goto out; } len = *((u32 *)(pcmd + 4)); len = be32_to_cpu(len); memcpy(vport->qfpa_res, pcmd, len + 8); len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; desc = (struct priority_range_desc *)(pcmd + 8); vmid_range = vport->vmid_priority.vmid_range; if (!vmid_range) { vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), GFP_KERNEL); if (!vmid_range) { kfree(vport->qfpa_res); goto out; } vport->vmid_priority.vmid_range = vmid_range; } vport->vmid_priority.num_descriptors = len; for (i = 0; i < len; i++, vmid_range++, desc++) { lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, "6539 vmid values low=%d, high=%d, qos=%d, " "local ve id=%d\n", desc->lo_range, desc->hi_range, desc->qos_priority, desc->local_ve_id); vmid_range->low = desc->lo_range << 1; if (desc->local_ve_id == QFPA_ODD_ONLY) vmid_range->low++; if (desc->qos_priority) vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; vmid_range->qos = desc->qos_priority; vmid_range->high = desc->hi_range << 1; if ((desc->local_ve_id == QFPA_ODD_ONLY) || (desc->local_ve_id == QFPA_EVEN_ODD)) vmid_range->high++; } lpfc_init_cs_ctl_bitmap(vport); for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { lpfc_vmid_set_cs_ctl_range(vport, vport->vmid_priority.vmid_range[i].low, vport->vmid_priority.vmid_range[i].high); } vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; out: lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } int lpfc_issue_els_qfpa(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; struct lpfc_iocbq 
*elsiocb; u8 *pcmd; int ret; ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) return -ENXIO; elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, ndlp->nlp_DID, ELS_CMD_QFPA); if (!elsiocb) return -ENOMEM; pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; *((u32 *)(pcmd)) = ELS_CMD_QFPA; pcmd += 4; elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(vport->phba, elsiocb); return -ENXIO; } ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); if (ret != IOCB_SUCCESS) { lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return -EIO; } vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; return 0; } int lpfc_vmid_uvem(struct lpfc_vport *vport, struct lpfc_vmid *vmid, bool instantiated) { struct lpfc_vem_id_desc *vem_id_desc; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; struct instantiated_ve_desc *inst_desc; struct lpfc_vmid_context *vmid_context; u8 *pcmd; u32 *len; int ret = 0; ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) return -ENXIO; vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); if (!vmid_context) return -ENOMEM; elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, ndlp, Fabric_DID, ELS_CMD_UVEM); if (!elsiocb) goto out; lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, "3427 Host vmid %s %d\n", vmid->host_vmid, instantiated); vmid_context->vmp = vmid; vmid_context->nlp = ndlp; vmid_context->instantiated = instantiated; elsiocb->vmid_tag.vmid_context = vmid_context; pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, sizeof(vport->lpfc_vmid_host_uuid))) memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, sizeof(vport->lpfc_vmid_host_uuid)); *((u32 *)(pcmd)) = ELS_CMD_UVEM; len = (u32 *)(pcmd + 4); *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, sizeof(vem_id_desc->vem_id)); inst_desc = (struct instantiated_ve_desc *)(pcmd + 32); inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); memcpy(inst_desc->global_vem_id, vmid->host_vmid, sizeof(inst_desc->global_vem_id)); bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); bf_set(lpfc_instantiated_local_id, inst_desc, vmid->un.cs_ctl_vmid); if (instantiated) { inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); } else { inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); } inst_desc->word6 = cpu_to_be32(inst_desc->word6); elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem; elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(vport->phba, elsiocb); goto out; } ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); if (ret != IOCB_SUCCESS) { lpfc_els_free_iocb(vport->phba, elsiocb); lpfc_nlp_put(ndlp); goto out; } return 0; out: kfree(vmid_context); return -EIO; } static void lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = icmdiocb->vport; struct lpfc_dmabuf *prsp = NULL; struct lpfc_vmid_context *vmid_context = icmdiocb->vmid_tag.vmid_context; struct lpfc_nodelist *ndlp = icmdiocb->ndlp; u8 *pcmd; u32 *data; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 
ulp_word4 = get_job_word4(phba, rspiocb); struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf; struct lpfc_vmid *vmid; vmid = vmid_context->vmp; if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) ndlp = NULL; prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); if (!prsp) goto out; pcmd = prsp->virt; data = (u32 *)pcmd; if (data[0] == ELS_CMD_LS_RJT) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); goto out; } if (ulp_status) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, "4533 UVEM error status %x: %x\n", ulp_status, ulp_word4); goto out; } spin_lock(&phba->hbalock); /* Set IN USE flag */ vport->vmid_flag |= LPFC_VMID_IN_USE; phba->pport->vmid_flag |= LPFC_VMID_IN_USE; spin_unlock(&phba->hbalock); if (vmid_context->instantiated) { write_lock(&vport->vmid_lock); vmid->flag |= LPFC_VMID_REGISTERED; vmid->flag &= ~LPFC_VMID_REQ_REGISTER; write_unlock(&vport->vmid_lock); } out: kfree(vmid_context); lpfc_els_free_iocb(phba, icmdiocb); lpfc_nlp_put(ndlp); }
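/*
 * Summary of the VMID priority-tagging helpers above: lpfc_issue_els_qfpa()
 * queries the fabric's priority ranges and its completion fills
 * vport->vmid_priority and the CS_CTL bitmap; lpfc_vmid_get_cs_ctl() and
 * lpfc_vmid_put_cs_ctl() hand out and return priority values from that
 * bitmap; lpfc_vmid_uvem() registers (or deregisters) a VEM/VE pair for a
 * specific vmid, and lpfc_cmpl_els_uvem() marks the vmid
 * LPFC_VMID_REGISTERED on success.
 */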
/* end of drivers/scsi/lpfc/lpfc_els.c (repo: linux-master) */
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/sched/clock.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/miscdevice.h> #include <linux/percpu.h> #include <linux/irq.h> #include <linux/bitops.h> #include <linux/crash_dump.h> #include <linux/cpu.h> #include <linux/cpuhotplug.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_tcq.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" #include "lpfc_ids.h" static enum cpuhp_state lpfc_cpuhp_state; /* Used when mapping IRQ vectors in a driver centric manner */ static uint32_t lpfc_present_cpu; static bool lpfc_pldv_detect; static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); static void lpfc_cpuhp_remove(struct lpfc_hba *phba); static void lpfc_cpuhp_add(struct lpfc_hba *phba); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); static void lpfc_free_els_sgl_list(struct lpfc_hba *); static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); static void lpfc_init_sgl_list(struct lpfc_hba *); static int lpfc_init_active_sgl_array(struct lpfc_hba *); static void lpfc_free_active_sgl(struct lpfc_hba *); static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static void 
lpfc_sli4_disable_intr(struct lpfc_hba *); static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); #define LPFC_NVMET_BUF_POST 254 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts); /** * lpfc_config_port_prep - Perform lpfc initialization prior to config port * @phba: pointer to lpfc hba data structure. * * This routine will do LPFC initialization prior to issuing the CONFIG_PORT * mailbox command. It retrieves the revision information from the HBA and * collects the Vital Product Data (VPD) about the HBA for preparing the * configuration of the HBA. * * Return codes: * 0 - success. * -ERESTART - requests the SLI layer to reset the HBA and try again. * Any other value - indicates an error. **/ int lpfc_config_port_prep(struct lpfc_hba *phba) { lpfc_vpd_t *vp = &phba->vpd; int i = 0, rc; LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; char *lpfc_vpd_data = NULL; uint16_t offset = 0; static char licensed[56] = "key unlock for use with gnu public licensed code only\0"; static int init_key = 1; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { if (init_key) { uint32_t *ptext = (uint32_t *) licensed; for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) *ptext = cpu_to_be32(*ptext); init_key = 0; } lpfc_read_nv(phba, pmb); memset((char*)mb->un.varRDnvp.rsvd3, 0, sizeof (mb->un.varRDnvp.rsvd3)); memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, sizeof (licensed)); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0324 Config Port initialization " "error, mbxCmd x%x READ_NVPARM, " "mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -ERESTART; } memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, sizeof(phba->wwnn)); memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, sizeof(phba->wwpn)); } /* * Clear all option bits except LPFC_SLI3_BG_ENABLED, * which was already set in lpfc_get_cfgparam() */ phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; /* Setup and issue mailbox READ REV command */ lpfc_read_rev(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0439 Adapter failed to init, mbxCmd x%x " "READ_REV, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); mempool_free( pmb, phba->mbox_mem_pool); return -ERESTART; } /* * The value of rr must be 1 since the driver set the cv field to 1. * This setting requires the FW to set all revision fields. 
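 * If rr is still zero the READ_REV data cannot be trusted, so the check
 * below fails the initialization with -ERESTART.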
*/ if (mb->un.varRdRev.rr == 0) { vp->rev.rBit = 0; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0440 Adapter failed to init, READ_REV has " "missing revision information.\n"); mempool_free(pmb, phba->mbox_mem_pool); return -ERESTART; } if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { mempool_free(pmb, phba->mbox_mem_pool); return -EINVAL; } /* Save information as VPD data */ vp->rev.rBit = 1; memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); vp->rev.biuRev = mb->un.varRdRev.biuRev; vp->rev.smRev = mb->un.varRdRev.smRev; vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; vp->rev.endecRev = mb->un.varRdRev.endecRev; vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; vp->rev.fcphLow = mb->un.varRdRev.fcphLow; vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; vp->rev.postKernRev = mb->un.varRdRev.postKernRev; vp->rev.opFwRev = mb->un.varRdRev.opFwRev; /* If the sli feature level is less then 9, we must * tear down all RPIs and VPIs on link down if NPIV * is enabled. */ if (vp->rev.feaLevelHigh < 9) phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; if (lpfc_is_LC_HBA(phba->pcidev->device)) memcpy(phba->RandomData, (char *)&mb->un.varWords[24], sizeof (phba->RandomData)); /* Get adapter VPD information */ lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); if (!lpfc_vpd_data) goto out_free_mbox; do { lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0441 VPD not present on adapter, " "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); mb->un.varDmp.word_cnt = 0; } /* dump mem may return a zero when finished or we got a * mailbox error, either way we are done. */ if (mb->un.varDmp.word_cnt == 0) break; if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, lpfc_vpd_data + offset, mb->un.varDmp.word_cnt); offset += mb->un.varDmp.word_cnt; } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); lpfc_parse_vpd(phba, lpfc_vpd_data, offset); kfree(lpfc_vpd_data); out_free_mbox: mempool_free(pmb, phba->mbox_mem_pool); return 0; } /** * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd * @phba: pointer to lpfc hba data structure. * @pmboxq: pointer to the driver internal queue element for mailbox command. * * This is the completion handler for driver's configuring asynchronous event * mailbox command to the device. If the mailbox command returns successfully, * it will set internal async event support flag to 1; otherwise, it will * set internal async event support flag to 0. **/ static void lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) phba->temp_sensor_support = 1; else phba->temp_sensor_support = 0; mempool_free(pmboxq, phba->mbox_mem_pool); return; } /** * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler * @phba: pointer to lpfc hba data structure. * @pmboxq: pointer to the driver internal queue element for mailbox command. * * This is the completion handler for dump mailbox command for getting * wake up parameters. 
When this command complete, the response contain * Option rom version of the HBA. This function translate the version number * into a human readable string and store it in OptionROMVersion. **/ static void lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct prog_id *prg; uint32_t prog_id_word; char dist = ' '; /* character array used for decoding dist type. */ char dist_char[] = "nabx"; if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } prg = (struct prog_id *) &prog_id_word; /* word 7 contain option rom version */ prog_id_word = pmboxq->u.mb.un.varWords[7]; /* Decode the Option rom version word to a readable string */ dist = dist_char[prg->dist]; if ((prg->dist == 3) && (prg->num == 0)) snprintf(phba->OptionROMVersion, 32, "%d.%d%d", prg->ver, prg->rev, prg->lev); else snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", prg->ver, prg->rev, prg->lev, dist, prg->num); mempool_free(pmboxq, phba->mbox_mem_pool); return; } /** * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, * @vport: pointer to lpfc vport data structure. * * * Return codes * None. **/ void lpfc_update_vport_wwn(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; /* * If the name is empty or there exists a soft name * then copy the service params name, otherwise use the fc name */ if (vport->fc_nodename.u.wwn[0] == 0) memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); else memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); /* * If the port name has changed, then set the Param changes flag * to unreg the login */ if (vport->fc_portname.u.wwn[0] != 0 && memcmp(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name))) { vport->vport_flag |= FAWWPN_PARAM_CHG; if (phba->sli_rev == LPFC_SLI_REV4 && vport->port_type == LPFC_PHYSICAL_PORT && phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) { if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)) phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_DISCOVERY | LOG_ELS, "2701 FA-PWWN change WWPN from %llx to " "%llx: vflag x%x fawwpn_flag x%x\n", wwn_to_u64(vport->fc_portname.u.wwn), wwn_to_u64 (vport->fc_sparam.portName.u.wwn), vport->vport_flag, phba->sli4_hba.fawwpn_flag); memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); } } if (vport->fc_portname.u.wwn[0] == 0) memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); else memcpy(&vport->fc_sparam.portName, &vport->fc_portname, sizeof(struct lpfc_name)); } /** * lpfc_config_port_post - Perform lpfc initialization after config port * @phba: pointer to lpfc hba data structure. * * This routine will do LPFC initialization after the CONFIG_PORT mailbox * command call. It performs all internal resource and state setups on the * port: post IOCB buffers, enable appropriate host interrupt attentions, * ELS ring timers, etc. * * Return codes * 0 - success. * Any other value - error. **/ int lpfc_config_port_post(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_dmabuf *mp; struct lpfc_sli *psli = &phba->sli; uint32_t status, timeout; int i, j; int rc; spin_lock_irq(&phba->hbalock); /* * If the Config port completed correctly the HBA is not * over heated any more. 
*/ if (phba->over_temp_state == HBA_OVER_TEMP) phba->over_temp_state = HBA_NORMAL_TEMP; spin_unlock_irq(&phba->hbalock); pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->u.mb; /* Get login parameters for NID. */ rc = lpfc_read_sparam(phba, pmb, 0); if (rc) { mempool_free(pmb, phba->mbox_mem_pool); return -ENOMEM; } pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0448 Adapter failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); return -EIO; } mp = (struct lpfc_dmabuf *)pmb->ctx_buf; /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no * longer needed. Prevent unintended ctx_buf access as the mbox is * reused. */ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); pmb->ctx_buf = NULL; lpfc_update_vport_wwn(vport); /* Update the fc_host data structures with new wwn. */ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); fc_host_max_npiv_vports(shost) = phba->max_vpi; /* If no serial number in VPD data, use low 6 bytes of WWNN */ /* This should be consolidated into parse_vpd ? - mr */ if (phba->SerialNumber[0] == 0) { uint8_t *outptr; outptr = &vport->fc_nodename.u.s.IEEE[0]; for (i = 0; i < 12; i++) { status = *outptr++; j = ((status & 0xf0) >> 4); if (j <= 9) phba->SerialNumber[i] = (char)((uint8_t) 0x30 + (uint8_t) j); else phba->SerialNumber[i] = (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); i++; j = (status & 0xf); if (j <= 9) phba->SerialNumber[i] = (char)((uint8_t) 0x30 + (uint8_t) j); else phba->SerialNumber[i] = (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); } } lpfc_read_config(phba, pmb); pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0453 Adapter failed to init, mbxCmd x%x " "READ_CONFIG, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; mempool_free( pmb, phba->mbox_mem_pool); return -EIO; } /* Check if the port is disabled */ lpfc_sli_read_link_ste(phba); /* Reset the DFT_HBA_Q_DEPTH to the max xri */ if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "3359 HBA queue depth changed from %d to %d\n", phba->cfg_hba_queue_depth, mb->un.varRdConfig.max_xri); phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; } phba->lmt = mb->un.varRdConfig.lmt; /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); phba->link_state = LPFC_LINK_DOWN; /* Only process IOCBs on ELS ring till hba_state is READY */ if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; /* Post receive buffers for desired rings */ if (phba->sli_rev != 3) lpfc_post_rcv_buf(phba); /* * Configure HBA MSI-X attention conditions to messages if MSI-X mode */ if (phba->intr_type == MSIX) { rc = lpfc_config_msi(phba, pmb); if (rc) { mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0352 Config MSI mailbox command " "failed, mbxCmd x%x, mbxStatus x%x\n", pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ if (lpfc_readl(phba->HCregaddr, &status)) { spin_unlock_irq(&phba->hbalock); return -EIO; } status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) status |= HC_R0INT_ENA; if (psli->num_rings > 1) status |= HC_R1INT_ENA; if (psli->num_rings > 2) status |= HC_R2INT_ENA; if (psli->num_rings > 3) status |= HC_R3INT_ENA; if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && (phba->cfg_poll & DISABLE_FCP_RING_INT)) status &= ~(HC_R0INT_ENA); writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; mod_timer(&vport->els_tmofunc, jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2598 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2599 Adapter failed to issue DOWN_LINK" " mbox command rc 0x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { mempool_free(pmb, phba->mbox_mem_pool); rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); if (rc) return rc; } /* MBOX buffer will be freed in mbox compl */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } lpfc_config_async(phba, pmb, LPFC_ELS_RING); pmb->mbox_cmpl = lpfc_config_async_cmpl; pmb->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0456 Adapter failed to issue " "ASYNCEVT_ENABLE mbox status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } /* Get Option rom version */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } lpfc_dump_wakeup_param(phba, pmb); pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; pmb->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0435 Adapter failed " "to get Option ROM version status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } return 0; } /** * lpfc_sli4_refresh_params - update driver copy of params. * @phba: Pointer to HBA context object. * * This is called to refresh driver copy of dynamic fields from the * common_get_sli4_parameters descriptor. 
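 * The fields currently refreshed are the MI version (honoring the
 * cfg_enable_mi module parameter), the CMF capability and the PLS setting.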
**/ int lpfc_sli4_refresh_params(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; struct lpfc_sli4_parameters *mbx_sli4_parameters; int length, rc; mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return -ENOMEM; mqe = &mboxq->u.mqe; /* Read the port's SLI4 Config Parameters */ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, length, LPFC_SLI4_MBX_EMBED); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (unlikely(rc)) { mempool_free(mboxq, phba->mbox_mem_pool); return rc; } mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; phba->sli4_hba.pc_sli4_params.mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); /* Are we forcing MI off via module parameter? */ if (phba->cfg_enable_mi) phba->sli4_hba.pc_sli4_params.mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters); else phba->sli4_hba.pc_sli4_params.mi_ver = 0; phba->sli4_hba.pc_sli4_params.cmf = bf_get(cfg_cmf, mbx_sli4_parameters); phba->sli4_hba.pc_sli4_params.pls = bf_get(cfg_pvl, mbx_sli4_parameters); mempool_free(mboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_hba_init_link - Initialize the FC link * @phba: pointer to lpfc hba data structure. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT * * This routine will issue the INIT_LINK mailbox command call. * It is available to other drivers through the lpfc_hba data * structure for use as a delayed link up mechanism with the * module parameter lpfc_suppress_link_up. * * Return code * 0 - success * Any other value - error **/ static int lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) { return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); } /** * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology * @phba: pointer to lpfc hba data structure. * @fc_topology: desired fc topology. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT * * This routine will issue the INIT_LINK mailbox command call. * It is available to other drivers through the lpfc_hba data * structure for use as a delayed link up mechanism with the * module parameter lpfc_suppress_link_up. 
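 *
 * The plain lpfc_hba_init_link() wrapper above simply forwards the
 * configured topology:
 *
 *     return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);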
* * Return code * 0 - success * Any other value - error **/ int lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, uint32_t flag) { struct lpfc_vport *vport = phba->pport; LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; int rc; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->u.mb; pmb->vport = vport; if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { /* Reset link speed to auto */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1302 Invalid speed for this board:%d " "Reset link speed to auto.\n", phba->cfg_link_speed); phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; } lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; if (phba->sli_rev < LPFC_SLI_REV4) lpfc_set_loopback_flag(phba); rc = lpfc_sli_issue_mbox(phba, pmb, flag); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0498 Adapter failed to init, mbxCmd x%x " "INIT_LINK, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); if (phba->sli_rev <= LPFC_SLI_REV3) { /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ /* Clear all pending interrupts */ writel(0xffffffff, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ } phba->link_state = LPFC_HBA_ERROR; if (rc != MBX_BUSY || flag == MBX_POLL) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; if (flag == MBX_POLL) mempool_free(pmb, phba->mbox_mem_pool); return 0; } /** * lpfc_hba_down_link - this routine downs the FC link * @phba: pointer to lpfc hba data structure. * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT * * This routine will issue the DOWN_LINK mailbox command call. * It is available to other drivers through the lpfc_hba data * structure for use to stop the link. * * Return code * 0 - success * Any other value - error **/ static int lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) { LPFC_MBOXQ_t *pmb; int rc; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0491 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, flag); if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2522 Adapter failed to issue DOWN_LINK" " mbox command rc 0x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } if (flag == MBX_POLL) mempool_free(pmb, phba->mbox_mem_pool); return 0; } /** * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset * @phba: pointer to lpfc HBA data structure. 
* * This routine will do LPFC uninitialization before the HBA is reset when * bringing down the SLI Layer. * * Return codes * 0 - success. * Any other value - error. **/ int lpfc_hba_down_prep(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; if (phba->sli_rev <= LPFC_SLI_REV3) { /* Disable interrupts */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } if (phba->pport->load_flag & FC_UNLOADING) lpfc_cleanup_discovery_resources(phba->pport); else { vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_cleanup_discovery_resources(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } return 0; } /** * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free * rspiocb which got deferred * * @phba: pointer to lpfc HBA data structure. * * This routine will cleanup completed slow path events after HBA is reset * when bringing down the SLI Layer. * * * Return codes * void. **/ static void lpfc_sli4_free_sp_events(struct lpfc_hba *phba) { struct lpfc_iocbq *rspiocbq; struct hbq_dmabuf *dmabuf; struct lpfc_cq_event *cq_event; spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~HBA_SP_QUEUE_EVT; spin_unlock_irq(&phba->hbalock); while (!list_empty(&phba->sli4_hba.sp_queue_event)) { /* Get the response iocb from the head of work queue */ spin_lock_irq(&phba->hbalock); list_remove_head(&phba->sli4_hba.sp_queue_event, cq_event, struct lpfc_cq_event, list); spin_unlock_irq(&phba->hbalock); switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { case CQE_CODE_COMPL_WQE: rspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event); lpfc_sli_release_iocbq(phba, rspiocbq); break; case CQE_CODE_RECEIVE: case CQE_CODE_RECEIVE_V1: dmabuf = container_of(cq_event, struct hbq_dmabuf, cq_event); lpfc_in_buf_free(phba, &dmabuf->dbuf); } } } /** * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will cleanup posted ELS buffers after the HBA is reset * when bringing down the SLI Layer. * * * Return codes * void. **/ static void lpfc_hba_free_post_buf(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *mp, *next_mp; LIST_HEAD(buflist); int count; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) lpfc_sli_hbqbuf_free_all(phba); else { /* Cleanup preposted buffers on the ELS ring */ pring = &psli->sli3_ring[LPFC_ELS_RING]; spin_lock_irq(&phba->hbalock); list_splice_init(&pring->postbufq, &buflist); spin_unlock_irq(&phba->hbalock); count = 0; list_for_each_entry_safe(mp, next_mp, &buflist, list) { list_del(&mp->list); count++; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } spin_lock_irq(&phba->hbalock); pring->postbufq_cnt -= count; spin_unlock_irq(&phba->hbalock); } } /** * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will cleanup the txcmplq after the HBA is reset when bringing * down the SLI Layer. 
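 *
 * It relies on the usual "splice under the lock, complete outside the
 * lock" idiom: the txcmplq entries are moved to a private list while
 * the ring lock is held and are only cancelled after the lock is
 * released. A minimal sketch of the idiom with generic names:
 *
 *   LIST_HEAD(completions);
 *   spin_lock_irq(&some_lock);
 *   list_splice_init(&pending_list, &completions);
 *   spin_unlock_irq(&some_lock);
 *   // walk 'completions' here without holding the lock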
* * Return codes * void **/ static void lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; LIST_HEAD(completions); int i; struct lpfc_iocbq *piocb, *next_iocb; if (phba->sli_rev != LPFC_SLI_REV4) { for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; spin_lock_irq(&phba->hbalock); /* At this point in time the HBA is either reset or DOA * Nothing should be on txcmplq as it will * NEVER complete. */ list_splice_init(&pring->txcmplq, &completions); pring->txcmplq_cnt = 0; spin_unlock_irq(&phba->hbalock); lpfc_sli_abort_iocb_ring(phba, pring); } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); return; } list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring) continue; spin_lock_irq(&pring->ring_lock); list_for_each_entry_safe(piocb, next_iocb, &pring->txcmplq, list) piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; list_splice_init(&pring->txcmplq, &completions); pring->txcmplq_cnt = 0; spin_unlock_irq(&pring->ring_lock); lpfc_sli_abort_iocb_ring(phba, pring); } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } /** * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do uninitialization after the HBA is reset when bring * down the SLI Layer. * * Return codes * 0 - success. * Any other value - error. **/ static int lpfc_hba_down_post_s3(struct lpfc_hba *phba) { lpfc_hba_free_post_buf(phba); lpfc_hba_clean_txcmplq(phba); return 0; } /** * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do uninitialization after the HBA is reset when bring * down the SLI Layer. * * Return codes * 0 - success. * Any other value - error. **/ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_io_buf *psb, *psb_next; struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; struct lpfc_sli4_hdw_queue *qp; LIST_HEAD(aborts); LIST_HEAD(nvme_aborts); LIST_HEAD(nvmet_aborts); struct lpfc_sglq *sglq_entry = NULL; int cnt, idx; lpfc_sli_hbqbuf_free_all(phba); lpfc_hba_clean_txcmplq(phba); /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be * on the lpfc_els_sgl_list so that it can either be freed if the * driver is unloading or reposted if the driver is restarting * the port. */ /* sgl_list_lock required because worker thread uses this * list. */ spin_lock_irq(&phba->sli4_hba.sgl_list_lock); list_for_each_entry(sglq_entry, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) sglq_entry->state = SGL_FREED; list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, &phba->sli4_hba.lpfc_els_sgl_list); spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); /* abts_xxxx_buf_list_lock required because worker thread uses this * list. 
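 *
 * Note the nesting used below: hbalock is taken first with interrupts
 * disabled, then the per-hdwq abts_io_buf_list_lock, and finally
 * io_buf_list_put_lock while the aborted buffers are returned to the
 * put list. Keeping that order consistent with the other users of
 * these lists avoids lock inversion.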
*/ spin_lock_irq(&phba->hbalock); cnt = 0; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; spin_lock(&qp->abts_io_buf_list_lock); list_splice_init(&qp->lpfc_abts_io_buf_list, &aborts); list_for_each_entry_safe(psb, psb_next, &aborts, list) { psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; cnt++; } spin_lock(&qp->io_buf_list_put_lock); list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); qp->put_io_bufs += qp->abts_scsi_io_bufs; qp->put_io_bufs += qp->abts_nvme_io_bufs; qp->abts_scsi_io_bufs = 0; qp->abts_nvme_io_bufs = 0; spin_unlock(&qp->io_buf_list_put_lock); spin_unlock(&qp->abts_io_buf_list_lock); } spin_unlock_irq(&phba->hbalock); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, &nvmet_aborts); spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } } lpfc_sli4_free_sp_events(phba); return cnt; } /** * lpfc_hba_down_post - Wrapper func for hba down post routine * @phba: pointer to lpfc HBA data structure. * * This routine wraps the actual SLI3 or SLI4 routine for performing * uninitialization after the HBA is reset when bring down the SLI Layer. * * Return codes * 0 - success. * Any other value - error. **/ int lpfc_hba_down_post(struct lpfc_hba *phba) { return (*phba->lpfc_hba_down_post)(phba); } /** * lpfc_hb_timeout - The HBA-timer timeout handler * @t: timer context used to obtain the pointer to lpfc hba data structure. * * This is the HBA-timer timeout handler registered to the lpfc driver. When * this timer fires, a HBA timeout event shall be posted to the lpfc driver * work-port-events bitmap and the worker thread is notified. This timeout * event will be used by the worker thread to invoke the actual timeout * handler routine, lpfc_hb_timeout_handler. Any periodical operations will * be performed in the timeout handler and the HBA timeout event bit shall * be cleared by the worker thread after it has taken the event bitmap out. **/ static void lpfc_hb_timeout(struct timer_list *t) { struct lpfc_hba *phba; uint32_t tmo_posted; unsigned long iflag; phba = from_timer(phba, t, hb_tmofunc); /* Check for heart beat timeout conditions */ spin_lock_irqsave(&phba->pport->work_port_lock, iflag); tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; if (!tmo_posted) phba->pport->work_port_events |= WORKER_HB_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); /* Tell the worker thread there is work to do */ if (!tmo_posted) lpfc_worker_wake_up(phba); return; } /** * lpfc_rrq_timeout - The RRQ-timer timeout handler * @t: timer context used to obtain the pointer to lpfc hba data structure. * * This is the RRQ-timer timeout handler registered to the lpfc driver. When * this timer fires, a RRQ timeout event shall be posted to the lpfc driver * work-port-events bitmap and the worker thread is notified. This timeout * event will be used by the worker thread to invoke the actual timeout * handler routine, lpfc_rrq_handler. Any periodical operations will * be performed in the timeout handler and the RRQ timeout event bit shall * be cleared by the worker thread after it has taken the event bitmap out. 
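 *
 * Timers of this kind are (re)armed elsewhere with mod_timer(), for
 * example (placeholder interval; the actual arming sites are outside
 * this excerpt):
 *
 *   mod_timer(&phba->rrq_tmr,
 *             jiffies + msecs_to_jiffies(1000 * rrq_interval));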
**/ static void lpfc_rrq_timeout(struct timer_list *t) { struct lpfc_hba *phba; unsigned long iflag; phba = from_timer(phba, t, rrq_tmr); spin_lock_irqsave(&phba->pport->work_port_lock, iflag); if (!(phba->pport->load_flag & FC_UNLOADING)) phba->hba_flag |= HBA_RRQ_ACTIVE; else phba->hba_flag &= ~HBA_RRQ_ACTIVE; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_worker_wake_up(phba); } /** * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function * @phba: pointer to lpfc hba data structure. * @pmboxq: pointer to the driver internal queue element for mailbox command. * * This is the callback function to the lpfc heart-beat mailbox command. * If configured, the lpfc driver issues the heart-beat mailbox command to * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the * heart-beat mailbox command is issued, the driver shall set up heart-beat * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks * heart-beat outstanding state. Once the mailbox command comes back and * no error conditions detected, the heart-beat mailbox command timer is * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding * state is cleared for the next heart-beat. If the timer expired with the * heart-beat outstanding state set, the driver will put the HBA offline. **/ static void lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { unsigned long drvr_flag; spin_lock_irqsave(&phba->hbalock, drvr_flag); phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Check and reset heart-beat timer if necessary */ mempool_free(pmboxq, phba->mbox_mem_pool); if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } /* * lpfc_idle_stat_delay_work - idle_stat tracking * * This routine tracks per-eq idle_stat and determines polling decisions. * * Return codes: * None **/ static void lpfc_idle_stat_delay_work(struct work_struct *work) { struct lpfc_hba *phba = container_of(to_delayed_work(work), struct lpfc_hba, idle_stat_delay_work); struct lpfc_queue *eq; struct lpfc_sli4_hdw_queue *hdwq; struct lpfc_idle_stat *idle_stat; u32 i, idle_percent; u64 wall, wall_idle, diff_wall, diff_idle, busy_time; if (phba->pport->load_flag & FC_UNLOADING) return; if (phba->link_state == LPFC_HBA_ERROR || phba->pport->fc_flag & FC_OFFLINE_MODE || phba->cmf_active_mode != LPFC_CFG_OFF) goto requeue; for_each_present_cpu(i) { hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; eq = hdwq->hba_eq; /* Skip if we've already handled this eq's primary CPU */ if (eq->chann != i) continue; idle_stat = &phba->sli4_hba.idle_stat[i]; /* get_cpu_idle_time returns values as running counters. Thus, * to know the amount for this period, the prior counter values * need to be subtracted from the current counter values. * From there, the idle time stat can be calculated as a * percentage of 100 - the sum of the other consumption times. 
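 *
 * Worked example with hypothetical numbers: if diff_wall is 1000 time
 * units and diff_idle is 850, busy_time is 150, 100 * 150 / 1000 = 15
 * percent busy, and idle_percent ends up as 85. Since 85 is not below
 * the 15 percent threshold checked below, the EQ is placed in
 * LPFC_THREADED_IRQ polling mode.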
*/ wall_idle = get_cpu_idle_time(i, &wall, 1); diff_idle = wall_idle - idle_stat->prev_idle; diff_wall = wall - idle_stat->prev_wall; if (diff_wall <= diff_idle) busy_time = 0; else busy_time = diff_wall - diff_idle; idle_percent = div64_u64(100 * busy_time, diff_wall); idle_percent = 100 - idle_percent; if (idle_percent < 15) eq->poll_mode = LPFC_QUEUE_WORK; else eq->poll_mode = LPFC_THREADED_IRQ; idle_stat->prev_idle = wall_idle; idle_stat->prev_wall = wall; } requeue: schedule_delayed_work(&phba->idle_stat_delay_work, msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); } static void lpfc_hb_eq_delay_work(struct work_struct *work) { struct lpfc_hba *phba = container_of(to_delayed_work(work), struct lpfc_hba, eq_delay_work); struct lpfc_eq_intr_info *eqi, *eqi_new; struct lpfc_queue *eq, *eq_next; unsigned char *ena_delay = NULL; uint32_t usdelay; int i; if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) return; if (phba->link_state == LPFC_HBA_ERROR || phba->pport->fc_flag & FC_OFFLINE_MODE) goto requeue; ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), GFP_KERNEL); if (!ena_delay) goto requeue; for (i = 0; i < phba->cfg_irq_chann; i++) { /* Get the EQ corresponding to the IRQ vector */ eq = phba->sli4_hba.hba_eq_hdl[i].eq; if (!eq) continue; if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { eq->q_flag &= ~HBA_EQ_DELAY_CHK; ena_delay[eq->last_cpu] = 1; } } for_each_present_cpu(i) { eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); if (ena_delay[i]) { usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) usdelay = LPFC_MAX_AUTO_EQ_DELAY; } else { usdelay = 0; } eqi->icnt = 0; list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { if (unlikely(eq->last_cpu != i)) { eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, eq->last_cpu); list_move_tail(&eq->cpu_list, &eqi_new->list); continue; } if (usdelay != eq->q_mode) lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, usdelay); } } kfree(ena_delay); requeue: queue_delayed_work(phba->wq, &phba->eq_delay_work, msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); } /** * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution * @phba: pointer to lpfc hba data structure. * * For each heartbeat, this routine does some heuristic methods to adjust * XRI distribution. The goal is to fully utilize free XRIs. **/ static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) { u32 i; u32 hwq_count; hwq_count = phba->cfg_hdw_queue; for (i = 0; i < hwq_count; i++) { /* Adjust XRIs in private pool */ lpfc_adjust_pvt_pool_count(phba, i); /* Adjust high watermark */ lpfc_adjust_high_watermark(phba, i); #ifdef LPFC_MXP_STAT /* Snapshot pbl, pvt and busy count */ lpfc_snapshot_mxp(phba, i); #endif } } /** * lpfc_issue_hb_mbox - Issues heart-beat mailbox command * @phba: pointer to lpfc hba data structure. * * If a HB mbox is not already in progrees, this routine will allocate * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command, * and issue it. The HBA_HBEAT_INP flag means the command is in progress. 
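 *
 * Return convention as used by callers in this file: 0 means a
 * heartbeat is now outstanding (or one was already pending), -ENOMEM
 * means no mailbox memory was available, and -ENXIO means the mailbox
 * could not be queued. A typical caller therefore only needs:
 *
 *   if (lpfc_issue_hb_mbox(phba))
 *       tmo = (1000 * LPFC_HB_MBOX_INTERVAL);   // retry sooner
 *   else
 *       tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);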
**/ int lpfc_issue_hb_mbox(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmboxq; int retval; /* Is a Heartbeat mbox already in progress */ if (phba->hba_flag & HBA_HBEAT_INP) return 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return -ENOMEM; lpfc_heart_beat(phba, pmboxq); pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; pmboxq->vport = phba->pport; retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if (retval != MBX_BUSY && retval != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return -ENXIO; } phba->hba_flag |= HBA_HBEAT_INP; return 0; } /** * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command * @phba: pointer to lpfc hba data structure. * * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless * of the value of lpfc_enable_hba_heartbeat. * If lpfc_enable_hba_heartbeat is set, the timeout routine will always * try to issue a MBX_HEARTBEAT mbox command. **/ void lpfc_issue_hb_tmo(struct lpfc_hba *phba) { if (phba->cfg_enable_hba_heartbeat) return; phba->hba_flag |= HBA_HBEAT_TMO; } /** * lpfc_hb_timeout_handler - The HBA-timer timeout handler * @phba: pointer to lpfc hba data structure. * * This is the actual HBA-timer timeout handler to be invoked by the worker * thread whenever the HBA timer fired and HBA-timeout event posted. This * handler performs any periodic operations needed for the device. If such * periodic event has already been attended to either in the interrupt handler * or by processing slow-ring or fast-ring events within the HBA-timer * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets * the timer for the next timeout period. If lpfc heart-beat mailbox command * is configured and there is no heart-beat mailbox command outstanding, a * heart-beat mailbox is issued and timer set properly. Otherwise, if there * has been a heart-beat mailbox command outstanding, the HBA shall be put * to offline. 
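 *
 * Timing sketch, using the 5 and 30 second values quoted for
 * LPFC_HB_MBOX_INTERVAL and LPFC_HB_MBOX_TIMEOUT above: if an I/O
 * completion was seen within the last interval, no MBX_HEARTBEAT is
 * sent and the timer is simply re-armed for another interval; once a
 * heartbeat mailbox is issued, the timer is stretched to the longer
 * timeout so the command has a chance to complete before the port is
 * taken offline.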
**/ void lpfc_hb_timeout_handler(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct lpfc_dmabuf *buf_ptr; int retval = 0; int i, tmo; struct lpfc_sli *psli = &phba->sli; LIST_HEAD(completions); if (phba->cfg_xri_rebalancing) { /* Multi-XRI pools handler */ lpfc_hb_mxp_handler(phba); } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_rcv_seq_check_edtov(vports[i]); lpfc_fdmi_change_check(vports[i]); } lpfc_destroy_vport_work_array(phba, vports); if ((phba->link_state == LPFC_HBA_ERROR) || (phba->pport->load_flag & FC_UNLOADING) || (phba->pport->fc_flag & FC_OFFLINE_MODE)) return; if (phba->elsbuf_cnt && (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { spin_lock_irq(&phba->hbalock); list_splice_init(&phba->elsbuf, &completions); phba->elsbuf_cnt = 0; phba->elsbuf_prev_cnt = 0; spin_unlock_irq(&phba->hbalock); while (!list_empty(&completions)) { list_remove_head(&completions, buf_ptr, struct lpfc_dmabuf, list); lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); } } phba->elsbuf_prev_cnt = phba->elsbuf_cnt; /* If there is no heart beat outstanding, issue a heartbeat command */ if (phba->cfg_enable_hba_heartbeat) { /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ spin_lock_irq(&phba->pport->work_port_lock); if (time_after(phba->last_completion_time + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (phba->hba_flag & HBA_HBEAT_INP) tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); else tmo = (1000 * LPFC_HB_MBOX_INTERVAL); goto out; } spin_unlock_irq(&phba->pport->work_port_lock); /* Check if a MBX_HEARTBEAT is already in progress */ if (phba->hba_flag & HBA_HBEAT_INP) { /* * If heart beat timeout called with HBA_HBEAT_INP set * we need to give the hb mailbox cmd a chance to * complete or TMO. */ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0459 Adapter heartbeat still outstanding: " "last compl time was %d ms.\n", jiffies_to_msecs(jiffies - phba->last_completion_time)); tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); } else { if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && (list_empty(&psli->mboxq))) { retval = lpfc_issue_hb_mbox(phba); if (retval) { tmo = (1000 * LPFC_HB_MBOX_INTERVAL); goto out; } phba->skipped_hb = 0; } else if (time_before_eq(phba->last_completion_time, phba->skipped_hb)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2857 Last completion time not " " updated in %d ms\n", jiffies_to_msecs(jiffies - phba->last_completion_time)); } else phba->skipped_hb = jiffies; tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); goto out; } } else { /* Check to see if we want to force a MBX_HEARTBEAT */ if (phba->hba_flag & HBA_HBEAT_TMO) { retval = lpfc_issue_hb_mbox(phba); if (retval) tmo = (1000 * LPFC_HB_MBOX_INTERVAL); else tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); goto out; } tmo = (1000 * LPFC_HB_MBOX_INTERVAL); } out: mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); } /** * lpfc_offline_eratt - Bring lpfc offline on hardware error attention * @phba: pointer to lpfc hba data structure. * * This routine is called to bring the HBA offline when HBA hardware error * other than Port Error 6 has been detected. 
**/ static void lpfc_offline_eratt(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_reset_barrier(phba); spin_lock_irq(&phba->hbalock); lpfc_sli_brdreset(phba); spin_unlock_irq(&phba->hbalock); lpfc_hba_down_post(phba); lpfc_sli_brdready(phba, HS_MBRDY); lpfc_unblock_mgmt_io(phba); phba->link_state = LPFC_HBA_ERROR; return; } /** * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention * @phba: pointer to lpfc hba data structure. * * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { spin_lock_irq(&phba->hbalock); if (phba->link_state == LPFC_HBA_ERROR && test_bit(HBA_PCI_ERR, &phba->bit_flags)) { spin_unlock_irq(&phba->hbalock); return; } phba->link_state = LPFC_HBA_ERROR; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_sli_flush_io_rings(phba); lpfc_offline(phba); lpfc_hba_down_post(phba); lpfc_unblock_mgmt_io(phba); } /** * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler * @phba: pointer to lpfc hba data structure. * * This routine is invoked to handle the deferred HBA hardware error * conditions. This type of error is indicated by HBA by setting ER1 * and another ER bit in the host status register. The driver will * wait until the ER1 bit clears before handling the error condition. **/ static void lpfc_handle_deferred_eratt(struct lpfc_hba *phba) { uint32_t old_host_status = phba->work_hs; struct lpfc_sli *psli = &phba->sli; /* If the pci channel is offline, ignore possible errors, * since we cannot communicate with the pci card anyway. */ if (pci_channel_offline(phba->pcidev)) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~DEFER_ERATT; spin_unlock_irq(&phba->hbalock); return; } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0479 Deferred Adapter Hardware Error " "Data: x%x x%x x%x\n", phba->work_hs, phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* * Firmware stops when it triggred erratt. That could cause the I/Os * dropped by the firmware. Error iocb (I/O) on txcmplq and let the * SCSI layer retry it after re-establishing link. */ lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then * attempt to restart it. */ lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); /* Wait for the ER1 bit to clear.*/ while (phba->work_hs & HS_FFER1) { msleep(100); if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { phba->work_hs = UNPLUG_ERR ; break; } /* If driver is unloading let the worker thread continue */ if (phba->pport->load_flag & FC_UNLOADING) { phba->work_hs = 0; break; } } /* * This is to ptrotect against a race condition in which * first write to the host attention register clear the * host status register. 
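 *
 * In other words: if the polling loop above saw work_hs go to zero only
 * because an earlier host-attention write cleared the status register
 * (and the driver is not unloading), the code below restores the status
 * captured on entry, minus the HS_FFER1 bit that has already been
 * waited out.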
*/ if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) phba->work_hs = old_host_status & ~HS_FFER1; spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~DEFER_ERATT; spin_unlock_irq(&phba->hbalock); phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); phba->work_status[1] = readl(phba->MBslimaddr + 0xac); } static void lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) { struct lpfc_board_event_header board_event; struct Scsi_Host *shost; board_event.event_type = FC_REG_BOARD_EVENT; board_event.subcategory = LPFC_EVENT_PORTINTERR; shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(board_event), (char *) &board_event, LPFC_NL_VENDOR_ID); } /** * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler * @phba: pointer to lpfc hba data structure. * * This routine is invoked to handle the following HBA hardware error * conditions: * 1 - HBA error attention interrupt * 2 - DMA ring index out of range * 3 - Mailbox command came back as unknown **/ static void lpfc_handle_eratt_s3(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; uint32_t event_data; unsigned long temperature; struct temp_event temp_event_data; struct Scsi_Host *shost; /* If the pci channel is offline, ignore possible errors, * since we cannot communicate with the pci card anyway. */ if (pci_channel_offline(phba->pcidev)) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~DEFER_ERATT; spin_unlock_irq(&phba->hbalock); return; } /* If resets are disabled then leave the HBA alone and return */ if (!phba->cfg_enable_hba_reset) return; /* Send an internal error event to mgmt application */ lpfc_board_errevt_to_mgmt(phba); if (phba->hba_flag & DEFER_ERATT) lpfc_handle_deferred_eratt(phba); if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { if (phba->work_hs & HS_FFER6) /* Re-establishing Link */ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "1301 Re-establishing Link " "Data: x%x x%x x%x\n", phba->work_hs, phba->work_status[0], phba->work_status[1]); if (phba->work_hs & HS_FFER8) /* Device Zeroization */ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "2861 Host Authentication device " "zeroization Data:x%x x%x x%x\n", phba->work_hs, phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* * Firmware stops when it triggled erratt with HS_FFER6. * That could cause the I/Os dropped by the firmware. * Error iocb (I/O) on txcmplq and let the SCSI layer * retry it after re-establishing link. */ lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then * attempt to restart it. 
*/ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); if (lpfc_online(phba) == 0) { /* Initialize the HBA */ lpfc_unblock_mgmt_io(phba); return; } lpfc_unblock_mgmt_io(phba); } else if (phba->work_hs & HS_CRIT_TEMP) { temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; temp_event_data.event_code = LPFC_CRIT_TEMP; temp_event_data.data = (uint32_t)temperature; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0406 Adapter maximum temperature exceeded " "(%ld), taking this port offline " "Data: x%x x%x x%x\n", temperature, phba->work_hs, phba->work_status[0], phba->work_status[1]); shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), (char *) &temp_event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); spin_lock_irq(&phba->hbalock); phba->over_temp_state = HBA_OVER_TEMP; spin_unlock_irq(&phba->hbalock); lpfc_offline_eratt(phba); } else { /* The if clause above forces this code path when the status * failure is a value other than FFER6. Do not call the offline * twice. This is the adapter hardware error path. */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0457 Adapter Hardware Error " "Data: x%x x%x x%x\n", phba->work_hs, phba->work_status[0], phba->work_status[1]); event_data = FC_REG_DUMP_EVENT; shost = lpfc_shost_from_vport(vport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(event_data), (char *) &event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); lpfc_offline_eratt(phba); } return; } /** * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg * @phba: pointer to lpfc hba data structure. * @mbx_action: flag for mailbox shutdown action. * @en_rn_msg: send reset/port recovery message. * This routine is invoked to perform an SLI4 port PCI function reset in * response to port status register polling attention. It waits for port * status register (ERR, RDY, RN) bits before proceeding with function reset. * During this process, interrupt vectors are freed and later requested * for handling possible port resource change. **/ static int lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, bool en_rn_msg) { int rc; uint32_t intr_mode; LPFC_MBOXQ_t *mboxq; if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { /* * On error status condition, driver need to wait for port * ready before performing reset. */ rc = lpfc_sli4_pdev_status_reg_wait(phba); if (rc) return rc; } /* need reset: attempt for port recovery */ if (en_rn_msg) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2887 Reset Needed: Attempting Port " "Recovery...\n"); /* If we are no wait, the HBA has been reset and is not * functional, thus we should clear * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 
*/ if (mbx_action == LPFC_MBX_NO_WAIT) { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; if (phba->sli.mbox_active) { mboxq = phba->sli.mbox_active; mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; __lpfc_mbox_cmpl_put(phba, mboxq); phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = NULL; } spin_unlock_irq(&phba->hbalock); } lpfc_offline_prep(phba, mbx_action); lpfc_sli_flush_io_rings(phba); lpfc_offline(phba); /* release interrupt for possible resource change */ lpfc_sli4_disable_intr(phba); rc = lpfc_sli_brdrestart(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6309 Failed to restart board\n"); return rc; } /* request and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3175 Failed to enable interrupt\n"); return -EIO; } phba->intr_mode = intr_mode; rc = lpfc_online(phba); if (rc == 0) lpfc_unblock_mgmt_io(phba); return rc; } /** * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler * @phba: pointer to lpfc hba data structure. * * This routine is invoked to handle the SLI4 HBA hardware error attention * conditions. **/ static void lpfc_handle_eratt_s4(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; uint32_t event_data; struct Scsi_Host *shost; uint32_t if_type; struct lpfc_register portstat_reg = {0}; uint32_t reg_err1, reg_err2; uint32_t uerrlo_reg, uemasklo_reg; uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; bool en_rn_msg = true; struct temp_event temp_event_data; struct lpfc_register portsmphr_reg; int rc, i; /* If the pci channel is offline, ignore possible errors, since * we cannot communicate with the pci card anyway. */ if (pci_channel_offline(phba->pcidev)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3166 pci channel is offline\n"); lpfc_sli_flush_io_rings(phba); return; } memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: pci_rd_rc1 = lpfc_readl( phba->sli4_hba.u.if_type0.UERRLOregaddr, &uerrlo_reg); pci_rd_rc2 = lpfc_readl( phba->sli4_hba.u.if_type0.UEMASKLOregaddr, &uemasklo_reg); /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) return; if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { lpfc_sli4_offline_eratt(phba); return; } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "7623 Checking UE recoverable"); for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, &portsmphr_reg.word0)) continue; smphr_port_status = bf_get(lpfc_port_smphr_port_status, &portsmphr_reg); if ((smphr_port_status & LPFC_PORT_SEM_MASK) == LPFC_PORT_SEM_UE_RECOVERABLE) break; /*Sleep for 1Sec, before checking SEMAPHORE */ msleep(1000); } lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "4827 smphr_port_status x%x : Waited %dSec", smphr_port_status, i); /* Recoverable UE, reset the HBA device */ if ((smphr_port_status & LPFC_PORT_SEM_MASK) == LPFC_PORT_SEM_UE_RECOVERABLE) { for (i = 0; i < 20; i++) { msleep(1000); if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, &portsmphr_reg.word0) && (LPFC_POST_STAGE_PORT_READY == bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))) { rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, en_rn_msg); if (rc == 0) return; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "4215 Failed to recover UE"); break; } } } lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, "7624 Firmware not ready: Failing UE recovery," " waited %dSec", i); phba->link_state = LPFC_HBA_ERROR; break; case LPFC_SLI_INTF_IF_TYPE_2: case LPFC_SLI_INTF_IF_TYPE_6: pci_rd_rc1 = lpfc_readl( phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3151 PCI bus read access failure: x%x\n", readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); lpfc_sli4_offline_eratt(phba); return; } reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2889 Port Overtemperature event, " "taking port offline Data: x%x x%x\n", reg_err1, reg_err2); phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; temp_event_data.event_code = LPFC_CRIT_TEMP; temp_event_data.data = 0xFFFFFFFF; shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), (char *)&temp_event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); spin_lock_irq(&phba->hbalock); phba->over_temp_state = HBA_OVER_TEMP; spin_unlock_irq(&phba->hbalock); lpfc_sli4_offline_eratt(phba); return; } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3143 Port Down: Firmware Update " "Detected\n"); en_rn_msg = false; } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3144 Port Down: Debug Dump\n"); else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3145 Port Down: Provisioning\n"); /* If resets are disabled then leave the HBA alone and return */ if (!phba->cfg_enable_hba_reset) return; /* Check port status register for function reset */ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, en_rn_msg); if (rc == 0) { /* don't report event on forced debug dump */ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) return; else break; } /* fall through for not able to recover */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3152 Unrecoverable error\n"); lpfc_sli4_offline_eratt(phba); break; case LPFC_SLI_INTF_IF_TYPE_1: default: break; } lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "3123 Report dump event to upper layer\n"); /* Send an internal error event to mgmt application */ lpfc_board_errevt_to_mgmt(phba); event_data = FC_REG_DUMP_EVENT; shost = lpfc_shost_from_vport(vport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(event_data), (char *) &event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); } /** * lpfc_handle_eratt - Wrapper func for handling hba error attention * @phba: pointer to lpfc HBA data structure. * * This routine wraps the actual SLI3 or SLI4 hba error attention handling * routine from the API jump table function pointer from the lpfc_hba struct. * * Return codes * 0 - success. * Any other value - error. **/ void lpfc_handle_eratt(struct lpfc_hba *phba) { (*phba->lpfc_handle_eratt)(phba); } /** * lpfc_handle_latt - The HBA link event handler * @phba: pointer to lpfc hba data structure. * * This routine is invoked from the worker thread to handle a HBA host * attention link event. 
SLI3 only. **/ void lpfc_handle_latt(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *pmb; volatile uint32_t control; int rc = 0; pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { rc = 1; goto lpfc_handle_latt_err_exit; } rc = lpfc_mbox_rsrc_prep(phba, pmb); if (rc) { rc = 2; mempool_free(pmb, phba->mbox_mem_pool); goto lpfc_handle_latt_err_exit; } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); psli->slistat.link_event++; lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = vport; /* Block ELS IOCBs until we have processed this mbox command */ phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = 4; goto lpfc_handle_latt_free_mbuf; } /* Clear Link Attention in HA REG */ spin_lock_irq(&phba->hbalock); writel(HA_LATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); return; lpfc_handle_latt_free_mbuf: phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); lpfc_handle_latt_err_exit: /* Enable Link attention interrupts */ spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ /* Clear Link Attention in HA REG */ writel(HA_LATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); lpfc_linkdown(phba); phba->link_state = LPFC_HBA_ERROR; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); return; } static void lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex) { int i, j; while (length > 0) { /* Look for Serial Number */ if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) { *pindex += 2; i = vpd[*pindex]; *pindex += 1; j = 0; length -= (3+i); while (i--) { phba->SerialNumber[j++] = vpd[(*pindex)++]; if (j == 31) break; } phba->SerialNumber[j] = 0; continue; } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) { phba->vpd_flag |= VPD_MODEL_DESC; *pindex += 2; i = vpd[*pindex]; *pindex += 1; j = 0; length -= (3+i); while (i--) { phba->ModelDesc[j++] = vpd[(*pindex)++]; if (j == 255) break; } phba->ModelDesc[j] = 0; continue; } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) { phba->vpd_flag |= VPD_MODEL_NAME; *pindex += 2; i = vpd[*pindex]; *pindex += 1; j = 0; length -= (3+i); while (i--) { phba->ModelName[j++] = vpd[(*pindex)++]; if (j == 79) break; } phba->ModelName[j] = 0; continue; } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) { phba->vpd_flag |= VPD_PROGRAM_TYPE; *pindex += 2; i = vpd[*pindex]; *pindex += 1; j = 0; length -= (3+i); while (i--) { phba->ProgramType[j++] = vpd[(*pindex)++]; if (j == 255) break; } phba->ProgramType[j] = 0; continue; } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) { phba->vpd_flag |= VPD_PORT; *pindex += 2; i = vpd[*pindex]; *pindex += 1; j = 0; length -= (3 + i); while (i--) { if ((phba->sli_rev == LPFC_SLI_REV4) && (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET)) { j++; (*pindex)++; } else phba->Port[j++] = vpd[(*pindex)++]; if (j == 19) break; } if ((phba->sli_rev != LPFC_SLI_REV4) || (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_NON)) phba->Port[j] = 0; 
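/*
 * Each keyword record handled in this loop has the same shape: a two
 * character keyword ("SN", "V1" ... "V4"), a one byte length, then
 * that many bytes of data. Illustrative example with hypothetical
 * bytes: the sequence 'V' '2' 0x05 'L' 'P' 'e' '3' '2' would store
 * "LPe32" in phba->ModelName.
 */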
continue; } else { *pindex += 2; i = vpd[*pindex]; *pindex += 1; *pindex += i; length -= (3 + i); } } } /** * lpfc_parse_vpd - Parse VPD (Vital Product Data) * @phba: pointer to lpfc hba data structure. * @vpd: pointer to the vital product data. * @len: length of the vital product data in bytes. * * This routine parses the Vital Product Data (VPD). The VPD is treated as * an array of characters. In this routine, the ModelName, ProgramType, and * ModelDesc, etc. fields of the phba data structure will be populated. * * Return codes * 0 - pointer to the VPD passed in is NULL * 1 - success **/ int lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) { uint8_t lenlo, lenhi; int Length; int i; int finished = 0; int index = 0; if (!vpd) return 0; /* Vital Product */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0455 Vital Product Data: x%x x%x x%x x%x\n", (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], (uint32_t) vpd[3]); while (!finished && (index < (len - 4))) { switch (vpd[index]) { case 0x82: case 0x91: index += 1; lenlo = vpd[index]; index += 1; lenhi = vpd[index]; index += 1; i = ((((unsigned short)lenhi) << 8) + lenlo); index += i; break; case 0x90: index += 1; lenlo = vpd[index]; index += 1; lenhi = vpd[index]; index += 1; Length = ((((unsigned short)lenhi) << 8) + lenlo); if (Length > len - index) Length = len - index; lpfc_fill_vpd(phba, vpd, Length, &index); finished = 0; break; case 0x78: finished = 1; break; default: index ++; break; } } return(1); } /** * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description * @phba: pointer to lpfc hba data structure. * @mdp: pointer to the data structure to hold the derived model name. * @descp: pointer to the data structure to hold the derived description. * * This routine retrieves HBA's description based on its registered PCI device * ID. The @descp passed into this function points to an array of 256 chars. It * shall be returned with the model name, maximum speed, and the host bus type. * The @mdp passed into this function points to an array of 80 chars. When the * function returns, the @mdp will be filled with the model name. **/ static void lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) { uint16_t sub_dev_id = phba->pcidev->subsystem_device; char *model = "<Unknown>"; int tbolt = 0; switch (sub_dev_id) { case PCI_DEVICE_ID_CLRY_161E: model = "161E"; break; case PCI_DEVICE_ID_CLRY_162E: model = "162E"; break; case PCI_DEVICE_ID_CLRY_164E: model = "164E"; break; case PCI_DEVICE_ID_CLRY_161P: model = "161P"; break; case PCI_DEVICE_ID_CLRY_162P: model = "162P"; break; case PCI_DEVICE_ID_CLRY_164P: model = "164P"; break; case PCI_DEVICE_ID_CLRY_321E: model = "321E"; break; case PCI_DEVICE_ID_CLRY_322E: model = "322E"; break; case PCI_DEVICE_ID_CLRY_324E: model = "324E"; break; case PCI_DEVICE_ID_CLRY_321P: model = "321P"; break; case PCI_DEVICE_ID_CLRY_322P: model = "322P"; break; case PCI_DEVICE_ID_CLRY_324P: model = "324P"; break; case PCI_DEVICE_ID_TLFC_2XX2: model = "2XX2"; tbolt = 1; break; case PCI_DEVICE_ID_TLFC_3162: model = "3162"; tbolt = 1; break; case PCI_DEVICE_ID_TLFC_3322: model = "3322"; tbolt = 1; break; default: model = "Unknown"; break; } if (mdp && mdp[0] == '\0') snprintf(mdp, 79, "%s", model); if (descp && descp[0] == '\0') snprintf(descp, 255, "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s", (tbolt) ? 
"ThunderLink FC " : "Celerity FC-", model, phba->Port); } /** * lpfc_get_hba_model_desc - Retrieve HBA device model name and description * @phba: pointer to lpfc hba data structure. * @mdp: pointer to the data structure to hold the derived model name. * @descp: pointer to the data structure to hold the derived description. * * This routine retrieves HBA's description based on its registered PCI device * ID. The @descp passed into this function points to an array of 256 chars. It * shall be returned with the model name, maximum speed, and the host bus type. * The @mdp passed into this function points to an array of 80 chars. When the * function returns, the @mdp will be filled with the model name. **/ static void lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) { lpfc_vpd_t *vp; uint16_t dev_id = phba->pcidev->device; int max_speed; int GE = 0; int oneConnect = 0; /* default is not a oneConnect */ struct { char *name; char *bus; char *function; } m = {"<Unknown>", "", ""}; if (mdp && mdp[0] != '\0' && descp && descp[0] != '\0') return; if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) { lpfc_get_atto_model_desc(phba, mdp, descp); return; } if (phba->lmt & LMT_64Gb) max_speed = 64; else if (phba->lmt & LMT_32Gb) max_speed = 32; else if (phba->lmt & LMT_16Gb) max_speed = 16; else if (phba->lmt & LMT_10Gb) max_speed = 10; else if (phba->lmt & LMT_8Gb) max_speed = 8; else if (phba->lmt & LMT_4Gb) max_speed = 4; else if (phba->lmt & LMT_2Gb) max_speed = 2; else if (phba->lmt & LMT_1Gb) max_speed = 1; else max_speed = 0; vp = &phba->vpd; switch (dev_id) { case PCI_DEVICE_ID_FIREFLY: m = (typeof(m)){"LP6000", "PCI", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SUPERFLY: if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) m = (typeof(m)){"LP7000", "PCI", ""}; else m = (typeof(m)){"LP7000E", "PCI", ""}; m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_DRAGONFLY: m = (typeof(m)){"LP8000", "PCI", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_CENTAUR: if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) m = (typeof(m)){"LP9002", "PCI", ""}; else m = (typeof(m)){"LP9000", "PCI", ""}; m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_RFLY: m = (typeof(m)){"LP952", "PCI", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PEGASUS: m = (typeof(m)){"LP9802", "PCI-X", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_THOR: m = (typeof(m)){"LP10000", "PCI-X", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_VIPER: m = (typeof(m)){"LPX1000", "PCI-X", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PFLY: m = (typeof(m)){"LP982", "PCI-X", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TFLY: m = (typeof(m)){"LP1050", "PCI-X", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS: m = (typeof(m)){"LP11000", "PCI-X2", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_SCSP: m = (typeof(m)){"LP11000-SP", "PCI-X2", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_DCSP: m = (typeof(m)){"LP11002-SP", "PCI-X2", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE: m = (typeof(m)){"LPe1000", "PCIe", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_SCSP: m = (typeof(m)){"LPe1000-SP", "PCIe", 
"Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_DCSP: m = (typeof(m)){"LPe1002-SP", "PCIe", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BMID: m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BSMB: m = (typeof(m)){"LP111", "PCI-X2", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR: m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR_SCSP: m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR_DCSP: m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_ZMID: m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZSMB: m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP101: m = (typeof(m)){"LP101", "PCI-X", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP10000S: m = (typeof(m)){"LP10000-S", "PCI", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP11000S: m = (typeof(m)){"LP11000-S", "PCI-X2", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LPE11000S: m = (typeof(m)){"LPe11000-S", "PCIe", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT: m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_MID: m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_SMB: m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_DCSP: m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_SCSP: m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT_S: m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_VF: m = (typeof(m)){"LPev12000", "PCIe IOV", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_PF: m = (typeof(m)){"LPev12000", "PCIe IOV", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)){"LPemv12002-S", "PCIe IOV", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; break; case PCI_DEVICE_ID_TOMCAT: oneConnect = 1; m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; break; case PCI_DEVICE_ID_FALCON: m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", "EmulexSecure Fibre"}; break; case PCI_DEVICE_ID_BALIUS: m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC: m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC_VF: m = (typeof(m)){"LPe16000", "PCIe", "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FCOE: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; break; case PCI_DEVICE_ID_LANCER_FCOE_VF: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", "Obsolete, Unsupported FCoE"}; break; case PCI_DEVICE_ID_LANCER_G6_FC: m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_G7_FC: m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_G7P_FC: m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: 
oneConnect = 1; m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; break; default: m = (typeof(m)){"Unknown", "", ""}; break; } if (mdp && mdp[0] == '\0') snprintf(mdp, 79,"%s", m.name); /* * oneConnect hba requires special processing, they are all initiators * and we put the port number on the end */ if (descp && descp[0] == '\0') { if (oneConnect) snprintf(descp, 255, "Emulex OneConnect %s, %s Initiator %s", m.name, m.function, phba->Port); else if (max_speed == 0) snprintf(descp, 255, "Emulex %s %s %s", m.name, m.bus, m.function); else snprintf(descp, 255, "Emulex %s %d%s %s %s", m.name, max_speed, (GE) ? "GE" : "Gb", m.bus, m.function); } } /** * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring * @phba: pointer to lpfc hba data structure. * @pring: pointer to a IOCB ring. * @cnt: the number of IOCBs to be posted to the IOCB ring. * * This routine posts a given number of IOCBs with the associated DMA buffer * descriptors specified by the cnt argument to the given IOCB ring. * * Return codes * The number of IOCBs NOT able to be posted to the IOCB ring. **/ int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) { IOCB_t *icmd; struct lpfc_iocbq *iocb; struct lpfc_dmabuf *mp1, *mp2; cnt += pring->missbufcnt; /* While there are buffers to post */ while (cnt > 0) { /* Allocate buffer for command iocb */ iocb = lpfc_sli_get_iocbq(phba); if (iocb == NULL) { pring->missbufcnt = cnt; return cnt; } icmd = &iocb->iocb; /* 2 buffers can be posted per command */ /* Allocate buffer to post */ mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); if (mp1) mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); if (!mp1 || !mp1->virt) { kfree(mp1); lpfc_sli_release_iocbq(phba, iocb); pring->missbufcnt = cnt; return cnt; } INIT_LIST_HEAD(&mp1->list); /* Allocate buffer to post */ if (cnt > 1) { mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); if (mp2) mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp2->phys); if (!mp2 || !mp2->virt) { kfree(mp2); lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); lpfc_sli_release_iocbq(phba, iocb); pring->missbufcnt = cnt; return cnt; } INIT_LIST_HEAD(&mp2->list); } else { mp2 = NULL; } icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; icmd->ulpBdeCount = 1; cnt--; if (mp2) { icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; cnt--; icmd->ulpBdeCount = 2; } icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; icmd->ulpLe = 1; if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == IOCB_ERROR) { lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); cnt++; if (mp2) { lpfc_mbuf_free(phba, mp2->virt, mp2->phys); kfree(mp2); cnt++; } lpfc_sli_release_iocbq(phba, iocb); pring->missbufcnt = cnt; return cnt; } lpfc_sli_ringpostbuf_put(phba, pring, mp1); if (mp2) lpfc_sli_ringpostbuf_put(phba, pring, mp2); } pring->missbufcnt = 0; return 0; } /** * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring * @phba: pointer to lpfc hba data structure. * * This routine posts initial receive IOCB buffers to the ELS ring. The * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is * set to 64 IOCBs. SLI3 only. 
* * Return codes * 0 - success (currently always success) **/ static int lpfc_post_rcv_buf(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; /* Ring 0, ELS / CT buffers */ lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); /* Ring 2 - FCP no buffers needed */ return 0; } #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) /** * lpfc_sha_init - Set up initial array of hash table entries * @HashResultPointer: pointer to an array as hash table. * * This routine sets up the initial values to the array of hash table entries * for the LC HBAs. **/ static void lpfc_sha_init(uint32_t * HashResultPointer) { HashResultPointer[0] = 0x67452301; HashResultPointer[1] = 0xEFCDAB89; HashResultPointer[2] = 0x98BADCFE; HashResultPointer[3] = 0x10325476; HashResultPointer[4] = 0xC3D2E1F0; } /** * lpfc_sha_iterate - Iterate initial hash table with the working hash table * @HashResultPointer: pointer to an initial/result hash table. * @HashWorkingPointer: pointer to an working hash table. * * This routine iterates an initial hash table pointed by @HashResultPointer * with the values from the working hash table pointeed by @HashWorkingPointer. * The results are putting back to the initial hash table, returned through * the @HashResultPointer as the result hash table. **/ static void lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) { int t; uint32_t TEMP; uint32_t A, B, C, D, E; t = 16; do { HashWorkingPointer[t] = S(1, HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^ HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); } while (++t <= 79); t = 0; A = HashResultPointer[0]; B = HashResultPointer[1]; C = HashResultPointer[2]; D = HashResultPointer[3]; E = HashResultPointer[4]; do { if (t < 20) { TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; } else if (t < 40) { TEMP = (B ^ C ^ D) + 0x6ED9EBA1; } else if (t < 60) { TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; } else { TEMP = (B ^ C ^ D) + 0xCA62C1D6; } TEMP += S(5, A) + E + HashWorkingPointer[t]; E = D; D = C; C = S(30, B); B = A; A = TEMP; } while (++t <= 79); HashResultPointer[0] += A; HashResultPointer[1] += B; HashResultPointer[2] += C; HashResultPointer[3] += D; HashResultPointer[4] += E; } /** * lpfc_challenge_key - Create challenge key based on WWPN of the HBA * @RandomChallenge: pointer to the entry of host challenge random number array. * @HashWorking: pointer to the entry of the working hash array. * * This routine calculates the working hash array referred by @HashWorking * from the challenge random numbers associated with the host, referred by * @RandomChallenge. The result is put into the entry of the working hash * array and returned by reference through @HashWorking. **/ static void lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) { *HashWorking = (*RandomChallenge ^ *HashWorking); } /** * lpfc_hba_init - Perform special handling for LC HBA initialization * @phba: pointer to lpfc hba data structure. * @hbainit: pointer to an array of unsigned 32-bit integers. * * This routine performs the special handling for LC HBA initialization. 
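 *
 * The S(N, V) macro defined above is a 32-bit rotate-left by N bits
 * (for example, S(1, 0x80000000) on a 32-bit unsigned value yields
 * 0x00000001), and lpfc_sha_iterate() uses it to run 80 SHA-1 style
 * mixing rounds over the working array that this routine builds from
 * the WWNN and the random challenge data.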
**/ void lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) { int t; uint32_t *HashWorking; uint32_t *pwwnn = (uint32_t *) phba->wwnn; HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); if (!HashWorking) return; HashWorking[0] = HashWorking[78] = *pwwnn++; HashWorking[1] = HashWorking[79] = *pwwnn; for (t = 0; t < 7; t++) lpfc_challenge_key(phba->RandomData + t, HashWorking + t); lpfc_sha_init(hbainit); lpfc_sha_iterate(hbainit, HashWorking); kfree(HashWorking); } /** * lpfc_cleanup - Performs vport cleanups before deleting a vport * @vport: pointer to a virtual N_Port data structure. * * This routine performs the necessary cleanups before deleting the @vport. * It invokes the discovery state machine to perform necessary state * transitions and to release the ndlps associated with the @vport. Note, * the physical port is treated as @vport 0. **/ void lpfc_cleanup(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; int i = 0; if (phba->link_state > LPFC_LINK_DOWN) lpfc_port_link_failure(vport); /* Clean up VMID resources */ if (lpfc_is_vmid_enabled(phba)) lpfc_vmid_vport_cleanup(vport); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (vport->port_type != LPFC_PHYSICAL_PORT && ndlp->nlp_DID == Fabric_DID) { /* Just free up ndlp with Fabric_DID for vports */ lpfc_nlp_put(ndlp); continue; } if (ndlp->nlp_DID == Fabric_Cntl_DID && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_put(ndlp); continue; } /* Fabric Ports not in UNMAPPED state are cleaned up in the * DEVICE_RM event. */ if (ndlp->nlp_type & NLP_FABRIC && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } /* This is a special case flush to return all * IOs before entering this loop. There are * two points in the code where a flush is * avoided if the FC_UNLOADING flag is set. * one is in the multipool destroy, * (this prevents a crash) and the other is * in the nvme abort handler, ( also prevents * a crash). Both of these exceptions are * cases where the slot is still accessible. * The flush here is only when the pci slot * is offline. */ if (vport->load_flag & FC_UNLOADING && pci_channel_offline(phba->pcidev)) lpfc_sli_flush_io_rings(vport->phba); /* At this point, ALL ndlp's should be gone * because of the previous NLP_EVT_DEVICE_RM. * Lets wait for this to happen, if needed. */ while (!list_empty(&vport->fc_nodes)) { if (i++ > 3000) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0233 Nodelist not empty\n"); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " "refcnt:%d xflags x%x nflag x%x\n", ndlp->nlp_DID, (void *)ndlp, kref_read(&ndlp->kref), ndlp->fc4_xpt_flags, ndlp->nlp_flag); } break; } /* Wait for any activity on ndlps to settle */ msleep(10); } lpfc_cleanup_vports_rrqs(vport, NULL); } /** * lpfc_stop_vport_timers - Stop all the timers associated with a vport * @vport: pointer to a virtual N_Port data structure. * * This routine stops all the timers associated with a @vport. This function * is invoked before disabling or deleting a @vport. Note that the physical * port is treated as @vport 0. 
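 *
 * As an aside (illustrative only), the bounded wait at the end of
 * lpfc_cleanup() above follows a common pattern: poll a condition every
 * 10 ms and give up after a fixed number of iterations (about 30 seconds
 * for 3000 polls) so a stuck node list cannot hang the caller forever.
 * The helper below sketches that pattern for a hypothetical list and
 * relies only on list_empty() and msleep(), which this file already uses.
 */

static bool demo_wait_list_empty(struct list_head *head, int max_polls)
{
	int polls = 0;

	while (!list_empty(head)) {
		if (polls++ > max_polls)
			return false;	/* give up; caller logs what is left */
		msleep(10);		/* let outstanding references drop */
	}
	return true;
}

/*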
**/ void lpfc_stop_vport_timers(struct lpfc_vport *vport) { del_timer_sync(&vport->els_tmofunc); del_timer_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; } /** * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer * @phba: pointer to lpfc hba data structure. * * This routine stops the SLI4 FCF rediscover wait timer if it's on. The * caller of this routine should already hold the host lock. **/ void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { /* Clear pending FCF rediscovery wait flag */ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; /* Now, try to stop the timer */ del_timer(&phba->fcf.redisc_wait); } /** * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer * @phba: pointer to lpfc hba data structure. * * This routine stops the SLI4 FCF rediscover wait timer if it's on. It * checks whether the FCF rediscovery wait timer is pending with the host * lock held before proceeding with disabling the timer and clearing the * wait timer pendig flag. **/ void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { spin_lock_irq(&phba->hbalock); if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { /* FCF rediscovery timer already fired or stopped */ spin_unlock_irq(&phba->hbalock); return; } __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); /* Clear failover in progress flags */ phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); spin_unlock_irq(&phba->hbalock); } /** * lpfc_cmf_stop - Stop CMF processing * @phba: pointer to lpfc hba data structure. * * This is called when the link goes down or if CMF mode is turned OFF. * It is also called when going offline or unloaded just before the * congestion info buffer is unregistered. **/ void lpfc_cmf_stop(struct lpfc_hba *phba) { int cpu; struct lpfc_cgn_stat *cgs; /* We only do something if CMF is enabled */ if (!phba->sli4_hba.pc_sli4_params.cmf) return; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6221 Stop CMF / Cancel Timer\n"); /* Cancel the CMF timer */ hrtimer_cancel(&phba->cmf_stats_timer); hrtimer_cancel(&phba->cmf_timer); /* Zero CMF counters */ atomic_set(&phba->cmf_busy, 0); for_each_present_cpu(cpu) { cgs = per_cpu_ptr(phba->cmf_stat, cpu); atomic64_set(&cgs->total_bytes, 0); atomic64_set(&cgs->rcv_bytes, 0); atomic_set(&cgs->rx_io_cnt, 0); atomic64_set(&cgs->rx_latency, 0); } atomic_set(&phba->cmf_bw_wait, 0); /* Resume any blocked IO - Queue unblock on workqueue */ queue_work(phba->wq, &phba->unblock_request_work); } static inline uint64_t lpfc_get_max_line_rate(struct lpfc_hba *phba) { uint64_t rate = lpfc_sli_port_speed_get(phba); return ((((unsigned long)rate) * 1024 * 1024) / 10); } void lpfc_cmf_signal_init(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6223 Signal CMF init\n"); /* Use the new fc_linkspeed to recalculate */ phba->cmf_interval_rate = LPFC_CMF_INTERVAL; phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * phba->cmf_interval_rate, 1000); phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; /* This is a signal to firmware to sync up CMF BW with link speed */ lpfc_issue_cmf_sync_wqe(phba, 0, 0); } /** * lpfc_cmf_start - Start CMF processing * @phba: pointer to lpfc hba data structure. * * This is called when the link comes up or if CMF mode is turned OFF * to Monitor or Managed. 
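 *
 * For reference (illustrative only): the byte budget programmed by
 * lpfc_cmf_signal_init() above comes from a simple unit conversion.  The
 * port speed is reported in Mb/s (for example 16000 for 16G FC); scaling
 * by 1024 * 1024 gives line bits per second, dividing by 10 approximates
 * bytes per second (roughly ten line bits per encoded byte), and scaling
 * by the CMF timer interval in milliseconds gives bytes per interval.
 * The helper and the figures below are examples, not driver values.
 */

static u64 demo_cmf_bytes_per_interval(u64 mbps, u32 interval_ms)
{
	u64 bytes_per_sec = div_u64(mbps * 1024 * 1024, 10);

	return div_u64(bytes_per_sec * interval_ms, 1000);
}

/* e.g. demo_cmf_bytes_per_interval(16000, 100) is roughly 167 MB */

/*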
**/ void lpfc_cmf_start(struct lpfc_hba *phba) { struct lpfc_cgn_stat *cgs; int cpu; /* We only do something if CMF is enabled */ if (!phba->sli4_hba.pc_sli4_params.cmf || phba->cmf_active_mode == LPFC_CFG_OFF) return; /* Reinitialize congestion buffer info */ lpfc_init_congestion_buf(phba); atomic_set(&phba->cgn_fabric_warn_cnt, 0); atomic_set(&phba->cgn_fabric_alarm_cnt, 0); atomic_set(&phba->cgn_sync_alarm_cnt, 0); atomic_set(&phba->cgn_sync_warn_cnt, 0); atomic_set(&phba->cmf_busy, 0); for_each_present_cpu(cpu) { cgs = per_cpu_ptr(phba->cmf_stat, cpu); atomic64_set(&cgs->total_bytes, 0); atomic64_set(&cgs->rcv_bytes, 0); atomic_set(&cgs->rx_io_cnt, 0); atomic64_set(&cgs->rx_latency, 0); } phba->cmf_latency.tv_sec = 0; phba->cmf_latency.tv_nsec = 0; lpfc_cmf_signal_init(phba); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6222 Start CMF / Timer\n"); phba->cmf_timer_cnt = 0; hrtimer_start(&phba->cmf_timer, ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC), HRTIMER_MODE_REL); hrtimer_start(&phba->cmf_stats_timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC), HRTIMER_MODE_REL); /* Setup for latency check in IO cmpl routines */ ktime_get_real_ts64(&phba->cmf_latency); atomic_set(&phba->cmf_bw_wait, 0); atomic_set(&phba->cmf_stop_io, 0); } /** * lpfc_stop_hba_timers - Stop all the timers associated with an HBA * @phba: pointer to lpfc hba data structure. * * This routine stops all the timers associated with a HBA. This function is * invoked before either putting a HBA offline or unloading the driver. **/ void lpfc_stop_hba_timers(struct lpfc_hba *phba) { if (phba->pport) lpfc_stop_vport_timers(phba->pport); cancel_delayed_work_sync(&phba->eq_delay_work); cancel_delayed_work_sync(&phba->idle_stat_delay_work); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); del_timer_sync(&phba->hb_tmofunc); if (phba->sli_rev == LPFC_SLI_REV4) { del_timer_sync(&phba->rrq_tmr); phba->hba_flag &= ~HBA_RRQ_ACTIVE; } phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: /* Stop any LightPulse device specific driver timers */ del_timer_sync(&phba->fcp_poll_timer); break; case LPFC_PCI_DEV_OC: /* Stop any OneConnect device specific driver timers */ lpfc_sli4_stop_fcf_redisc_wait_timer(phba); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0297 Invalid device group (x%x)\n", phba->pci_dev_grp); break; } return; } /** * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked * @phba: pointer to lpfc hba data structure. * @mbx_action: flag for mailbox no wait action. * * This routine marks a HBA's management interface as blocked. Once the HBA's * management interface is marked as blocked, all the user space access to * the HBA, whether they are from sysfs interface or libdfc interface will * all be blocked. The HBA is set to block the management interface when the * driver prepares the HBA interface for online or offline. 
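 *
 * As an aside (illustrative only), the counter reset in lpfc_cmf_start()
 * and lpfc_cmf_stop() above uses the usual per-CPU statistics idiom: each
 * CPU owns its own copy of the stats block, so clearing the counters means
 * walking every present CPU and zeroing that CPU's copy.  The structure
 * below is a hypothetical stand-in for the driver's congestion stats.
 */

struct demo_cgn_stat {
	atomic64_t total_bytes;
	atomic_t rx_io_cnt;
};

static void demo_reset_cgn_stats(struct demo_cgn_stat __percpu *stats)
{
	struct demo_cgn_stat *cgs;
	int cpu;

	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(stats, cpu);	/* this CPU's private copy */
		atomic64_set(&cgs->total_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
	}
}

/*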
**/ static void lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) { unsigned long iflag; uint8_t actcmd = MBX_HEARTBEAT; unsigned long timeout; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; spin_unlock_irqrestore(&phba->hbalock, iflag); if (mbx_action == LPFC_MBX_NO_WAIT) return; timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. */ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, phba->sli.mbox_active) * 1000) + jiffies; } spin_unlock_irqrestore(&phba->hbalock, iflag); /* Wait for the outstnading mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2813 Mgmt IO is Blocked %x " "- mbox cmd %x still active\n", phba->sli.sli_flag, actcmd); break; } } } /** * lpfc_sli4_node_prep - Assign RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever * an SLI4 adapter is reset and the driver is not unloading. Its purpose * is to fixup the temporary rpi assignments. **/ void lpfc_sli4_node_prep(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; int i, rpi; if (phba->sli_rev != LPFC_SLI_REV4) return; vports = lpfc_create_vport_work_array(phba); if (vports == NULL) return; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->load_flag & FC_UNLOADING) continue; list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { rpi = lpfc_sli4_alloc_rpi(phba); if (rpi == LPFC_RPI_ALLOC_ERROR) { /* TODO print log? */ continue; } ndlp->nlp_rpi = rpi; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0009 Assign RPI x%x to ndlp x%px " "DID:x%06x flg:x%x\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag); } } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_create_expedite_pool - create expedite pool * @phba: pointer to lpfc hba data structure. * * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 * to expedite pool. Mark them as expedite. **/ static void lpfc_create_expedite_pool(struct lpfc_hba *phba) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; struct lpfc_epd_pool *epd_pool; unsigned long iflag; epd_pool = &phba->epd_pool; qp = &phba->sli4_hba.hdwq[0]; spin_lock_init(&epd_pool->lock); spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); spin_lock(&epd_pool->lock); INIT_LIST_HEAD(&epd_pool->list); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &qp->lpfc_io_buf_list_put, list) { list_move_tail(&lpfc_ncmd->list, &epd_pool->list); lpfc_ncmd->expedite = true; qp->put_io_bufs--; epd_pool->count++; if (epd_pool->count >= XRI_BATCH) break; } spin_unlock(&epd_pool->lock); spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } /** * lpfc_destroy_expedite_pool - destroy expedite pool * @phba: pointer to lpfc hba data structure. * * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put * of HWQ 0. Clear the mark. 
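 *
 * As an aside (illustrative only), the mailbox wait in lpfc_block_mgmt_io()
 * above is a textbook jiffies-based bounded poll: compute a deadline with
 * msecs_to_jiffies(), re-check the condition every couple of milliseconds,
 * and use the wrap-safe time_after() to decide when to give up.  The
 * condition callback below is a hypothetical stand-in.
 */

static bool demo_poll_until(bool (*done)(void *arg), void *arg,
			    unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!done(arg)) {
		if (time_after(jiffies, deadline))
			return false;	/* timed out; caller logs and moves on */
		msleep(2);		/* same 2 ms poll as the mailbox wait */
	}
	return true;
}

/*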
**/ static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; struct lpfc_epd_pool *epd_pool; unsigned long iflag; epd_pool = &phba->epd_pool; qp = &phba->sli4_hba.hdwq[0]; spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); spin_lock(&epd_pool->lock); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &epd_pool->list, list) { list_move_tail(&lpfc_ncmd->list, &qp->lpfc_io_buf_list_put); lpfc_ncmd->flags = false; qp->put_io_bufs++; epd_pool->count--; } spin_unlock(&epd_pool->lock); spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } /** * lpfc_create_multixri_pools - create multi-XRI pools * @phba: pointer to lpfc hba data structure. * * This routine initialize public, private per HWQ. Then, move XRIs from * lpfc_io_buf_list_put to public pool. High and low watermark are also * Initialized. **/ void lpfc_create_multixri_pools(struct lpfc_hba *phba) { u32 i, j; u32 hwq_count; u32 count_per_hwq; struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; unsigned long iflag; struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; struct lpfc_pbl_pool *pbl_pool; struct lpfc_pvt_pool *pvt_pool; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, phba->sli4_hba.io_xri_cnt); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_create_expedite_pool(phba); hwq_count = phba->cfg_hdw_queue; count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; for (i = 0; i < hwq_count; i++) { multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); if (!multixri_pool) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1238 Failed to allocate memory for " "multixri_pool\n"); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_destroy_expedite_pool(phba); j = 0; while (j < i) { qp = &phba->sli4_hba.hdwq[j]; kfree(qp->p_multixri_pool); j++; } phba->cfg_xri_rebalancing = 0; return; } qp = &phba->sli4_hba.hdwq[i]; qp->p_multixri_pool = multixri_pool; multixri_pool->xri_limit = count_per_hwq; multixri_pool->rrb_next_hwqid = i; /* Deal with public free xri pool */ pbl_pool = &multixri_pool->pbl_pool; spin_lock_init(&pbl_pool->lock); spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); spin_lock(&pbl_pool->lock); INIT_LIST_HEAD(&pbl_pool->list); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &qp->lpfc_io_buf_list_put, list) { list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); qp->put_io_bufs--; pbl_pool->count++; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", pbl_pool->count, i); spin_unlock(&pbl_pool->lock); spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); /* Deal with private free xri pool */ pvt_pool = &multixri_pool->pvt_pool; pvt_pool->high_watermark = multixri_pool->xri_limit / 2; pvt_pool->low_watermark = XRI_BATCH; spin_lock_init(&pvt_pool->lock); spin_lock_irqsave(&pvt_pool->lock, iflag); INIT_LIST_HEAD(&pvt_pool->list); pvt_pool->count = 0; spin_unlock_irqrestore(&pvt_pool->lock, iflag); } } /** * lpfc_destroy_multixri_pools - destroy multi-XRI pools * @phba: pointer to lpfc hba data structure. * * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
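 *
 * For reference (illustrative only): the sizing done by
 * lpfc_create_multixri_pools() above is straightforward.  The common XRI
 * count is split evenly across the hardware queues; each queue's private
 * pool then gets a high watermark of half its share and a low watermark
 * equal to the XRI_BATCH size used when moving buffers between pools.
 * The helper and the example figures below are not driver values.
 */

static void demo_xri_budget(u32 total_xri, u32 hwq_count, u32 batch,
			    u32 *xri_limit, u32 *high_wm, u32 *low_wm)
{
	*xri_limit = total_xri / hwq_count;	/* per-queue share */
	*high_wm = *xri_limit / 2;		/* pvt_pool high watermark */
	*low_wm = batch;			/* pvt_pool low watermark */
}

/* e.g. 4096 XRIs over 16 queues: 256 per queue, high 128, low = batch */

/*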
**/ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) { u32 i; u32 hwq_count; struct lpfc_io_buf *lpfc_ncmd; struct lpfc_io_buf *lpfc_ncmd_next; unsigned long iflag; struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; struct lpfc_pbl_pool *pbl_pool; struct lpfc_pvt_pool *pvt_pool; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_destroy_expedite_pool(phba); if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_sli_flush_io_rings(phba); hwq_count = phba->cfg_hdw_queue; for (i = 0; i < hwq_count; i++) { qp = &phba->sli4_hba.hdwq[i]; multixri_pool = qp->p_multixri_pool; if (!multixri_pool) continue; qp->p_multixri_pool = NULL; spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); /* Deal with public free xri pool */ pbl_pool = &multixri_pool->pbl_pool; spin_lock(&pbl_pool->lock); lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", pbl_pool->count, i); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &pbl_pool->list, list) { list_move_tail(&lpfc_ncmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; pbl_pool->count--; } INIT_LIST_HEAD(&pbl_pool->list); pbl_pool->count = 0; spin_unlock(&pbl_pool->lock); /* Deal with private free xri pool */ pvt_pool = &multixri_pool->pvt_pool; spin_lock(&pvt_pool->lock); lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", pvt_pool->count, i); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &pvt_pool->list, list) { list_move_tail(&lpfc_ncmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; pvt_pool->count--; } INIT_LIST_HEAD(&pvt_pool->list); pvt_pool->count = 0; spin_unlock(&pvt_pool->lock); spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); kfree(multixri_pool); } } /** * lpfc_online - Initialize and bring a HBA online * @phba: pointer to lpfc hba data structure. * * This routine initializes the HBA and brings a HBA online. During this * process, the management interface is blocked to prevent user space access * to the HBA interfering with the driver initialization. * * Return codes * 0 - successful * 1 - failed **/ int lpfc_online(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct lpfc_vport **vports; int i, error = 0; bool vpis_cleared = false; if (!phba) return 0; vport = phba->pport; if (!(vport->fc_flag & FC_OFFLINE_MODE)) return 0; lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0458 Bring Adapter online\n"); lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); if (phba->sli_rev == LPFC_SLI_REV4) { if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ lpfc_unblock_mgmt_io(phba); return 1; } spin_lock_irq(&phba->hbalock); if (!phba->sli4_hba.max_cfg_param.vpi_used) vpis_cleared = true; spin_unlock_irq(&phba->hbalock); /* Reestablish the local initiator port. * The offline process destroyed the previous lport. 
*/ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && !phba->nvmet_support) { error = lpfc_nvme_create_localport(phba->pport); if (error) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6132 NVME restore reg failed " "on nvmei error x%x\n", error); } } else { lpfc_sli_queue_init(phba); if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); return 1; } } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; if ((vpis_cleared) && (vports[i]->port_type != LPFC_PHYSICAL_PORT)) vports[i]->vpi = 0; } spin_unlock_irq(shost->host_lock); } } lpfc_destroy_vport_work_array(phba, vports); if (phba->cfg_xri_rebalancing) lpfc_create_multixri_pools(phba); lpfc_cpuhp_add(phba); lpfc_unblock_mgmt_io(phba); return 0; } /** * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked * @phba: pointer to lpfc hba data structure. * * This routine marks a HBA's management interface as not blocked. Once the * HBA's management interface is marked as not blocked, all the user space * access to the HBA, whether they are from sysfs interface or libdfc * interface will be allowed. The HBA is set to block the management interface * when the driver prepares the HBA interface for online or offline and then * set to unblock the management interface afterwards. **/ void lpfc_unblock_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; spin_unlock_irqrestore(&phba->hbalock, iflag); } /** * lpfc_offline_prep - Prepare a HBA to be brought offline * @phba: pointer to lpfc hba data structure. * @mbx_action: flag for mailbox shutdown action. * * This routine is invoked to prepare a HBA to be brought offline. It performs * unregistration login to all the nodes on all vports and flushes the mailbox * queue to make it ready to be brought offline. 
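 *
 * As an aside (illustrative only), lpfc_online() above and the routine
 * below walk the vports the same way: take a snapshot array with
 * lpfc_create_vport_work_array(), iterate until the first NULL entry,
 * update each port's flags under its own host lock, then release the
 * snapshot.  The sketch below shows that shape with hypothetical types;
 * it is not driver code.
 */

struct demo_port {
	spinlock_t lock;
	unsigned long flags;
};

static void demo_clear_flag_on_all(struct demo_port **ports, int max,
				   unsigned long flag)
{
	int i;

	for (i = 0; i <= max && ports[i]; i++) {
		spin_lock_irq(&ports[i]->lock);
		ports[i]->flags &= ~flag;	/* e.g. an OFFLINE bit */
		spin_unlock_irq(&ports[i]->lock);
	}
}

/*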
**/ void lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) { struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; struct Scsi_Host *shost; int i; int offline; bool hba_pci_err; if (vport->fc_flag & FC_OFFLINE_MODE) return; lpfc_block_mgmt_io(phba, mbx_action); lpfc_linkdown(phba); offline = pci_channel_offline(phba->pcidev); hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->load_flag & FC_UNLOADING) continue; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vports[i]->fc_flag &= ~FC_VFI_REGISTERED; spin_unlock_irq(shost->host_lock); shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); if (offline || hba_pci_err) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_UNREG_INP | NLP_RPI_REGISTERED); spin_unlock_irq(&ndlp->lock); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli_rpi_release(vports[i], ndlp); } else { lpfc_unreg_rpi(vports[i], ndlp); } /* * Whenever an SLI4 port goes offline, free the * RPI. Get a new RPI when the adapter port * comes back online. */ if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_printf_vlog(vports[i], KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0011 Free RPI x%x on " "ndlp: x%px did x%x\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID); lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); /* Don't remove the node unless the node * has been unregistered with the * transport, and we're not in recovery * before dev_loss_tmo triggered. * Otherwise, let dev_loss take care of * the node. */ if (!(ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) lpfc_disc_state_machine (vports[i], ndlp, NULL, NLP_EVT_DEVICE_RM); } } } } lpfc_destroy_vport_work_array(phba, vports); lpfc_sli_mbox_sys_shutdown(phba, mbx_action); if (phba->wq) flush_workqueue(phba->wq); } /** * lpfc_offline - Bring a HBA offline * @phba: pointer to lpfc hba data structure. * * This routine actually brings a HBA offline. It stops all the timers * associated with the HBA, brings down the SLI layer, and eventually * marks the HBA as in offline state for the upper layer protocol. **/ void lpfc_offline(struct lpfc_hba *phba) { struct Scsi_Host *shost; struct lpfc_vport **vports; int i; if (phba->pport->fc_flag & FC_OFFLINE_MODE) return; /* stop port and all timers associated with this hba */ lpfc_stop_port(phba); /* Tear down the local and target port registrations. The * nvme transports need to cleanup. */ lpfc_nvmet_destroy_targetport(phba); lpfc_nvme_destroy_localport(phba->pport); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0460 Bring Adapter offline\n"); /* Bring down the SLI Layer and cleanup. The HBA is offline now. 
*/ lpfc_sli_hba_down(phba); spin_lock_irq(&phba->hbalock); phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; vports[i]->fc_flag |= FC_OFFLINE_MODE; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled * in hba_unset */ if (phba->pport->fc_flag & FC_OFFLINE_MODE) __lpfc_cpuhp_remove(phba); if (phba->cfg_xri_rebalancing) lpfc_destroy_multixri_pools(phba); } /** * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * * This routine is to free all the SCSI buffers and IOCBs from the driver * list back to kernel. It is called from lpfc_pci_remove_one to free * the internal resources before the device is removed from the system. **/ static void lpfc_scsi_free(struct lpfc_hba *phba) { struct lpfc_io_buf *sb, *sb_next; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; spin_lock_irq(&phba->hbalock); /* Release all the lpfc_scsi_bufs maintained by this host. */ spin_lock(&phba->scsi_buf_list_put_lock); list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, list) { list_del(&sb->list); dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } spin_unlock(&phba->scsi_buf_list_put_lock); spin_lock(&phba->scsi_buf_list_get_lock); list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, list) { list_del(&sb->list); dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } spin_unlock(&phba->scsi_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } /** * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * * This routine is to free all the IO buffers and IOCBs from the driver * list back to kernel. It is called from lpfc_pci_remove_one to free * the internal resources before the device is removed from the system. **/ void lpfc_io_free(struct lpfc_hba *phba) { struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; struct lpfc_sli4_hdw_queue *qp; int idx; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ spin_lock(&qp->io_buf_list_put_lock); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &qp->lpfc_io_buf_list_put, list) { list_del(&lpfc_ncmd->list); qp->put_io_bufs--; dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); if (phba->cfg_xpsgl && !phba->nvmet_support) lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); kfree(lpfc_ncmd); qp->total_io_bufs--; } spin_unlock(&qp->io_buf_list_put_lock); spin_lock(&qp->io_buf_list_get_lock); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &qp->lpfc_io_buf_list_get, list) { list_del(&lpfc_ncmd->list); qp->get_io_bufs--; dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); if (phba->cfg_xpsgl && !phba->nvmet_support) lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); kfree(lpfc_ncmd); qp->total_io_bufs--; } spin_unlock(&qp->io_buf_list_get_lock); } } /** * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. * * This routine first calculates the sizes of the current els and allocated * scsi sgl lists, and then goes through all sgls to updates the physical * XRIs assigned due to port function reset. During port initialization, the * current els and allocated scsi sgl lists are 0s. * * Return codes * 0 - successful (for now, it always returns 0) **/ int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; uint16_t i, lxri, xri_cnt, els_xri_cnt; LIST_HEAD(els_sgl_list); int rc; /* * update on pci function's els xri-sgl list */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { /* els xri-sgl expanded */ xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3157 ELS xri-sgl count increased from " "%d to %d\n", phba->sli4_hba.els_xri_cnt, els_xri_cnt); /* allocate the additional els sgls */ for (i = 0; i < xri_cnt; i++) { sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); if (sglq_entry == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2562 Failure to allocate an " "ELS sgl entry:%d\n", i); rc = -ENOMEM; goto out_free_mem; } sglq_entry->buff_type = GEN_BUFF_TYPE; sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); if (sglq_entry->virt == NULL) { kfree(sglq_entry); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2563 Failure to allocate an " "ELS mbuf:%d\n", i); rc = -ENOMEM; goto out_free_mem; } sglq_entry->sgl = sglq_entry->virt; memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); sglq_entry->state = SGL_FREED; list_add_tail(&sglq_entry->list, &els_sgl_list); } spin_lock_irq(&phba->sli4_hba.sgl_list_lock); list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_els_sgl_list); spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { /* els xri-sgl shrinked */ xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3158 ELS xri-sgl count decreased from " "%d to %d\n", phba->sli4_hba.els_xri_cnt, els_xri_cnt); spin_lock_irq(&phba->sli4_hba.sgl_list_lock); list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &els_sgl_list); /* release extra els sgls from list */ for (i = 0; i < xri_cnt; i++) { list_remove_head(&els_sgl_list, sglq_entry, struct lpfc_sglq, list); if (sglq_entry) { __lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); kfree(sglq_entry); } } list_splice_init(&els_sgl_list, 
&phba->sli4_hba.lpfc_els_sgl_list); spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); } else lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3163 ELS xri-sgl count unchanged: %d\n", els_xri_cnt); phba->sli4_hba.els_xri_cnt = els_xri_cnt; /* update xris to els sgls on the list */ sglq_entry = NULL; sglq_entry_next = NULL; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &phba->sli4_hba.lpfc_els_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2400 Failed to allocate xri for " "ELS sgl\n"); rc = -ENOMEM; goto out_free_mem; } sglq_entry->sli4_lxritag = lxri; sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } return 0; out_free_mem: lpfc_free_els_sgl_list(phba); return rc; } /** * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. * * This routine first calculates the sizes of the current els and allocated * scsi sgl lists, and then goes through all sgls to updates the physical * XRIs assigned due to port function reset. During port initialization, the * current els and allocated scsi sgl lists are 0s. * * Return codes * 0 - successful (for now, it always returns 0) **/ int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; uint16_t i, lxri, xri_cnt, els_xri_cnt; uint16_t nvmet_xri_cnt; LIST_HEAD(nvmet_sgl_list); int rc; /* * update on pci function's nvmet xri-sgl list */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { /* els xri-sgl expanded */ xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "6302 NVMET xri-sgl cnt grew from %d to %d\n", phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); /* allocate the additional nvmet sgls */ for (i = 0; i < xri_cnt; i++) { sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); if (sglq_entry == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6303 Failure to allocate an " "NVMET sgl entry:%d\n", i); rc = -ENOMEM; goto out_free_mem; } sglq_entry->buff_type = NVMET_BUFF_TYPE; sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, &sglq_entry->phys); if (sglq_entry->virt == NULL) { kfree(sglq_entry); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6304 Failure to allocate an " "NVMET buf:%d\n", i); rc = -ENOMEM; goto out_free_mem; } sglq_entry->sgl = sglq_entry->virt; memset(sglq_entry->sgl, 0, phba->cfg_sg_dma_buf_size); sglq_entry->state = SGL_FREED; list_add_tail(&sglq_entry->list, &nvmet_sgl_list); } spin_lock_irq(&phba->hbalock); spin_lock(&phba->sli4_hba.sgl_list_lock); list_splice_init(&nvmet_sgl_list, &phba->sli4_hba.lpfc_nvmet_sgl_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { /* nvmet xri-sgl shrunk */ xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "6305 NVMET xri-sgl count decreased from " "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); spin_lock_irq(&phba->hbalock); spin_lock(&phba->sli4_hba.sgl_list_lock); list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &nvmet_sgl_list); /* release extra nvmet sgls from list */ for (i = 0; i < xri_cnt; i++) { list_remove_head(&nvmet_sgl_list, sglq_entry, struct lpfc_sglq, list); if (sglq_entry) { 
lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); kfree(sglq_entry); } } list_splice_init(&nvmet_sgl_list, &phba->sli4_hba.lpfc_nvmet_sgl_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "6306 NVMET xri-sgl count unchanged: %d\n", nvmet_xri_cnt); phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; /* update xris to nvmet sgls on the list */ sglq_entry = NULL; sglq_entry_next = NULL; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6307 Failed to allocate xri for " "NVMET sgl\n"); rc = -ENOMEM; goto out_free_mem; } sglq_entry->sli4_lxritag = lxri; sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } return 0; out_free_mem: lpfc_free_nvmet_sgl_list(phba); return rc; } int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) { LIST_HEAD(blist); struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_cmd; struct lpfc_io_buf *iobufp, *prev_iobufp; int idx, cnt, xri, inserted; cnt = 0; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irq(&qp->io_buf_list_get_lock); spin_lock(&qp->io_buf_list_put_lock); /* Take everything off the get and put lists */ list_splice_init(&qp->lpfc_io_buf_list_get, &blist); list_splice(&qp->lpfc_io_buf_list_put, &blist); INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); cnt += qp->get_io_bufs + qp->put_io_bufs; qp->get_io_bufs = 0; qp->put_io_bufs = 0; qp->total_io_bufs = 0; spin_unlock(&qp->io_buf_list_put_lock); spin_unlock_irq(&qp->io_buf_list_get_lock); } /* * Take IO buffers off blist and put on cbuf sorted by XRI. * This is because POST_SGL takes a sequential range of XRIs * to post to the firmware. */ for (idx = 0; idx < cnt; idx++) { list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); if (!lpfc_cmd) return cnt; if (idx == 0) { list_add_tail(&lpfc_cmd->list, cbuf); continue; } xri = lpfc_cmd->cur_iocbq.sli4_xritag; inserted = 0; prev_iobufp = NULL; list_for_each_entry(iobufp, cbuf, list) { if (xri < iobufp->cur_iocbq.sli4_xritag) { if (prev_iobufp) list_add(&lpfc_cmd->list, &prev_iobufp->list); else list_add(&lpfc_cmd->list, cbuf); inserted = 1; break; } prev_iobufp = iobufp; } if (!inserted) list_add_tail(&lpfc_cmd->list, cbuf); } return cnt; } int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_cmd; int idx, cnt; unsigned long iflags; qp = phba->sli4_hba.hdwq; cnt = 0; while (!list_empty(cbuf)) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { list_remove_head(cbuf, lpfc_cmd, struct lpfc_io_buf, list); if (!lpfc_cmd) return cnt; cnt++; qp = &phba->sli4_hba.hdwq[idx]; lpfc_cmd->hdwq_no = idx; lpfc_cmd->hdwq = qp; lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags); list_add_tail(&lpfc_cmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; qp->total_io_bufs++; spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflags); } } return cnt; } /** * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. * * This routine first calculates the sizes of the current els and allocated * scsi sgl lists, and then goes through all sgls to updates the physical * XRIs assigned due to port function reset. 
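 *
 * For reference (illustrative only): lpfc_io_buf_flush() above keeps the
 * collected buffers sorted by XRI so that subsequent SGL block posts see a
 * sequential XRI range.  The helper below shows an equivalent sorted
 * insertion into a list_head-based list; the demo_buf type is hypothetical.
 */

struct demo_buf {
	struct list_head list;
	u16 xri;
};

static void demo_insert_by_xri(struct list_head *head, struct demo_buf *nbuf)
{
	struct demo_buf *pos;

	/* insert in front of the first entry with a larger XRI */
	list_for_each_entry(pos, head, list) {
		if (nbuf->xri < pos->xri) {
			list_add_tail(&nbuf->list, &pos->list);
			return;
		}
	}
	list_add_tail(&nbuf->list, head);	/* largest so far: append */
}

/*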
During port initialization, the * current els and allocated scsi sgl lists are 0s. * * Return codes * 0 - successful (for now, it always returns 0) **/ int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) { struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; uint16_t i, lxri, els_xri_cnt; uint16_t io_xri_cnt, io_xri_max; LIST_HEAD(io_sgl_list); int rc, cnt; /* * update on pci function's allocated nvme xri-sgl list */ /* maximum number of xris available for nvme buffers */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; phba->sli4_hba.io_xri_max = io_xri_max; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "6074 Current allocated XRI sgl count:%d, " "maximum XRI count:%d els_xri_cnt:%d\n\n", phba->sli4_hba.io_xri_cnt, phba->sli4_hba.io_xri_max, els_xri_cnt); cnt = lpfc_io_buf_flush(phba, &io_sgl_list); if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { /* max nvme xri shrunk below the allocated nvme buffers */ io_xri_cnt = phba->sli4_hba.io_xri_cnt - phba->sli4_hba.io_xri_max; /* release the extra allocated nvme buffers */ for (i = 0; i < io_xri_cnt; i++) { list_remove_head(&io_sgl_list, lpfc_ncmd, struct lpfc_io_buf, list); if (lpfc_ncmd) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); } } phba->sli4_hba.io_xri_cnt -= io_xri_cnt; } /* update xris associated to remaining allocated nvme buffers */ lpfc_ncmd = NULL; lpfc_ncmd_next = NULL; phba->sli4_hba.io_xri_cnt = cnt; list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &io_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6075 Failed to allocate xri for " "nvme buffer\n"); rc = -ENOMEM; goto out_free_mem; } lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); return 0; out_free_mem: lpfc_io_free(phba); return rc; } /** * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec * @phba: Pointer to lpfc hba data structure. * @num_to_alloc: The requested number of buffers to allocate. * * This routine allocates nvme buffers for device with SLI-4 interface spec, * the nvme buffer contains all the necessary information needed to initiate * an I/O. After allocating up to @num_to_allocate IO buffers and put * them on a list, it post them to the port by using SGL block post. * * Return codes: * int - number of IO buffers that were allocated and posted. * 0 = failure, less than num_to_alloc is a partial failure. **/ int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) { struct lpfc_io_buf *lpfc_ncmd; struct lpfc_iocbq *pwqeq; uint16_t iotag, lxri = 0; int bcnt, num_posted; LIST_HEAD(prep_nblist); LIST_HEAD(post_nblist); LIST_HEAD(nvme_nblist); phba->sli4_hba.io_xri_cnt = 0; for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); if (!lpfc_ncmd) break; /* * Get memory from the pci pool to map the virt space to * pci bus space for an I/O. The DMA buffer includes the * number of SGE's necessary to support the sg_tablesize. */ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &lpfc_ncmd->dma_handle); if (!lpfc_ncmd->data) { kfree(lpfc_ncmd); break; } if (phba->cfg_xpsgl && !phba->nvmet_support) { INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); } else { /* * 4K Page alignment is CRITICAL to BlockGuard, double * check to be sure. 
*/ if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && (((unsigned long)(lpfc_ncmd->data) & (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3369 Memory alignment err: " "addr=%lx\n", (unsigned long)lpfc_ncmd->data); dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); break; } } INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); break; } pwqeq = &lpfc_ncmd->cur_iocbq; /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, pwqeq); if (iotag == 0) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6121 Failed to allocate IOTAG for" " XRI:0x%x\n", lxri); lpfc_sli4_free_xri(phba, lxri); break; } pwqeq->sli4_lxritag = lxri; pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; /* Initialize local short-hand pointers. */ lpfc_ncmd->dma_sgl = lpfc_ncmd->data; lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd; spin_lock_init(&lpfc_ncmd->buf_lock); /* add the nvme buffer to a post list */ list_add_tail(&lpfc_ncmd->list, &post_nblist); phba->sli4_hba.io_xri_cnt++; } lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6114 Allocate %d out of %d requested new NVME " "buffers of size x%zu bytes\n", bcnt, num_to_alloc, sizeof(*lpfc_ncmd)); /* post the list of nvme buffer sgls to port if available */ if (!list_empty(&post_nblist)) num_posted = lpfc_sli4_post_io_sgl_list( phba, &post_nblist, bcnt); else num_posted = 0; return num_posted; } static uint64_t lpfc_get_wwpn(struct lpfc_hba *phba) { uint64_t wwn; int rc; LPFC_MBOXQ_t *mboxq; MAILBOX_t *mb; mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) return (uint64_t)-1; /* First get WWN of HBA instance */ lpfc_read_nv(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6019 Mailbox failed , mbxCmd x%x " "READ_NV, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), bf_get(lpfc_mqe_status, &mboxq->u.mqe)); mempool_free(mboxq, phba->mbox_mem_pool); return (uint64_t) -1; } mb = &mboxq->u.mb; memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); /* wwn is WWPN of HBA instance */ mempool_free(mboxq, phba->mbox_mem_pool); if (phba->sli_rev == LPFC_SLI_REV4) return be64_to_cpu(wwn); else return rol64(wwn, 32); } static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba) { if (phba->sli_rev == LPFC_SLI_REV4) if (phba->cfg_xpsgl && !phba->nvmet_support) return LPFC_MAX_SG_TABLESIZE; else return phba->cfg_scsi_seg_cnt; else return phba->cfg_sg_seg_cnt; } /** * lpfc_vmid_res_alloc - Allocates resources for VMID * @phba: pointer to lpfc hba data structure. * @vport: pointer to vport data structure * * This routine allocated the resources needed for the VMID. 
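 *
 * As an aside (illustrative only), the BlockGuard check in
 * lpfc_new_io_buf() above verifies 4K alignment with the usual
 * power-of-two mask test, shown generically below.
 */

/* true if addr is aligned to size, where size is a power of two */
static inline bool demo_is_aligned(const void *addr, unsigned long size)
{
	return ((unsigned long)addr & (size - 1)) == 0;
}

/* e.g. demo_is_aligned(lpfc_ncmd->data, SLI4_PAGE_SIZE) mirrors the check */

/*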
* * Return codes * 0 on Success * Non-0 on Failure */ static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) { /* VMID feature is supported only on SLI4 */ if (phba->sli_rev == LPFC_SLI_REV3) { phba->cfg_vmid_app_header = 0; phba->cfg_vmid_priority_tagging = 0; } if (lpfc_is_vmid_enabled(phba)) { vport->vmid = kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), GFP_KERNEL); if (!vport->vmid) return -ENOMEM; rwlock_init(&vport->vmid_lock); /* Set the VMID parameters for the vport */ vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; vport->vmid_inactivity_timeout = phba->cfg_vmid_inactivity_timeout; vport->max_vmid = phba->cfg_max_vmid; vport->cur_vmid_cnt = 0; vport->vmid_priority_range = bitmap_zalloc (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); if (!vport->vmid_priority_range) { kfree(vport->vmid); return -ENOMEM; } hash_init(vport->hash_table); } return 0; } /** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. * @instance: a unique integer ID to this FC port. * @dev: pointer to the device data structure. * * This routine creates a FC port for the upper layer protocol. The FC port * can be created on top of either a physical port or a virtual port provided * by the HBA. This routine also allocates a SCSI host data structure (shost) * and associates the FC port created before adding the shost into the SCSI * layer. * * Return codes * @vport - pointer to the virtual N_Port data structure. * NULL - port create failed. **/ struct lpfc_vport * lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) { struct lpfc_vport *vport; struct Scsi_Host *shost = NULL; struct scsi_host_template *template; int error = 0; int i; uint64_t wwn; bool use_no_reset_hba = false; int rc; if (lpfc_no_hba_reset_cnt) { if (phba->sli_rev < LPFC_SLI_REV4 && dev == &phba->pcidev->dev) { /* Reset the port first */ lpfc_sli_brdrestart(phba); rc = lpfc_sli_chipset_init(phba); if (rc) return NULL; } wwn = lpfc_get_wwpn(phba); } for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { if (wwn == lpfc_no_hba_reset[i]) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6020 Setting use_no_reset port=%llx\n", wwn); use_no_reset_hba = true; break; } } /* Seed template for SCSI host registration */ if (dev == &phba->pcidev->dev) { if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { /* Seed physical port template */ template = &lpfc_template; if (use_no_reset_hba) /* template is for a no reset SCSI Host */ template->eh_host_reset_handler = NULL; /* Seed updated value of sg_tablesize */ template->sg_tablesize = lpfc_get_sg_tablesize(phba); } else { /* NVMET is for physical port only */ template = &lpfc_template_nvme; } } else { /* Seed vport template */ template = &lpfc_vport_template; /* Seed updated value of sg_tablesize */ template->sg_tablesize = lpfc_get_sg_tablesize(phba); } shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); if (!shost) goto out; vport = (struct lpfc_vport *) shost->hostdata; vport->phba = phba; vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; vport->fc_rscn_flush = 0; lpfc_get_vport_cfgparam(vport); /* Adjust value in vport */ vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; shost->unique_id = instance; shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { if (!phba->cfg_fcp_mq_threshold || phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) phba->cfg_fcp_mq_threshold = 
phba->cfg_hdw_queue; shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), phba->cfg_fcp_mq_threshold); shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; } else /* SLI-3 has a limited number of hardware queues (3), * thus there is only one for FCP processing. */ shost->nr_hw_queues = 1; /* * Set initial can_queue value since 0 is no longer supported and * scsi_add_host will fail. This will be adjusted later based on the * max xri value determined in hba setup. */ shost->can_queue = phba->cfg_hba_queue_depth - 10; if (dev != &phba->pcidev->dev) { shost->transportt = lpfc_vport_transport_template; vport->port_type = LPFC_NPIV_PORT; } else { shost->transportt = lpfc_transport_template; vport->port_type = LPFC_PHYSICAL_PORT; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, "9081 CreatePort TMPLATE type %x TBLsize %d " "SEGcnt %d/%d\n", vport->port_type, shost->sg_tablesize, phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); /* Allocate the resources for VMID */ rc = lpfc_vmid_res_alloc(phba, vport); if (rc) goto out_put_shost; /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) lpfc_setup_bg(phba, shost); error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_free_vmid; spin_lock_irq(&phba->port_list_lock); list_add_tail(&vport->listentry, &phba->port_list); spin_unlock_irq(&phba->port_list_lock); return vport; out_free_vmid: kfree(vport->vmid); bitmap_free(vport->vmid_priority_range); out_put_shost: scsi_host_put(shost); out: return NULL; } /** * destroy_port - destroy an FC port * @vport: pointer to an lpfc virtual N_Port data structure. * * This routine destroys a FC port from the upper layer protocol. All the * resources associated with the port are released. **/ void destroy_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; lpfc_debugfs_terminate(vport); fc_remove_host(shost); scsi_remove_host(shost); spin_lock_irq(&phba->port_list_lock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->port_list_lock); lpfc_cleanup(vport); return; } /** * lpfc_get_instance - Get a unique integer ID * * This routine allocates a unique integer ID from lpfc_hba_index pool. It * uses the kernel idr facility to perform the task. * * Return codes: * instance - a unique integer ID allocated as the new instance. * -1 - lpfc get instance failed. **/ int lpfc_get_instance(void) { int ret; ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); return ret < 0 ? -1 : ret; } /** * lpfc_scan_finished - method for SCSI layer to detect whether scan is done * @shost: pointer to SCSI host data structure. * @time: elapsed time of the scan in jiffies. * * This routine is called by the SCSI layer with a SCSI host to determine * whether the scan host is finished. * * Note: there is no scan_start function as adapter initialization will have * asynchronously kicked off the link initialization. * * Return codes * 0 - SCSI host scan is not over yet. * 1 - SCSI host scan is over. 
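 *
 * As an aside (illustrative only), lpfc_get_instance() above uses the
 * kernel IDR facility; the pair of helpers below sketches the usual
 * allocate/release pattern against a private, hypothetical IDR (the
 * driver's real pool is lpfc_hba_index).
 */

static DEFINE_IDR(demo_index_pool);

/* hand out the lowest free integer ID, or a negative errno on failure */
static int demo_get_instance(void)
{
	return idr_alloc(&demo_index_pool, NULL, 0, 0, GFP_KERNEL);
}

/* return an ID to the pool so it can be reused */
static void demo_put_instance(int id)
{
	idr_remove(&demo_index_pool, id);
}

/*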
**/ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int stat = 0; spin_lock_irq(shost->host_lock); if (vport->load_flag & FC_UNLOADING) { stat = 1; goto finished; } if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } if (time >= msecs_to_jiffies(15 * 1000) && phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } if (vport->port_state != LPFC_VPORT_READY) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; stat = 1; finished: spin_unlock_irq(shost->host_lock); return stat; } static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; fc_host_supported_speeds(shost) = 0; /* * Avoid reporting supported link speed for FCoE as it can't be * controlled via FCoE. */ if (phba->hba_flag & HBA_FCOE_MODE) return; if (phba->lmt & LMT_256Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; if (phba->lmt & LMT_128Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; if (phba->lmt & LMT_64Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; if (phba->lmt & LMT_32Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; if (phba->lmt & LMT_16Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; if (phba->lmt & LMT_10Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; if (phba->lmt & LMT_8Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; if (phba->lmt & LMT_4Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; if (phba->lmt & LMT_2Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; if (phba->lmt & LMT_1Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; } /** * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port * @shost: pointer to SCSI host data structure. * * This routine initializes a given SCSI host attributes on a FC port. The * SCSI host can be either on top of a physical port or a virtual port. **/ void lpfc_host_attrib_init(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; /* * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
*/ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); fc_host_supported_classes(shost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(shost), 0, sizeof(fc_host_supported_fc4s(shost))); fc_host_supported_fc4s(shost)[2] = 1; fc_host_supported_fc4s(shost)[7] = 1; lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), sizeof fc_host_symbolic_name(shost)); lpfc_host_supported_speeds_set(shost); fc_host_maxframe_size(shost) = (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; /* This value is also unchanging */ memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); fc_host_active_fc4s(shost)[2] = 1; fc_host_active_fc4s(shost)[7] = 1; fc_host_max_npiv_vports(shost) = phba->max_vpi; spin_lock_irq(shost->host_lock); vport->load_flag &= ~FC_LOADING; spin_unlock_irq(shost->host_lock); } /** * lpfc_stop_port_s3 - Stop SLI3 device port * @phba: pointer to lpfc hba data structure. * * This routine is invoked to stop an SLI3 device port, it stops the device * from generating interrupts and stops the device driver's timers for the * device. **/ static void lpfc_stop_port_s3(struct lpfc_hba *phba) { /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ /* Clear all pending interrupts */ writel(0xffffffff, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ /* Reset some HBA SLI setup states */ lpfc_stop_hba_timers(phba); phba->pport->work_port_events = 0; } /** * lpfc_stop_port_s4 - Stop SLI4 device port * @phba: pointer to lpfc hba data structure. * * This routine is invoked to stop an SLI4 device port, it stops the device * from generating interrupts and stops the device driver's timers for the * device. **/ static void lpfc_stop_port_s4(struct lpfc_hba *phba) { /* Reset some HBA SLI4 setup states */ lpfc_stop_hba_timers(phba); if (phba->pport) phba->pport->work_port_events = 0; phba->sli4_hba.intr_enable = 0; } /** * lpfc_stop_port - Wrapper function for stopping hba port * @phba: Pointer to HBA context object. * * This routine wraps the actual SLI3 or SLI4 hba stop port routine from * the API jump table function pointer from the lpfc_hba struct. **/ void lpfc_stop_port(struct lpfc_hba *phba) { phba->lpfc_stop_port(phba); if (phba->wq) flush_workqueue(phba->wq); } /** * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer * @phba: Pointer to hba for which this call is being executed. * * This routine starts the timer waiting for the FCF rediscovery to complete. **/ void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) { unsigned long fcf_redisc_wait_tmo = (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); /* Start fcf rediscovery wait period timer */ mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); spin_lock_irq(&phba->hbalock); /* Allow action to new fcf asynchronous event */ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); /* Mark the FCF rediscovery pending state */ phba->fcf.fcf_flag |= FCF_REDISC_PEND; spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout * @t: Timer context used to obtain the pointer to lpfc hba data structure. * * This routine is invoked when waiting for FCF table rediscover has been * timed out. 
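 *
 * As an aside (illustrative only), the FCF rediscovery wait above follows
 * the standard timer idiom: arm the timer with mod_timer() at
 * jiffies + msecs_to_jiffies(), and in the callback recover the owning
 * structure with from_timer() before flagging work for the worker thread.
 * The types below are hypothetical; timer_setup() would normally be done
 * once at init time and is shown inline only for completeness.
 */

struct demo_fcf_ctx {
	struct timer_list redisc_wait;
};

static void demo_redisc_timeout(struct timer_list *t)
{
	struct demo_fcf_ctx *ctx = from_timer(ctx, t, redisc_wait);

	/* mark the event and wake the worker thread here */
	(void)ctx;
}

static void demo_start_redisc_wait(struct demo_fcf_ctx *ctx, unsigned int ms)
{
	timer_setup(&ctx->redisc_wait, demo_redisc_timeout, 0);
	mod_timer(&ctx->redisc_wait, jiffies + msecs_to_jiffies(ms));
}

/*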
If new FCF record(s) has (have) been discovered during the * wait period, a new FCF event shall be added to the FCOE async event * list, and then worker thread shall be waked up for processing from the * worker thread context. **/ static void lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) { struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); /* Don't send FCF rediscovery event if timer cancelled */ spin_lock_irq(&phba->hbalock); if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); return; } /* Clear FCF rediscovery timer pending flag */ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; /* FCF rediscovery event to worker thread */ phba->fcf.fcf_flag |= FCF_REDISC_EVT; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2776 FCF rediscover quiescent timer expired\n"); /* wake up worker thread */ lpfc_worker_wake_up(phba); } /** * lpfc_vmid_poll - VMID timeout detection * @t: Timer context used to obtain the pointer to lpfc hba data structure. * * This routine is invoked when there is no I/O on by a VM for the specified * amount of time. When this situation is detected, the VMID has to be * deregistered from the switch and all the local resources freed. The VMID * will be reassigned to the VM once the I/O begins. **/ static void lpfc_vmid_poll(struct timer_list *t) { struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); u32 wake_up = 0; /* check if there is a need to issue QFPA */ if (phba->pport->vmid_priority_tagging) { wake_up = 1; phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; } /* Is the vmid inactivity timer enabled */ if (phba->pport->vmid_inactivity_timeout || phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { wake_up = 1; phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; } if (wake_up) lpfc_worker_wake_up(phba); /* restart the timer for the next iteration */ mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); } /** * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to parse the SLI4 link-attention link fault code. **/ static void lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) { case LPFC_FC_LA_TYPE_LINK_DOWN: case LPFC_FC_LA_TYPE_TRUNKING_EVENT: case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: break; default: switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { case LPFC_ASYNC_LINK_FAULT_NONE: case LPFC_ASYNC_LINK_FAULT_LOCAL: case LPFC_ASYNC_LINK_FAULT_REMOTE: case LPFC_ASYNC_LINK_FAULT_LR_LRR: break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0398 Unknown link fault code: x%x\n", bf_get(lpfc_acqe_link_fault, acqe_link)); break; } break; } } /** * lpfc_sli4_parse_latt_type - Parse sli4 link attention type * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to parse the SLI4 link attention type and translate it * into the base driver's link attention type coding. * * Return: Link attention type in terms of base driver's coding. 
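 *
 * For example, an ACQE link status of LPFC_ASYNC_LINK_STATUS_LOGICAL_UP is
 * reported as LPFC_ATT_LINK_UP, while a purely physical link up
 * (LPFC_ASYNC_LINK_STATUS_UP) is returned as LPFC_ATT_RESERVED and ignored
 * by the caller.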
**/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
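 *
 * For example, an FC trailer event (LPFC_TRAILER_CODE_FC) carrying speed
 * code LPFC_FC_LA_SPEED_32G is translated to 32000 Mbps; unrecognized
 * codes fall back to a speed of 0.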
**/ static uint32_t lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, uint8_t speed_code) { uint32_t port_speed; switch (evt_code) { case LPFC_TRAILER_CODE_LINK: switch (speed_code) { case LPFC_ASYNC_LINK_SPEED_ZERO: port_speed = 0; break; case LPFC_ASYNC_LINK_SPEED_10MBPS: port_speed = 10; break; case LPFC_ASYNC_LINK_SPEED_100MBPS: port_speed = 100; break; case LPFC_ASYNC_LINK_SPEED_1GBPS: port_speed = 1000; break; case LPFC_ASYNC_LINK_SPEED_10GBPS: port_speed = 10000; break; case LPFC_ASYNC_LINK_SPEED_20GBPS: port_speed = 20000; break; case LPFC_ASYNC_LINK_SPEED_25GBPS: port_speed = 25000; break; case LPFC_ASYNC_LINK_SPEED_40GBPS: port_speed = 40000; break; case LPFC_ASYNC_LINK_SPEED_100GBPS: port_speed = 100000; break; default: port_speed = 0; } break; case LPFC_TRAILER_CODE_FC: switch (speed_code) { case LPFC_FC_LA_SPEED_UNKNOWN: port_speed = 0; break; case LPFC_FC_LA_SPEED_1G: port_speed = 1000; break; case LPFC_FC_LA_SPEED_2G: port_speed = 2000; break; case LPFC_FC_LA_SPEED_4G: port_speed = 4000; break; case LPFC_FC_LA_SPEED_8G: port_speed = 8000; break; case LPFC_FC_LA_SPEED_10G: port_speed = 10000; break; case LPFC_FC_LA_SPEED_16G: port_speed = 16000; break; case LPFC_FC_LA_SPEED_32G: port_speed = 32000; break; case LPFC_FC_LA_SPEED_64G: port_speed = 64000; break; case LPFC_FC_LA_SPEED_128G: port_speed = 128000; break; case LPFC_FC_LA_SPEED_256G: port_speed = 256000; break; default: port_speed = 0; } break; default: port_speed = 0; } return port_speed; } /** * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. * * This routine is to handle the SLI4 asynchronous FCoE link event. **/ static void lpfc_sli4_async_link_evt(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_mbx_read_top *la; uint8_t att_type; int rc; att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) return; phba->fcoe_eventtag = acqe_link->event_tag; pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0395 The mboxq allocation failed\n"); return; } rc = lpfc_mbox_rsrc_prep(phba, pmb); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0396 mailbox allocation failed\n"); goto out_free_pmb; } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; /* Create lpfc_handle_latt mailbox command from link ACQE */ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = phba->pport; /* Keep the link status for extra SLI4 state machine reference */ phba->sli4_hba.link_state.speed = lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, bf_get(lpfc_acqe_link_speed, acqe_link)); phba->sli4_hba.link_state.duplex = bf_get(lpfc_acqe_link_duplex, acqe_link); phba->sli4_hba.link_state.status = bf_get(lpfc_acqe_link_status, acqe_link); phba->sli4_hba.link_state.type = bf_get(lpfc_acqe_link_type, acqe_link); phba->sli4_hba.link_state.number = bf_get(lpfc_acqe_link_number, acqe_link); phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_link); phba->sli4_hba.link_state.logical_speed = 
bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2900 Async FC/FCoE Link event - Speed:%dGBit " "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " "Logical speed:%dMbps Fault:%d\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.topology, phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, phba->sli4_hba.link_state.logical_speed, phba->sli4_hba.link_state.fault); /* * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch * topology info. Note: Optional for non FC-AL ports. */ if (!(phba->hba_flag & HBA_FCOE_MODE)) { rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_pmb; return; } /* * For FCoE Mode: fill in all the topology information we need and call * the READ_TOPOLOGY completion routine to continue without actually * sending the READ_TOPOLOGY mailbox command to the port. */ /* Initialize completion status */ mb = &pmb->u.mb; mb->mbxStatus = MBX_SUCCESS; /* Parse port fault information field */ lpfc_sli4_parse_latt_fault(phba, acqe_link); /* Parse and translate link attention fields */ la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; la->eventTag = acqe_link->event_tag; bf_set(lpfc_mbx_read_top_att_type, la, att_type); bf_set(lpfc_mbx_read_top_link_spd, la, (bf_get(lpfc_acqe_link_speed, acqe_link))); /* Fake the following irrelevant fields */ bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); bf_set(lpfc_mbx_read_top_il, la, 0); bf_set(lpfc_mbx_read_top_pb, la, 0); bf_set(lpfc_mbx_read_top_fa, la, 0); bf_set(lpfc_mbx_read_top_mm, la, 0); /* Invoke the lpfc_handle_latt mailbox command callback function */ lpfc_mbx_cmpl_read_topology(phba, pmb); return; out_free_pmb: lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); } /** * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read * topology. * @phba: pointer to lpfc hba data structure. * @speed_code: asynchronous event link speed code. * * This routine is to parse the giving SLI4 async event link speed code into * value of Read topology link speed. * * Return: link speed in terms of Read topology. **/ static uint8_t lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) { uint8_t port_speed; switch (speed_code) { case LPFC_FC_LA_SPEED_1G: port_speed = LPFC_LINK_SPEED_1GHZ; break; case LPFC_FC_LA_SPEED_2G: port_speed = LPFC_LINK_SPEED_2GHZ; break; case LPFC_FC_LA_SPEED_4G: port_speed = LPFC_LINK_SPEED_4GHZ; break; case LPFC_FC_LA_SPEED_8G: port_speed = LPFC_LINK_SPEED_8GHZ; break; case LPFC_FC_LA_SPEED_16G: port_speed = LPFC_LINK_SPEED_16GHZ; break; case LPFC_FC_LA_SPEED_32G: port_speed = LPFC_LINK_SPEED_32GHZ; break; case LPFC_FC_LA_SPEED_64G: port_speed = LPFC_LINK_SPEED_64GHZ; break; case LPFC_FC_LA_SPEED_128G: port_speed = LPFC_LINK_SPEED_128GHZ; break; case LPFC_FC_LA_SPEED_256G: port_speed = LPFC_LINK_SPEED_256GHZ; break; default: port_speed = 0; break; } return port_speed; } void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) { if (!phba->rx_monitor) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "4411 Rx Monitor Info is empty.\n"); } else { lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, LPFC_MAX_RXMONITOR_DUMP); } } /** * lpfc_cgn_update_stat - Save data into congestion stats buffer * @phba: pointer to lpfc hba data structure. * @dtag: FPIN descriptor received * * Increment the FPIN received counter/time when it happens. 
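 *
 * Only the four FPIN descriptor tags handled below (link integrity,
 * delivery, peer congestion and congestion) bump a counter and timestamp;
 * the congestion info buffer CRC is recomputed on every call.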
*/
void
lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
{
	struct lpfc_cgn_info *cp;
	u32 value;

	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	/* Update congestion statistics */
	switch (dtag) {
	case ELS_DTAG_LNK_INTEGRITY:
		le32_add_cpu(&cp->link_integ_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
		break;
	case ELS_DTAG_DELIVERY:
		le32_add_cpu(&cp->delivery_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
		break;
	case ELS_DTAG_PEER_CONGEST:
		le32_add_cpu(&cp->cgn_peer_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
		break;
	case ELS_DTAG_CONGESTION:
		le32_add_cpu(&cp->cgn_notification, 1);
		lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
	}

	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}

	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
				    LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(value);
}

/**
 * lpfc_cgn_update_tstamp - Update cmf timestamp
 * @phba: pointer to lpfc hba data structure.
 * @ts: structure to write the timestamp to.
 */
void
lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
{
	struct timespec64 cur_time;
	struct tm tm_val;

	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &tm_val);

	ts->month = tm_val.tm_mon + 1;
	ts->day = tm_val.tm_mday;
	ts->year = tm_val.tm_year - 100;
	ts->hour = tm_val.tm_hour;
	ts->minute = tm_val.tm_min;
	ts->second = tm_val.tm_sec;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"2646 Updated CMF timestamp : "
			"%u/%u/%u %u:%u:%u\n",
			ts->day, ts->month,
			ts->year, ts->hour,
			ts->minute, ts->second);
}

/**
 * lpfc_cmf_stats_timer - Save data into registered congestion buffer
 * @timer: Timer cookie to access lpfc private data
 *
 * Save the congestion event data every minute.
 * On the hour collapse all the minute data into hour data. Every day
 * collapse all the hour data into daily data. Driver and fabric
 * congestion event counters are kept separately and are saved out
 * to the registered congestion buffer every minute.
 */
static enum hrtimer_restart
lpfc_cmf_stats_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba;
	struct lpfc_cgn_info *cp;
	uint32_t i, index;
	uint16_t value, mvalue;
	uint64_t bps;
	uint32_t mbps;
	uint32_t dvalue, wvalue, lvalue, avalue;
	uint64_t latsum;
	__le16 *ptr;
	__le32 *lptr;
	__le16 *mptr;

	phba = container_of(timer, struct lpfc_hba, cmf_stats_timer);
	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return HRTIMER_NORESTART;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	phba->cgn_evt_timestamp = jiffies +
			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
	phba->cgn_evt_minute++;

	/* We should get to this point in the routine on 1 minute intervals */
	lpfc_cgn_update_tstamp(phba, &cp->base_time);

	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}

	/* Read and clear the latency counters for this minute */
	lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
	latsum = atomic64_read(&phba->cgn_latency_evt);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);

	/* We need to store MB/sec bandwidth in the congestion information.
	 * block_cnt is count of 512 byte blocks for the entire minute,
	 * bps will get bytes per sec before finally converting to MB/sec.
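	 *
	 * For example (assuming LPFC_CGN_TIMER_TO_MIN covers a 60 second
	 * minute, i.e. LPFC_SEC_MIN is 60): 1,228,800 blocks counted in one
	 * minute gives 1,228,800 / 60 * 512 = 10,485,760 bytes/sec, which is
	 * stored as 10,485,760 / (1024 * 1024) = 10 MB/sec.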
*/ bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; phba->rx_block_cnt = 0; mvalue = bps / (1024 * 1024); /* convert to MB/sec */ /* Every minute */ /* cgn parameters */ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; /* Fill in default LUN qdepth */ value = (uint16_t)(phba->pport->cfg_lun_queue_depth); cp->cgn_lunq = cpu_to_le16(value); /* Record congestion buffer info - every minute * cgn_driver_evt_cnt (Driver events) * cgn_fabric_warn_cnt (Congestion Warnings) * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) * cgn_fabric_alarm_cnt (Congestion Alarms) */ index = ++cp->cgn_index_minute; if (cp->cgn_index_minute == LPFC_MIN_HOUR) { cp->cgn_index_minute = 0; index = 0; } /* Get the number of driver events in this sample and reset counter */ dvalue = atomic_read(&phba->cgn_driver_evt_cnt); atomic_set(&phba->cgn_driver_evt_cnt, 0); /* Get the number of warning events - FPIN and Signal for this minute */ wvalue = 0; if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); atomic_set(&phba->cgn_fabric_warn_cnt, 0); /* Get the number of alarm events - FPIN and Signal for this minute */ avalue = 0; if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); atomic_set(&phba->cgn_fabric_alarm_cnt, 0); /* Collect the driver, warning, alarm and latency counts for this * minute into the driver congestion buffer. */ ptr = &cp->cgn_drvr_min[index]; value = (uint16_t)dvalue; *ptr = cpu_to_le16(value); ptr = &cp->cgn_warn_min[index]; value = (uint16_t)wvalue; *ptr = cpu_to_le16(value); ptr = &cp->cgn_alarm_min[index]; value = (uint16_t)avalue; *ptr = cpu_to_le16(value); lptr = &cp->cgn_latency_min[index]; if (lvalue) { lvalue = (uint32_t)div_u64(latsum, lvalue); *lptr = cpu_to_le32(lvalue); } else { *lptr = 0; } /* Collect the bandwidth value into the driver's congesion buffer. 
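 * The same per-minute slots are later collapsed into the hourly and daily
 * buckets further down in this routine.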
*/ mptr = &cp->cgn_bw_min[index]; *mptr = cpu_to_le16(mvalue); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", index, dvalue, wvalue, *lptr, mvalue, avalue); /* Every hour */ if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { /* Record congestion buffer info - every hour * Collapse all minutes into an hour */ index = ++cp->cgn_index_hour; if (cp->cgn_index_hour == LPFC_HOUR_DAY) { cp->cgn_index_hour = 0; index = 0; } dvalue = 0; wvalue = 0; lvalue = 0; avalue = 0; mvalue = 0; mbps = 0; for (i = 0; i < LPFC_MIN_HOUR; i++) { dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); wvalue += le16_to_cpu(cp->cgn_warn_min[i]); lvalue += le32_to_cpu(cp->cgn_latency_min[i]); mbps += le16_to_cpu(cp->cgn_bw_min[i]); avalue += le16_to_cpu(cp->cgn_alarm_min[i]); } if (lvalue) /* Avg of latency averages */ lvalue /= LPFC_MIN_HOUR; if (mbps) /* Avg of Bandwidth averages */ mvalue = mbps / LPFC_MIN_HOUR; lptr = &cp->cgn_drvr_hr[index]; *lptr = cpu_to_le32(dvalue); lptr = &cp->cgn_warn_hr[index]; *lptr = cpu_to_le32(wvalue); lptr = &cp->cgn_latency_hr[index]; *lptr = cpu_to_le32(lvalue); mptr = &cp->cgn_bw_hr[index]; *mptr = cpu_to_le16(mvalue); lptr = &cp->cgn_alarm_hr[index]; *lptr = cpu_to_le32(avalue); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "2419 Congestion Info - hour " "(%d): %d %d %d %d %d\n", index, dvalue, wvalue, lvalue, mvalue, avalue); } /* Every day */ if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { /* Record congestion buffer info - every hour * Collapse all hours into a day. Rotate days * after LPFC_MAX_CGN_DAYS. */ index = ++cp->cgn_index_day; if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { cp->cgn_index_day = 0; index = 0; } dvalue = 0; wvalue = 0; lvalue = 0; mvalue = 0; mbps = 0; avalue = 0; for (i = 0; i < LPFC_HOUR_DAY; i++) { dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); mbps += le16_to_cpu(cp->cgn_bw_hr[i]); avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); } if (lvalue) /* Avg of latency averages */ lvalue /= LPFC_HOUR_DAY; if (mbps) /* Avg of Bandwidth averages */ mvalue = mbps / LPFC_HOUR_DAY; lptr = &cp->cgn_drvr_day[index]; *lptr = cpu_to_le32(dvalue); lptr = &cp->cgn_warn_day[index]; *lptr = cpu_to_le32(wvalue); lptr = &cp->cgn_latency_day[index]; *lptr = cpu_to_le32(lvalue); mptr = &cp->cgn_bw_day[index]; *mptr = cpu_to_le16(mvalue); lptr = &cp->cgn_alarm_day[index]; *lptr = cpu_to_le32(avalue); lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "2420 Congestion Info - daily (%d): " "%d %d %d %d %d\n", index, dvalue, wvalue, lvalue, mvalue, avalue); } /* Use the frequency found in the last rcv'ed FPIN */ value = phba->cgn_fpin_frequency; cp->cgn_warn_freq = cpu_to_le16(value); cp->cgn_alarm_freq = cpu_to_le16(value); lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(lvalue); hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC)); return HRTIMER_RESTART; } /** * lpfc_calc_cmf_latency - latency from start of rxate timer interval * @phba: The Hba for which this call is being executed. * * The routine calculates the latency from the beginning of the CMF timer * interval to the current point in time. It is called from IO completion * when we exceed our Bandwidth limitation for the time interval. 
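 *
 * For example, if the current CMF interval started at t = 2.750 s and the
 * IO completes at t = 2.900 s, the routine returns 150 (milliseconds).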
*/ uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba) { struct timespec64 cmpl_time; uint32_t msec = 0; ktime_get_real_ts64(&cmpl_time); /* This routine works on a ms granularity so sec and usec are * converted accordingly. */ if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC; } else { if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec) * MSEC_PER_SEC; msec += ((cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); } else { msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 1) * MSEC_PER_SEC; msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + cmpl_time.tv_nsec) / NSEC_PER_MSEC); } } return msec; } /** * lpfc_cmf_timer - This is the timer function for one congestion * rate interval. * @timer: Pointer to the high resolution timer that expired */ static enum hrtimer_restart lpfc_cmf_timer(struct hrtimer *timer) { struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, cmf_timer); struct rx_info_entry entry; uint32_t io_cnt; uint32_t busy, max_read; uint64_t total, rcv, lat, mbpi, extra, cnt; int timer_interval = LPFC_CMF_INTERVAL; uint32_t ms; struct lpfc_cgn_stat *cgs; int cpu; /* Only restart the timer if congestion mgmt is on */ if (phba->cmf_active_mode == LPFC_CFG_OFF || !phba->cmf_latency.tv_sec) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6224 CMF timer exit: %d %lld\n", phba->cmf_active_mode, (uint64_t)phba->cmf_latency.tv_sec); return HRTIMER_NORESTART; } /* If pport is not ready yet, just exit and wait for * the next timer cycle to hit. */ if (!phba->pport) goto skip; /* Do not block SCSI IO while in the timer routine since * total_bytes will be cleared */ atomic_set(&phba->cmf_stop_io, 1); /* First we need to calculate the actual ms between * the last timer interrupt and this one. We ask for * LPFC_CMF_INTERVAL, however the actual time may * vary depending on system overhead. */ ms = lpfc_calc_cmf_latency(phba); /* Immediately after we calculate the time since the last * timer interrupt, set the start time for the next * interrupt */ ktime_get_real_ts64(&phba->cmf_latency); phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); /* Collect all the stats from the prior timer interval */ total = 0; io_cnt = 0; lat = 0; rcv = 0; for_each_present_cpu(cpu) { cgs = per_cpu_ptr(phba->cmf_stat, cpu); total += atomic64_xchg(&cgs->total_bytes, 0); io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); lat += atomic64_xchg(&cgs->rx_latency, 0); rcv += atomic64_xchg(&cgs->rcv_bytes, 0); } /* Before we issue another CMF_SYNC_WQE, retrieve the BW * returned from the last CMF_SYNC_WQE issued, from * cmf_last_sync_bw. This will be the target BW for * this next timer interval. */ if (phba->cmf_active_mode == LPFC_CFG_MANAGED && phba->link_state != LPFC_LINK_DOWN && phba->hba_flag & HBA_SETUP) { mbpi = phba->cmf_last_sync_bw; phba->cmf_last_sync_bw = 0; extra = 0; /* Calculate any extra bytes needed to account for the * timer accuracy. If we are less than LPFC_CMF_INTERVAL * calculate the adjustment needed for total to reflect * a full LPFC_CMF_INTERVAL. 
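	 *
	 * For example, with a hypothetical 100 ms LPFC_CMF_INTERVAL, 90 ms
	 * elapsed and total = 9,000,000 bytes counted, the projection is
	 * 9,000,000 / 90 * 100 = 10,000,000 bytes, so extra = 1,000,000 bytes.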
*/ if (ms && ms < LPFC_CMF_INTERVAL) { cnt = div_u64(total, ms); /* bytes per ms */ cnt *= LPFC_CMF_INTERVAL; /* what total should be */ extra = cnt - total; } lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); } else { /* For Monitor mode or link down we want mbpi * to be the full link speed */ mbpi = phba->cmf_link_byte_count; extra = 0; } phba->cmf_timer_cnt++; if (io_cnt) { /* Update congestion info buffer latency in us */ atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); atomic64_add(lat, &phba->cgn_latency_evt); } busy = atomic_xchg(&phba->cmf_busy, 0); max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); /* Calculate MBPI for the next timer interval */ if (mbpi) { if (mbpi > phba->cmf_link_byte_count || phba->cmf_active_mode == LPFC_CFG_MONITOR) mbpi = phba->cmf_link_byte_count; /* Change max_bytes_per_interval to what the prior * CMF_SYNC_WQE cmpl indicated. */ if (mbpi != phba->cmf_max_bytes_per_interval) phba->cmf_max_bytes_per_interval = mbpi; } /* Save rxmonitor information for debug */ if (phba->rx_monitor) { entry.total_bytes = total; entry.cmf_bytes = total + extra; entry.rcv_bytes = rcv; entry.cmf_busy = busy; entry.cmf_info = phba->cmf_active_info; if (io_cnt) { entry.avg_io_latency = div_u64(lat, io_cnt); entry.avg_io_size = div_u64(rcv, io_cnt); } else { entry.avg_io_latency = 0; entry.avg_io_size = 0; } entry.max_read_cnt = max_read; entry.io_cnt = io_cnt; entry.max_bytes_per_interval = mbpi; if (phba->cmf_active_mode == LPFC_CFG_MANAGED) entry.timer_utilization = phba->cmf_last_ts; else entry.timer_utilization = ms; entry.timer_interval = ms; phba->cmf_last_ts = 0; lpfc_rx_monitor_record(phba->rx_monitor, &entry); } if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { /* If Monitor mode, check if we are oversubscribed * against the full line rate. */ if (mbpi && total > mbpi) atomic_inc(&phba->cgn_driver_evt_cnt); } phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ /* Since total_bytes has already been zero'ed, its okay to unblock * after max_bytes_per_interval is setup. */ if (atomic_xchg(&phba->cmf_bw_wait, 0)) queue_work(phba->wq, &phba->unblock_request_work); /* SCSI IO is now unblocked */ atomic_set(&phba->cmf_stop_io, 0); skip: hrtimer_forward_now(timer, ktime_set(0, timer_interval * NSEC_PER_MSEC)); return HRTIMER_RESTART; } #define trunk_link_status(__idx)\ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ "Link up" : "Link down") : "NA" /* Did port __idx reported an error */ #define trunk_port_fault(__idx)\ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" static void lpfc_update_trunk_link_status(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) { uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); u8 cnt = 0; phba->sli4_hba.link_state.speed = lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); phba->sli4_hba.link_state.logical_speed = bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ phba->fc_linkspeed = lpfc_async_link_speed_to_read_top( phba, bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { phba->trunk_link.link0.state = bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) ? LPFC_LINK_UP : LPFC_LINK_DOWN; phba->trunk_link.link0.fault = port_fault & 0x1 ? 
err : 0; cnt++; } if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { phba->trunk_link.link1.state = bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) ? LPFC_LINK_UP : LPFC_LINK_DOWN; phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; cnt++; } if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { phba->trunk_link.link2.state = bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) ? LPFC_LINK_UP : LPFC_LINK_DOWN; phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; cnt++; } if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { phba->trunk_link.link3.state = bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) ? LPFC_LINK_UP : LPFC_LINK_DOWN; phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; cnt++; } if (cnt) phba->trunk_link.phy_lnk_speed = phba->sli4_hba.link_state.logical_speed / (cnt * 1000); else phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2910 Async FC Trunking Event - Speed:%d\n" "\tLogical speed:%d " "port0: %s port1: %s port2: %s port3: %s\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.logical_speed, trunk_link_status(0), trunk_link_status(1), trunk_link_status(2), trunk_link_status(3)); if (phba->cmf_active_mode != LPFC_CFG_OFF) lpfc_cmf_signal_init(phba); if (port_fault) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3202 trunk error:0x%x (%s) seen on port0:%s " /* * SLI-4: We have only 0xA error codes * defined as of now. print an appropriate * message in case driver needs to be updated. */ "port1:%s port2:%s port3:%s\n", err, err > 0xA ? "UNDEFINED. update driver." : trunk_errmsg[err], trunk_port_fault(0), trunk_port_fault(1), trunk_port_fault(2), trunk_port_fault(3)); } /** * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event * @phba: pointer to lpfc hba data structure. * @acqe_fc: pointer to the async fc completion queue entry. * * This routine is to handle the SLI4 asynchronous FC event. It will simply log * that the event was received and then issue a read_topology mailbox command so * that the rest of the driver will treat it the same as SLI3. **/ static void lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) { LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_mbx_read_top *la; char *log_level; int rc; if (bf_get(lpfc_trailer_type, acqe_fc) != LPFC_FC_LA_EVENT_TYPE_FC_LINK) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2895 Non FC link Event detected.(%d)\n", bf_get(lpfc_trailer_type, acqe_fc)); return; } if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == LPFC_FC_LA_TYPE_TRUNKING_EVENT) { lpfc_update_trunk_link_status(phba, acqe_fc); return; } /* Keep the link status for extra SLI4 state machine reference */ phba->sli4_hba.link_state.speed = lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; phba->sli4_hba.link_state.topology = bf_get(lpfc_acqe_fc_la_topology, acqe_fc); phba->sli4_hba.link_state.status = bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); phba->sli4_hba.link_state.type = bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); phba->sli4_hba.link_state.number = bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_fc); phba->sli4_hba.link_state.link_status = bf_get(lpfc_acqe_fc_la_link_status, acqe_fc); /* * Only select attention types need logical speed modification to what * was previously set. 
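 * A link-down attention zeroes the logical speed; otherwise the
 * ACQE-reported logical link speed is taken unless trunking is configured,
 * in which case the previously set value is kept.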
*/ if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP && phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == LPFC_FC_LA_TYPE_LINK_DOWN) phba->sli4_hba.link_state.logical_speed = 0; else if (!phba->sli4_hba.conf_trunk) phba->sli4_hba.link_state.logical_speed = bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; } lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2896 Async FC event - Speed:%dGBaud Topology:x%x " "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" "%dMbps Fault:x%x Link Status:x%x\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.topology, phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, phba->sli4_hba.link_state.logical_speed, phba->sli4_hba.link_state.fault, phba->sli4_hba.link_state.link_status); /* * The following attention types are informational only, providing * further details about link status. Overwrite the value of * link_state.status appropriately. No further action is required. */ if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { switch (phba->sli4_hba.link_state.status) { case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: log_level = KERN_WARNING; phba->sli4_hba.link_state.status = LPFC_FC_LA_TYPE_LINK_DOWN; break; case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: /* * During bb credit recovery establishment, receiving * this attention type is normal. Link Up attention * type is expected to occur before this informational * attention type so keep the Link Up status. */ log_level = KERN_INFO; phba->sli4_hba.link_state.status = LPFC_FC_LA_TYPE_LINK_UP; break; default: log_level = KERN_INFO; break; } lpfc_log_msg(phba, log_level, LOG_SLI, "2992 Async FC event - Informational Link " "Attention Type x%x\n", bf_get(lpfc_acqe_fc_la_att_type, acqe_fc)); return; } pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2897 The mboxq allocation failed\n"); return; } rc = lpfc_mbox_rsrc_prep(phba, pmb); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2898 The mboxq prep failed\n"); goto out_free_pmb; } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); /* Block ELS IOCBs until we have done process link event */ phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; /* Update link event statistics */ phba->sli.slistat.link_event++; /* Create lpfc_handle_latt mailbox command from link ACQE */ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = phba->pport; if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); switch (phba->sli4_hba.link_state.status) { case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: phba->link_flag |= LS_MDS_LINK_DOWN; break; case LPFC_FC_LA_TYPE_MDS_LOOPBACK: phba->link_flag |= LS_MDS_LOOPBACK; break; default: break; } /* Initialize completion status */ mb = &pmb->u.mb; mb->mbxStatus = MBX_SUCCESS; /* Parse port fault information field */ lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); /* Parse and translate link attention fields */ la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; la->eventTag = acqe_fc->event_tag; if (phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_UNEXP_WWPN) { bf_set(lpfc_mbx_read_top_att_type, la, LPFC_FC_LA_TYPE_UNEXP_WWPN); } else { bf_set(lpfc_mbx_read_top_att_type, la, LPFC_FC_LA_TYPE_LINK_DOWN); } /* Invoke the mailbox command 
callback function */ lpfc_mbx_cmpl_read_topology(phba, pmb); return; } rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_pmb; return; out_free_pmb: lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); } /** * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event * @phba: pointer to lpfc hba data structure. * @acqe_sli: pointer to the async SLI completion queue entry. * * This routine is to handle the SLI4 asynchronous SLI events. **/ static void lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) { char port_name; char message[128]; uint8_t status; uint8_t evt_type; uint8_t operational = 0; struct temp_event temp_event_data; struct lpfc_acqe_misconfigured_event *misconfigured; struct lpfc_acqe_cgn_signal *cgn_signal; struct Scsi_Host *shost; struct lpfc_vport **vports; int rc, i, cnt; evt_type = bf_get(lpfc_trailer_type, acqe_sli); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2901 Async SLI event - Type:%d, Event Data: x%08x " "x%08x x%08x x%08x\n", evt_type, acqe_sli->event_data1, acqe_sli->event_data2, acqe_sli->event_data3, acqe_sli->trailer); port_name = phba->Port[0]; if (port_name == 0x00) port_name = '?'; /* get port name is empty */ switch (evt_type) { case LPFC_SLI_EVENT_TYPE_OVER_TEMP: temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; temp_event_data.event_code = LPFC_THRESHOLD_TEMP; temp_event_data.data = (uint32_t)acqe_sli->event_data1; lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3190 Over Temperature:%d Celsius- Port Name %c\n", acqe_sli->event_data1, port_name); phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), (char *)&temp_event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); break; case LPFC_SLI_EVENT_TYPE_NORM_TEMP: temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; temp_event_data.event_code = LPFC_NORMAL_TEMP; temp_event_data.data = (uint32_t)acqe_sli->event_data1; lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT, "3191 Normal Temperature:%d Celsius - Port Name %c\n", acqe_sli->event_data1, port_name); shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), (char *)&temp_event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); break; case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: misconfigured = (struct lpfc_acqe_misconfigured_event *) &acqe_sli->event_data1; /* fetch the status for this port */ switch (phba->sli4_hba.lnk_info.lnk_no) { case LPFC_LINK_NUMBER_0: status = bf_get(lpfc_sli_misconfigured_port0_state, &misconfigured->theEvent); operational = bf_get(lpfc_sli_misconfigured_port0_op, &misconfigured->theEvent); break; case LPFC_LINK_NUMBER_1: status = bf_get(lpfc_sli_misconfigured_port1_state, &misconfigured->theEvent); operational = bf_get(lpfc_sli_misconfigured_port1_op, &misconfigured->theEvent); break; case LPFC_LINK_NUMBER_2: status = bf_get(lpfc_sli_misconfigured_port2_state, &misconfigured->theEvent); operational = bf_get(lpfc_sli_misconfigured_port2_op, &misconfigured->theEvent); break; case LPFC_LINK_NUMBER_3: status = bf_get(lpfc_sli_misconfigured_port3_state, &misconfigured->theEvent); operational = bf_get(lpfc_sli_misconfigured_port3_op, &misconfigured->theEvent); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3296 " "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " "event: Invalid link %d", phba->sli4_hba.lnk_info.lnk_no); return; } /* Skip if optic 
state unchanged */ if (phba->sli4_hba.lnk_info.optic_state == status) return; switch (status) { case LPFC_SLI_EVENT_STATUS_VALID: sprintf(message, "Physical Link is functional"); break; case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: sprintf(message, "Optics faulted/incorrectly " "installed/not installed - Reseat optics, " "if issue not resolved, replace."); break; case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: sprintf(message, "Optics of two types installed - Remove one " "optic or install matching pair of optics."); break; case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: sprintf(message, "Incompatible optics - Replace with " "compatible optics for card to function."); break; case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: sprintf(message, "Unqualified optics - Replace with " "Avago optics for Warranty and Technical " "Support - Link is%s operational", (operational) ? " not" : ""); break; case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: sprintf(message, "Uncertified optics - Replace with " "Avago-certified optics to enable link " "operation - Link is%s operational", (operational) ? " not" : ""); break; default: /* firmware is reporting a status we don't know about */ sprintf(message, "Unknown event status x%02x", status); break; } /* Issue READ_CONFIG mbox command to refresh supported speeds */ rc = lpfc_sli4_read_config(phba); if (rc) { phba->lmt = 0; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3194 Unable to retrieve supported " "speeds, rc = 0x%x\n", rc); } rc = lpfc_sli4_refresh_params(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3174 Unable to update pls support, " "rc x%x\n", rc); } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); lpfc_host_supported_speeds_set(shost); } } lpfc_destroy_vport_work_array(phba, vports); phba->sli4_hba.lnk_info.optic_state = status; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3176 Port Name %c %s\n", port_name, message); break; case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3192 Remote DPort Test Initiated - " "Event Data1:x%08x Event Data2: x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2); break; case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: /* Call FW to obtain active parms */ lpfc_sli4_cgn_parm_chg_evt(phba); break; case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: /* Misconfigured WWN. Reports that the SLI Port is configured * to use FA-WWN, but the attached device doesn’t support it. * Event Data1 - N.A, Event Data2 - N.A * This event only happens on the physical port. */ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY, "2699 Misconfigured FA-PWWN - Attached device " "does not support FA-PWWN\n"); phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; memset(phba->pport->fc_portname.u.wwn, 0, sizeof(struct lpfc_name)); break; case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: /* EEPROM failure. 
No driver action is required */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2518 EEPROM failure - " "Event Data1: x%08x Event Data2: x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2); break; case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: if (phba->cmf_active_mode == LPFC_CFG_OFF) break; cgn_signal = (struct lpfc_acqe_cgn_signal *) &acqe_sli->event_data1; phba->cgn_acqe_cnt++; cnt = bf_get(lpfc_warn_acqe, cgn_signal); atomic64_add(cnt, &phba->cgn_acqe_stat.warn); atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); /* no threshold for CMF, even 1 signal will trigger an event */ /* Alarm overrides warning, so check that first */ if (cgn_signal->alarm_cnt) { if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { /* Keep track of alarm cnt for CMF_SYNC_WQE */ atomic_add(cgn_signal->alarm_cnt, &phba->cgn_sync_alarm_cnt); } } else if (cnt) { /* signal action needs to be taken */ if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { /* Keep track of warning cnt for CMF_SYNC_WQE */ atomic_add(cnt, &phba->cgn_sync_warn_cnt); } } break; case LPFC_SLI_EVENT_TYPE_RD_SIGNAL: /* May be accompanied by a temperature event */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT, "2902 Remote Degrade Signaling: x%08x x%08x " "x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2, acqe_sli->event_data3); break; default: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3193 Unrecognized SLI event, type: 0x%x", evt_type); break; } } /** * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport * @vport: pointer to vport data structure. * * This routine is to perform Clear Virtual Link (CVL) on a vport in * response to a CVL event. * * Return the pointer to the ndlp with the vport if successful, otherwise * return NULL. **/ static struct lpfc_nodelist * lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; struct lpfc_hba *phba; if (!vport) return NULL; phba = vport->phba; if (!phba) return NULL; ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = lpfc_nlp_init(vport, Fabric_DID); if (!ndlp) return NULL; /* Set the node type */ ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } if ((phba->pport->port_state < LPFC_FLOGI) && (phba->pport->port_state != LPFC_VPORT_FAILED)) return NULL; /* If virtual link is not yet instantiated ignore CVL */ if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) && (vport->port_state != LPFC_VPORT_FAILED)) return NULL; shost = lpfc_shost_from_vport(vport); if (!shost) return NULL; lpfc_linkdown_port(vport); lpfc_cleanup_pending_mbox(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_CVL_RCVD; spin_unlock_irq(shost->host_lock); return ndlp; } /** * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports * @phba: pointer to lpfc hba data structure. * * This routine is to perform Clear Virtual Link (CVL) on all vports in * response to a FCF dead event. **/ static void lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; vports = lpfc_create_vport_work_array(phba); if (vports) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_sli4_perform_vport_cvl(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event * @phba: pointer to lpfc hba data structure. 
* @acqe_fip: pointer to the async fcoe completion queue entry. * * This routine is to handle the SLI4 asynchronous fcoe event. **/ static void lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, struct lpfc_acqe_fip *acqe_fip) { uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); int rc; struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; int active_vlink_present; struct lpfc_vport **vports; int i; phba->fc_eventTag = acqe_fip->event_tag; phba->fcoe_eventtag = acqe_fip->event_tag; switch (event_type) { case LPFC_FIP_EVENT_TYPE_NEW_FCF: case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2546 New FCF event, evt_tag:x%x, " "index:x%x\n", acqe_fip->event_tag, acqe_fip->index); else lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_DISCOVERY, "2788 FCF param modified event, " "evt_tag:x%x, index:x%x\n", acqe_fip->event_tag, acqe_fip->index); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { /* * During period of FCF discovery, read the FCF * table record indexed by the event to update * FCF roundrobin failover eligible FCF bmask. */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2779 Read FCF (x%x) for updating " "roundrobin FCF failover bmask\n", acqe_fip->index); rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); } /* If the FCF discovery is in progress, do nothing. */ spin_lock_irq(&phba->hbalock); if (phba->hba_flag & FCF_TS_INPROG) { spin_unlock_irq(&phba->hbalock); break; } /* If fast FCF failover rescan event is pending, do nothing */ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); break; } /* If the FCF has been in discovered state, do nothing. */ if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { spin_unlock_irq(&phba->hbalock); break; } spin_unlock_irq(&phba->hbalock); /* Otherwise, scan the entire FCF table and re-discover SAN */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2770 Start FCF table scan per async FCF " "event, evt_tag:x%x, index:x%x\n", acqe_fip->event_tag, acqe_fip->index); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2547 Issue FCF scan read FCF mailbox " "command failed (x%x)\n", rc); break; case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2548 FCF Table full count 0x%x tag 0x%x\n", bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), acqe_fip->event_tag); break; case LPFC_FIP_EVENT_TYPE_FCF_DEAD: phba->fcoe_cvl_eventtag = acqe_fip->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2549 FCF (x%x) disconnected from network, " "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); /* * If we are in the middle of FCF failover process, clear * the corresponding FCF bit in the roundrobin bitmap. */ spin_lock_irq(&phba->hbalock); if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { spin_unlock_irq(&phba->hbalock); /* Update FLOGI FCF failover eligible FCF bmask */ lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); break; } spin_unlock_irq(&phba->hbalock); /* If the event is not for currently used fcf do nothing */ if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) break; /* * Otherwise, request the port to rediscover the entire FCF * table for a fast recovery from case that the current FCF * is no longer valid as we are not in the middle of FCF * failover process already. 
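 *
 * The fast failover path below sets FCF_DEAD_DISC and requests an FCF
 * table rediscover; if the rediscover mailbox command cannot be issued,
 * the handler falls back to treating the event like a link down.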
*/ spin_lock_irq(&phba->hbalock); /* Mark the fast failover process in progress */ phba->fcf.fcf_flag |= FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2771 Start FCF fast failover process due to " "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " "\n", acqe_fip->event_tag, acqe_fip->index); rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_TRACE_EVENT, "2772 Issue FCF rediscover mailbox " "command failed, fail through to FCF " "dead event\n"); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); /* * Last resort will fail over by treating this * as a link down to FCF registration. */ lpfc_sli4_fcf_dead_failthrough(phba); } else { /* Reset FCF roundrobin bmask for new discovery */ lpfc_sli4_clear_fcf_rr_bmask(phba); /* * Handling fast FCF failover to a DEAD FCF event is * considered equalivant to receiving CVL to all vports. */ lpfc_sli4_perform_all_vport_cvl(phba); } break; case LPFC_FIP_EVENT_TYPE_CVL: phba->fcoe_cvl_eventtag = acqe_fip->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); vport = lpfc_find_vport_by_vpid(phba, acqe_fip->index); ndlp = lpfc_sli4_perform_vport_cvl(vport); if (!ndlp) break; active_vlink_present = 0; vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if ((!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) && (vports[i]->port_state > LPFC_FDISC)) { active_vlink_present = 1; break; } } lpfc_destroy_vport_work_array(phba, vports); } /* * Don't re-instantiate if vport is marked for deletion. * If we are here first then vport_delete is going to wait * for discovery to complete. */ if (!(vport->load_flag & FC_UNLOADING) && active_vlink_present) { /* * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { /* * Otherwise, we request port to rediscover * the entire FCF table for a fast recovery * from possible case that the current FCF * is no longer valid if we are not already * in the FCF failover process. */ spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { spin_unlock_irq(&phba->hbalock); break; } /* Mark the fast failover process in progress */ phba->fcf.fcf_flag |= FCF_ACVL_DISC; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2773 Start FCF failover per CVL, " "evt_tag:x%x\n", acqe_fip->event_tag); rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_TRACE_EVENT, "2774 Issue FCF rediscover " "mailbox command failed, " "through to CVL event\n"); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; spin_unlock_irq(&phba->hbalock); /* * Last resort will be re-try on the * the current registered FCF entry. */ lpfc_retry_pport_discovery(phba); } else /* * Reset FCF roundrobin bmask for new * discovery. 
*/ lpfc_sli4_clear_fcf_rr_bmask(phba); } break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0288 Unknown FCoE event type 0x%x event tag " "0x%x\n", event_type, acqe_fip->event_tag); break; } } /** * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event * @phba: pointer to lpfc hba data structure. * @acqe_dcbx: pointer to the async dcbx completion queue entry. * * This routine is to handle the SLI4 asynchronous dcbx event. **/ static void lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, struct lpfc_acqe_dcbx *acqe_dcbx) { phba->fc_eventTag = acqe_dcbx->event_tag; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0290 The SLI4 DCBX asynchronous event is not " "handled yet\n"); } /** * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event * @phba: pointer to lpfc hba data structure. * @acqe_grp5: pointer to the async grp5 completion queue entry. * * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event * is an asynchronous notified of a logical link speed change. The Port * reports the logical link speed in units of 10Mbps. **/ static void lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, struct lpfc_acqe_grp5 *acqe_grp5) { uint16_t prev_ll_spd; phba->fc_eventTag = acqe_grp5->event_tag; phba->fcoe_eventtag = acqe_grp5->event_tag; prev_ll_spd = phba->sli4_hba.link_state.logical_speed; phba->sli4_hba.link_state.logical_speed = (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2789 GRP5 Async Event: Updating logical link speed " "from %dMbps to %dMbps\n", prev_ll_spd, phba->sli4_hba.link_state.logical_speed); } /** * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event * @phba: pointer to lpfc hba data structure. * * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event * is an asynchronous notification of a request to reset CM stats. **/ static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) { if (!phba->cgn_i) return; lpfc_init_congestion_stat(phba); } /** * lpfc_cgn_params_val - Validate FW congestion parameters. * @phba: pointer to lpfc hba data structure. * @p_cfg_param: pointer to FW provided congestion parameters. * * This routine validates the congestion parameters passed * by the FW to the driver via an ACQE event. **/ static void lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) { spin_lock_irq(&phba->hbalock); if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, LPFC_CFG_MONITOR)) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, "6225 CMF mode param out of range: %d\n", p_cfg_param->cgn_param_mode); p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; } spin_unlock_irq(&phba->hbalock); } static const char * const lpfc_cmf_mode_to_str[] = { "OFF", "MANAGED", "MONITOR", }; /** * lpfc_cgn_params_parse - Process a FW cong parm change event * @phba: pointer to lpfc hba data structure. * @p_cgn_param: pointer to a data buffer with the FW cong params. * @len: the size of pdata in bytes. * * This routine validates the congestion management buffer signature * from the FW, validates the contents and makes corrections for * valid, in-range values. If the signature magic is correct and * after parameter validation, the contents are copied to the driver's * @phba structure. If the magic is incorrect, an error message is * logged. 
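 *
 * A mode change is acted on as well: for example, moving from LPFC_CFG_OFF
 * to a managed or monitor mode starts CMF and, if the link is already up,
 * re-issues an EDC ELS to the fabric.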
**/ static void lpfc_cgn_params_parse(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cgn_param, uint32_t len) { struct lpfc_cgn_info *cp; uint32_t crc, oldmode; char acr_string[4] = {0}; /* Make sure the FW has encoded the correct magic number to * validate the congestion parameter in FW memory. */ if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, "4668 FW cgn parm buffer data: " "magic 0x%x version %d mode %d " "level0 %d level1 %d " "level2 %d byte13 %d " "byte14 %d byte15 %d " "byte11 %d byte12 %d activeMode %d\n", p_cgn_param->cgn_param_magic, p_cgn_param->cgn_param_version, p_cgn_param->cgn_param_mode, p_cgn_param->cgn_param_level0, p_cgn_param->cgn_param_level1, p_cgn_param->cgn_param_level2, p_cgn_param->byte13, p_cgn_param->byte14, p_cgn_param->byte15, p_cgn_param->byte11, p_cgn_param->byte12, phba->cmf_active_mode); oldmode = phba->cmf_active_mode; /* Any parameters out of range are corrected to defaults * by this routine. No need to fail. */ lpfc_cgn_params_val(phba, p_cgn_param); /* Parameters are verified, move them into driver storage */ spin_lock_irq(&phba->hbalock); memcpy(&phba->cgn_p, p_cgn_param, sizeof(struct lpfc_cgn_param)); /* Update parameters in congestion info buffer now */ if (phba->cgn_i) { cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(crc); } spin_unlock_irq(&phba->hbalock); phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; switch (oldmode) { case LPFC_CFG_OFF: if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { /* Turning CMF on */ lpfc_cmf_start(phba); if (phba->link_state >= LPFC_LINK_UP) { phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; phba->cgn_reg_signal = phba->cgn_init_reg_signal; lpfc_issue_els_edc(phba->pport, 0); } } break; case LPFC_CFG_MANAGED: switch (phba->cgn_p.cgn_param_mode) { case LPFC_CFG_OFF: /* Turning CMF off */ lpfc_cmf_stop(phba); if (phba->link_state >= LPFC_LINK_UP) lpfc_issue_els_edc(phba->pport, 0); break; case LPFC_CFG_MONITOR: phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; /* Resume blocked IO - unblock on workqueue */ queue_work(phba->wq, &phba->unblock_request_work); break; } break; case LPFC_CFG_MONITOR: switch (phba->cgn_p.cgn_param_mode) { case LPFC_CFG_OFF: /* Turning CMF off */ lpfc_cmf_stop(phba); if (phba->link_state >= LPFC_LINK_UP) lpfc_issue_els_edc(phba->pport, 0); break; case LPFC_CFG_MANAGED: lpfc_cmf_signal_init(phba); break; } break; } if (oldmode != LPFC_CFG_OFF || oldmode != phba->cgn_p.cgn_param_mode) { if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED) scnprintf(acr_string, sizeof(acr_string), "%u", phba->cgn_p.cgn_param_level0); else scnprintf(acr_string, sizeof(acr_string), "NA"); dev_info(&phba->pcidev->dev, "%d: " "4663 CMF: Mode %s acr %s\n", phba->brd_no, lpfc_cmf_mode_to_str [phba->cgn_p.cgn_param_mode], acr_string); } } else { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "4669 FW cgn parm buf wrong magic 0x%x " "version %d\n", p_cgn_param->cgn_param_magic, p_cgn_param->cgn_param_version); } } /** * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. * @phba: pointer to lpfc hba data structure. 
* * This routine issues a read_object mailbox command to * get the congestion management parameters from the FW * parses it and updates the driver maintained values. * * Returns * 0 if the object was empty * -Eval if an error was encountered * Count if bytes were read from object **/ int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) { int ret = 0; struct lpfc_cgn_param *p_cgn_param = NULL; u32 *pdata = NULL; u32 len = 0; /* Find out if the FW has a new set of congestion parameters. */ len = sizeof(struct lpfc_cgn_param); pdata = kzalloc(len, GFP_KERNEL); if (!pdata) return -ENOMEM; ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, pdata, len); /* 0 means no data. A negative means error. A positive means * bytes were copied. */ if (!ret) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "4670 CGN RD OBJ returns no data\n"); goto rd_obj_err; } else if (ret < 0) { /* Some error. Just exit and return it to the caller.*/ goto rd_obj_err; } lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, "6234 READ CGN PARAMS Successful %d\n", len); /* Parse data pointer over len and update the phba congestion * parameters with values passed back. The receive rate values * may have been altered in FW, but take no action here. */ p_cgn_param = (struct lpfc_cgn_param *)pdata; lpfc_cgn_params_parse(phba, p_cgn_param, len); rd_obj_err: kfree(pdata); return ret; } /** * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event * @phba: pointer to lpfc hba data structure. * * The FW generated Async ACQE SLI event calls this routine when * the event type is an SLI Internal Port Event and the Event Code * indicates a change to the FW maintained congestion parameters. * * This routine executes a Read_Object mailbox call to obtain the * current congestion parameters maintained in FW and corrects * the driver's active congestion parameters. * * The acqe event is not passed because there is no further data * required. * * Returns nonzero error if event processing encountered an error. * Zero otherwise for success. **/ static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) { int ret = 0; if (!phba->sli4_hba.pc_sli4_params.cmf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "4664 Cgn Evt when E2E off. Drop event\n"); return -EACCES; } /* If the event is claiming an empty object, it's ok. A write * could have cleared it. Only error is a negative return * status. */ ret = lpfc_sli4_cgn_params_read(phba); if (ret < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "4667 Error reading Cgn Params (%d)\n", ret); } else if (!ret) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, "4673 CGN Event empty object.\n"); } return ret; } /** * lpfc_sli4_async_event_proc - Process all the pending asynchronous event * @phba: pointer to lpfc hba data structure. * * This routine is invoked by the worker thread to process all the pending * SLI4 asynchronous events. 
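 * Each queued CQ event is dispatched on its trailer code (LINK, FCOE, DCBX, GRP5, FC, SLI or CMSTAT) and then released back to the CQ event free pool.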
**/ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; unsigned long iflags; /* First, declare the async event has been handled */ spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~ASYNC_EVENT; spin_unlock_irqrestore(&phba->hbalock, iflags); /* Now, handle all the async events */ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, cq_event, struct lpfc_cq_event, list); spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); /* Process the asynchronous event */ switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { case LPFC_TRAILER_CODE_LINK: lpfc_sli4_async_link_evt(phba, &cq_event->cqe.acqe_link); break; case LPFC_TRAILER_CODE_FCOE: lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); break; case LPFC_TRAILER_CODE_DCBX: lpfc_sli4_async_dcbx_evt(phba, &cq_event->cqe.acqe_dcbx); break; case LPFC_TRAILER_CODE_GRP5: lpfc_sli4_async_grp5_evt(phba, &cq_event->cqe.acqe_grp5); break; case LPFC_TRAILER_CODE_FC: lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); break; case LPFC_TRAILER_CODE_SLI: lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); break; case LPFC_TRAILER_CODE_CMSTAT: lpfc_sli4_async_cmstat_evt(phba); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1804 Invalid asynchronous event code: " "x%x\n", bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)); break; } /* Free the completion event processed to the free pool */ lpfc_sli4_cq_event_release(phba, cq_event); spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); } spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); } /** * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event * @phba: pointer to lpfc hba data structure. * * This routine is invoked by the worker thread to process FCF table * rediscovery pending completion event. **/ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) { int rc; spin_lock_irq(&phba->hbalock); /* Clear FCF rediscovery timeout event */ phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; /* Clear driver fast failover FCF record flag */ phba->fcf.failover_rec.flag = 0; /* Set state for FCF fast failover */ phba->fcf.fcf_flag |= FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); /* Scan FCF table from the first entry to re-discover SAN */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2777 Start post-quiescent FCF table scan\n"); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2747 Issue FCF scan read FCF mailbox " "command failed 0x%x\n", rc); } /** * lpfc_api_table_setup - Set up per hba pci-device group func api jump table * @phba: pointer to lpfc hba data structure. * @dev_grp: The HBA PCI-Device group number. * * This routine is invoked to set up the per HBA PCI-Device group function * API jump table entries. 
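 * The INIT, SCSI, SLI and MBOX API jump tables are populated for the given device group; LPFC_PCI_DEV_OC devices are marked as SLI-4 here.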
* * Return: 0 if success, otherwise -ENODEV **/ int lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { int rc; /* Set up lpfc PCI-device group */ phba->pci_dev_grp = dev_grp; /* The LPFC_PCI_DEV_OC uses SLI4 */ if (dev_grp == LPFC_PCI_DEV_OC) phba->sli_rev = LPFC_SLI_REV4; /* Set up device INIT API function jump table */ rc = lpfc_init_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; /* Set up SCSI API function jump table */ rc = lpfc_scsi_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; /* Set up SLI API function jump table */ rc = lpfc_sli_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; /* Set up MBOX API function jump table */ rc = lpfc_mbox_api_table_setup(phba, dev_grp); if (rc) return -ENODEV; return 0; } /** * lpfc_log_intr_mode - Log the active interrupt mode * @phba: pointer to lpfc hba data structure. * @intr_mode: active interrupt mode adopted. * * This routine it invoked to log the currently used active interrupt mode * to the device. **/ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) { switch (intr_mode) { case 0: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0470 Enable INTx interrupt mode.\n"); break; case 1: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0481 Enabled MSI interrupt mode.\n"); break; case 2: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0480 Enabled MSI-X interrupt mode.\n"); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0482 Illegal interrupt mode.\n"); break; } return; } /** * lpfc_enable_pci_dev - Enable a generic PCI device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the PCI device that is common to all * PCI devices. * * Return codes * 0 - successful * other values - error **/ static int lpfc_enable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; /* Obtain PCI device reference */ if (!phba->pcidev) goto out_error; else pdev = phba->pcidev; /* Enable PCI device */ if (pci_enable_device_mem(pdev)) goto out_error; /* Request PCI resource for the device */ if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) goto out_disable_device; /* Set up device as PCI master and save state for EEH */ pci_set_master(pdev); pci_try_set_mwi(pdev); pci_save_state(pdev); /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ if (pci_is_pcie(pdev)) pdev->needs_freset = 1; return 0; out_disable_device: pci_disable_device(pdev); out_error: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1401 Failed to enable pci device\n"); return -ENODEV; } /** * lpfc_disable_pci_dev - Disable a generic PCI device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable the PCI device that is common to all * PCI devices. **/ static void lpfc_disable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; /* Release PCI resource and disable PCI device */ pci_release_mem_regions(pdev); pci_disable_device(pdev); return; } /** * lpfc_reset_hba - Reset a hba * @phba: pointer to lpfc hba data structure. * * This routine is invoked to reset a hba device. It brings the HBA * offline, performs a board restart, and then brings the board back * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up * on outstanding mailbox commands. **/ void lpfc_reset_hba(struct lpfc_hba *phba) { int rc = 0; /* If resets are disabled then set error state and return. 
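 * The HBA is left in LPFC_HBA_ERROR and no restart is attempted.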
*/ if (!phba->cfg_enable_hba_reset) { phba->link_state = LPFC_HBA_ERROR; return; } /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { lpfc_offline_prep(phba, LPFC_MBX_WAIT); } else { if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) { /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); lpfc_els_flush_all_cmd(phba); } lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_sli_flush_io_rings(phba); } lpfc_offline(phba); clear_bit(MBX_TMO_ERR, &phba->bit_flags); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "8888 PCI function reset failed rc %x\n", rc); } else { lpfc_sli_brdrestart(phba); lpfc_online(phba); lpfc_unblock_mgmt_io(phba); } } /** * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions * @phba: pointer to lpfc hba data structure. * * This function reads the SR-IOV extended capability of the physical * function and returns the total number of virtual functions (TotalVFs) * the device supports. A return value of 0 means the device has no * SR-IOV capability. **/ uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) { struct pci_dev *pdev = phba->pcidev; uint16_t nr_virtfn; int pos; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (pos == 0) return 0; pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); return nr_virtfn; } /** * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions * @phba: pointer to lpfc hba data structure. * @nr_vfn: number of virtual functions to be enabled. * * This function enables the PCI SR-IOV virtual functions to a physical * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to * enable the number of virtual functions to the physical function. As * not all devices support SR-IOV, a nonzero return code from the * pci_enable_sriov() API call is not considered an error condition for * most devices. **/ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) { struct pci_dev *pdev = phba->pcidev; uint16_t max_nr_vfn; int rc; max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); if (nr_vfn > max_nr_vfn) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3057 Requested vfs (%d) greater than " "supported vfs (%d)", nr_vfn, max_nr_vfn); return -EINVAL; } rc = pci_enable_sriov(pdev, nr_vfn); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2806 Failed to enable sriov on this device " "with vfn number nr_vf:%d, rc:%d\n", nr_vfn, rc); } else lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2807 Successful enable sriov on this device " "with vfn number nr_vf:%d\n", nr_vfn); return rc; } static void lpfc_unblock_requests_work(struct work_struct *work) { struct lpfc_hba *phba = container_of(work, struct lpfc_hba, unblock_request_work); lpfc_unblock_requests(phba); } /** * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the driver internal resources before the * device specific resource setup to support the HBA device it attached to.
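 * Resources set up here are common to all SLI revisions: locks, list heads, the worker wait queue, the mailbox/fabric/heartbeat/error-attention timers, and the deferred work items.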
* * Return codes * 0 - successful * other values - error **/ static int lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; /* * Driver resources common to all SLI revisions */ atomic_set(&phba->fast_event_count, 0); atomic_set(&phba->dbg_log_idx, 0); atomic_set(&phba->dbg_log_cnt, 0); atomic_set(&phba->dbg_log_dmping, 0); spin_lock_init(&phba->hbalock); /* Initialize port_list spinlock */ spin_lock_init(&phba->port_list_lock); INIT_LIST_HEAD(&phba->port_list); INIT_LIST_HEAD(&phba->work_list); /* Initialize the wait queue head for the kernel thread */ init_waitqueue_head(&phba->work_waitq); lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1403 Protocols supported %s %s %s\n", ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? "SCSI" : " "), ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? "NVME" : " "), (phba->nvmet_support ? "NVMET" : " ")); /* Initialize the IO buffer list used by driver for SLI3 SCSI */ spin_lock_init(&phba->scsi_buf_list_get_lock); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); spin_lock_init(&phba->scsi_buf_list_put_lock); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); /* Initialize list to save ELS buffers */ INIT_LIST_HEAD(&phba->elsbuf); /* Initialize FCF connection rec list */ INIT_LIST_HEAD(&phba->fcf_conn_rec_list); /* Initialize OAS configuration list */ spin_lock_init(&phba->devicelock); INIT_LIST_HEAD(&phba->luns); /* MBOX heartbeat timer */ timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); /* Fabric block timer */ timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); /* EA polling mode timer */ timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); /* Heartbeat timer */ timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); INIT_DELAYED_WORK(&phba->idle_stat_delay_work, lpfc_idle_stat_delay_work); INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); return 0; } /** * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the driver internal resources specific to * support the SLI-3 HBA device it attached to. * * Return codes * 0 - successful * other values - error **/ static int lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) { int rc, entry_sz; /* * Initialize timers used by driver */ /* FCP polling mode timer */ timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); /* Host attention work mask setup */ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); /* Set up phase-1 common device driver resources */ rc = lpfc_setup_driver_resource_phase1(phba); if (rc) return -ENODEV; if (!phba->sli.sli3_ring) phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.sli3_ring) return -ENOMEM; /* * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. */ if (phba->sli_rev == LPFC_SLI_REV4) entry_sz = sizeof(struct sli4_sge); else entry_sz = sizeof(struct ulp_bde64); /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { /* * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, * the FCP rsp, and a BDE for each. 
Sice we have no control * over how many protection data segments the SCSI Layer * will hand us (ie: there could be one for every block * in the IO), we just allocate enough BDEs to accomidate * our max amount and we need to limit lpfc_sg_seg_cnt to * minimize the risk of running out. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + (LPFC_MAX_SG_SEG_CNT * entry_sz); if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; } else { /* * The scsi_buf for a regular I/O will hold the FCP cmnd, * the FCP rsp, a BDE for each, and a BDE for up to * cfg_sg_seg_cnt data segments. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + ((phba->cfg_sg_seg_cnt + 2) * entry_sz); /* Total BDEs in BPL for scsi_sg_list */ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ phba->max_vports = 0; /* * Initialize the SLI Layer to run with lpfc HBAs. */ lpfc_sli_setup(phba); lpfc_sli_queue_init(phba); /* Allocate device driver memory */ if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) return -ENOMEM; phba->lpfc_sg_dma_buf_pool = dma_pool_create("lpfc_sg_dma_buf_pool", &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, BPL_ALIGN_SZ, 0); if (!phba->lpfc_sg_dma_buf_pool) goto fail_free_mem; phba->lpfc_cmd_rsp_buf_pool = dma_pool_create("lpfc_cmd_rsp_buf_pool", &phba->pcidev->dev, sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp), BPL_ALIGN_SZ, 0); if (!phba->lpfc_cmd_rsp_buf_pool) goto fail_free_dma_buf_pool; /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. */ if (phba->cfg_sriov_nr_virtfn > 0) { rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2808 Requested number of SR-IOV " "virtual functions (%d) is not " "supported\n", phba->cfg_sriov_nr_virtfn); phba->cfg_sriov_nr_virtfn = 0; } } return 0; fail_free_dma_buf_pool: dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); phba->lpfc_sg_dma_buf_pool = NULL; fail_free_mem: lpfc_mem_free(phba); return -ENOMEM; } /** * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the driver internal resources set up * specific for supporting the SLI-3 HBA device it attached to. **/ static void lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) { /* Free device driver memory allocated */ lpfc_mem_free_all(phba); return; } /** * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the driver internal resources specific to * support the SLI-4 HBA device it attached to. 
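 * Among other things this allocates the driver workqueue, bootstrap mailbox, CMF timers, SGL and CQ event pools, RPI headers, the FCF round-robin bmask and the per-CPU EQ/statistics structures.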
* * Return codes * 0 - successful * other values - error **/ static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; MAILBOX_t *mb; int rc, i, max_buf_size; int longs; int extra; uint64_t wwn; u32 if_type; u32 if_fam; phba->sli4_hba.num_present_cpu = lpfc_present_cpu; phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; phba->sli4_hba.curr_disp_cpu = 0; /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); /* Set up phase-1 common device driver resources */ rc = lpfc_setup_driver_resource_phase1(phba); if (rc) return -ENODEV; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); if (rc) return -ENODEV; /* Allocate all driver workqueues here */ /* The lpfc_wq workqueue for deferred irq use */ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); if (!phba->wq) return -ENOMEM; /* * Initialize timers used by driver */ timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); /* FCF rediscover timer */ timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); /* CMF congestion timer */ hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); phba->cmf_timer.function = lpfc_cmf_timer; /* CMF 1 minute stats collection timer */ hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); phba->cmf_stats_timer.function = lpfc_cmf_stats_timer; /* * Control structure for handling external multi-buffer mailbox * command pass-through. */ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, sizeof(struct lpfc_mbox_ext_buf_ctx)); INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after the read_config mbox */ phba->max_vports = 0; /* Program the default value of vlan_id and fc_map */ phba->valid_vlan = 0; phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands * we will associate a new ring, for each EQ/CQ/WQ tuple. * The WQ create will allocate the ring. */ /* Initialize buffer queue management fields */ INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; /* for VMID idle timeout if VMID is enabled */ if (lpfc_is_vmid_enabled(phba)) timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); /* * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
*/ /* Initialize the Abort buffer list used by driver */ spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Initialize the Abort nvme buffer list used by driver */ spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); spin_lock_init(&phba->sli4_hba.t_active_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); } /* This abort list used by worker thread */ spin_lock_init(&phba->sli4_hba.sgl_list_lock); spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); spin_lock_init(&phba->sli4_hba.asynce_list_lock); spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); /* * Initialize driver internal slow-path work queues */ /* Driver internel slow-path CQ Event pool */ INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); /* Response IOCB work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); /* Asynchronous event CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); /* Slow-path XRI aborted CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); /* Receive queue CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); /* Initialize extent block lists. */ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); /* Initialize mboxq lists. If the early init routines fail * these lists need to be correctly initialized. */ INIT_LIST_HEAD(&phba->sli.mboxq); INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); /* initialize optic_state to 0xFF */ phba->sli4_hba.lnk_info.optic_state = 0xff; /* Allocate device driver memory */ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); if (rc) goto out_destroy_workqueue; /* IF Type 2 ports get initialized now. */ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { rc = lpfc_pci_function_reset(phba); if (unlikely(rc)) { rc = -ENODEV; goto out_free_mem; } phba->temp_sensor_support = 1; } /* Create the bootstrap mailbox command */ rc = lpfc_create_bootstrap_mbox(phba); if (unlikely(rc)) goto out_free_mem; /* Set up the host's endian order with the device. */ rc = lpfc_setup_endian_order(phba); if (unlikely(rc)) goto out_free_bsmbx; /* Set up the hba's configuration parameters. */ rc = lpfc_sli4_read_config(phba); if (unlikely(rc)) goto out_free_bsmbx; if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { /* Right now the link is down, if FA-PWWN is configured the * firmware will try FLOGI before the driver gets a link up. * If it fails, the driver should get a MISCONFIGURED async * event which will clear this flag. The only notification * the driver gets is if it fails, if it succeeds there is no * notification given. Assume success. */ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; } rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); if (unlikely(rc)) goto out_free_bsmbx; /* IF Type 0 ports get initialized now. 
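 * (IF Type 2 ports were already reset right after lpfc_mem_alloc() above.)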
*/ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_0) { rc = lpfc_pci_function_reset(phba); if (unlikely(rc)) goto out_free_bsmbx; } mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { rc = -ENOMEM; goto out_free_bsmbx; } /* Check for NVMET being configured */ phba->nvmet_support = 0; if (lpfc_enable_nvmet_cnt) { /* First get WWN of HBA instance */ lpfc_read_nv(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6016 Mailbox failed , mbxCmd x%x " "READ_NV, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), bf_get(lpfc_mqe_status, &mboxq->u.mqe)); mempool_free(mboxq, phba->mbox_mem_pool); rc = -EIO; goto out_free_bsmbx; } mb = &mboxq->u.mb; memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, sizeof(uint64_t)); wwn = cpu_to_be64(wwn); phba->sli4_hba.wwnn.u.name = wwn; memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); /* wwn is WWPN of HBA instance */ wwn = cpu_to_be64(wwn); phba->sli4_hba.wwpn.u.name = wwn; /* Check to see if it matches any module parameter */ for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { if (wwn == lpfc_enable_nvmet[i]) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) if (lpfc_nvmet_mem_alloc(phba)) break; phba->nvmet_support = 1; /* a match */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6017 NVME Target %016llx\n", wwn); #else lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6021 Can't enable NVME Target." " NVME_TARGET_FC infrastructure" " is not in kernel\n"); #endif /* Not supported for NVMET */ phba->cfg_xri_rebalancing = 0; if (phba->irq_chann_mode == NHT_MODE) { phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; phba->irq_chann_mode = NORMAL_MODE; } break; } } } lpfc_nvme_mod_param_dep(phba); /* * Get sli4 parameters that override parameters from Port capabilities. * If this call fails, it isn't critical unless the SLI4 parameters come * back in conflict. */ rc = lpfc_get_sli4_parameters(phba, mboxq); if (rc) { if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if_fam = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); if (phba->sli4_hba.extents_in_use && phba->sli4_hba.rpi_hdrs_in_use) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2999 Unsupported SLI4 Parameters " "Extents and RPI headers enabled.\n"); if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && if_fam == LPFC_SLI_INTF_FAMILY_BE2) { mempool_free(mboxq, phba->mbox_mem_pool); rc = -EIO; goto out_free_bsmbx; } } if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { mempool_free(mboxq, phba->mbox_mem_pool); rc = -EIO; goto out_free_bsmbx; } } /* * 1 for cmd, 1 for rsp, NVME adds an extra one * for boundary conditions in its max_sgl_segment template. */ extra = 2; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) extra++; /* * It doesn't matter what family our adapter is in, we are * limited to 2 Pages, 512 SGEs, for our SGL. * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp */ max_buf_size = (2 * SLI4_PAGE_SIZE); /* * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be calculated. */ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { /* Both cfg_enable_bg and cfg_external_dif code paths */ /* * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, * the FCP rsp, and a SGE. 
Sice we have no control * over how many protection segments the SCSI Layer * will hand us (ie: there could be one for every block * in the IO), just allocate enough SGEs to accomidate * our max amount and we need to limit lpfc_sg_seg_cnt * to minimize the risk of running out. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + max_buf_size; /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; /* * If supporting DIF, reduce the seg count for scsi to * allow room for the DIF sges. */ if (phba->cfg_enable_bg && phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; else phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; } else { /* * The scsi_buf for a regular I/O holds the FCP cmnd, * the FCP rsp, a SGE for each, and a SGE for up to * cfg_sg_seg_cnt data segments. */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + ((phba->cfg_sg_seg_cnt + extra) * sizeof(struct sli4_sge)); /* Total SGEs for scsi_sg_list */ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; /* * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only * need to post 1 page for the SGL. */ } if (phba->cfg_xpsgl && !phba->nvmet_support) phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; else phba->cfg_sg_dma_buf_size = SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); phba->border_sge_num = phba->cfg_sg_dma_buf_size / sizeof(struct sli4_sge); /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, "6300 Reducing NVME sg segment " "cnt to %d\n", LPFC_MAX_NVME_SEG_CNT); phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; } else phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, "9087 sg_seg_cnt:%d dmabuf_size:%d " "total:%d scsi:%d nvme:%d\n", phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, phba->cfg_nvme_seg_cnt); if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) i = phba->cfg_sg_dma_buf_size; else i = SLI4_PAGE_SIZE; phba->lpfc_sg_dma_buf_pool = dma_pool_create("lpfc_sg_dma_buf_pool", &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, i, 0); if (!phba->lpfc_sg_dma_buf_pool) { rc = -ENOMEM; goto out_free_bsmbx; } phba->lpfc_cmd_rsp_buf_pool = dma_pool_create("lpfc_cmd_rsp_buf_pool", &phba->pcidev->dev, sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp), i, 0); if (!phba->lpfc_cmd_rsp_buf_pool) { rc = -ENOMEM; goto out_free_sg_dma_buf; } mempool_free(mboxq, phba->mbox_mem_pool); /* Verify OAS is supported */ lpfc_sli4_oas_verify(phba); /* Verify RAS support on adapter */ lpfc_sli4_ras_init(phba); /* Verify all the SLI4 queues */ rc = lpfc_sli4_queue_verify(phba); if (rc) goto out_free_cmd_rsp_buf; /* Create driver internal CQE event pool */ rc = lpfc_sli4_cq_event_pool_create(phba); if (rc) goto out_free_cmd_rsp_buf; /* Initialize sgl lists per host */ lpfc_init_sgl_list(phba); /* Allocate and initialize active sgl array */ rc = lpfc_init_active_sgl_array(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1430 Failed to initialize sgl list.\n"); goto out_destroy_cq_event_pool; } rc = lpfc_sli4_init_rpi_hdrs(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, "1432 Failed to initialize rpi headers.\n"); goto out_free_active_sgl; } /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL); if (!phba->fcf.fcf_rr_bmask) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2759 Failed allocate memory for FCF round " "robin failover bmask\n"); rc = -ENOMEM; goto out_remove_rpi_hdrs; } phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, sizeof(struct lpfc_hba_eq_hdl), GFP_KERNEL); if (!phba->sli4_hba.hba_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2572 Failed allocate memory for " "fast-path per-EQ handle array\n"); rc = -ENOMEM; goto out_free_fcf_rr_bmask; } phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(struct lpfc_vector_map_info), GFP_KERNEL); if (!phba->sli4_hba.cpu_map) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3327 Failed allocate memory for msi-x " "interrupt vector mapping\n"); rc = -ENOMEM; goto out_free_hba_eq_hdl; } phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); if (!phba->sli4_hba.eq_info) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3321 Failed allocation for per_cpu stats\n"); rc = -ENOMEM; goto out_free_hba_cpu_map; } phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*phba->sli4_hba.idle_stat), GFP_KERNEL); if (!phba->sli4_hba.idle_stat) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3390 Failed allocation for idle_stat\n"); rc = -ENOMEM; goto out_free_hba_eq_info; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); if (!phba->sli4_hba.c_stat) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3332 Failed allocating per cpu hdwq stats\n"); rc = -ENOMEM; goto out_free_hba_idle_stat; } #endif phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); if (!phba->cmf_stat) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3331 Failed allocating per cpu cgn stats\n"); rc = -ENOMEM; goto out_free_hba_hdwq_info; } /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. 
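 * A failure here is not fatal; cfg_sriov_nr_virtfn is simply reset to 0.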
*/ if (phba->cfg_sriov_nr_virtfn > 0) { rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "3020 Requested number of SR-IOV " "virtual functions (%d) is not " "supported\n", phba->cfg_sriov_nr_virtfn); phba->cfg_sriov_nr_virtfn = 0; } } return 0; out_free_hba_hdwq_info: #ifdef CONFIG_SCSI_LPFC_DEBUG_FS free_percpu(phba->sli4_hba.c_stat); out_free_hba_idle_stat: #endif kfree(phba->sli4_hba.idle_stat); out_free_hba_eq_info: free_percpu(phba->sli4_hba.eq_info); out_free_hba_cpu_map: kfree(phba->sli4_hba.cpu_map); out_free_hba_eq_hdl: kfree(phba->sli4_hba.hba_eq_hdl); out_free_fcf_rr_bmask: kfree(phba->fcf.fcf_rr_bmask); out_remove_rpi_hdrs: lpfc_sli4_remove_rpi_hdrs(phba); out_free_active_sgl: lpfc_free_active_sgl(phba); out_destroy_cq_event_pool: lpfc_sli4_cq_event_pool_destroy(phba); out_free_cmd_rsp_buf: dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); phba->lpfc_cmd_rsp_buf_pool = NULL; out_free_sg_dma_buf: dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); phba->lpfc_sg_dma_buf_pool = NULL; out_free_bsmbx: lpfc_destroy_bootstrap_mbox(phba); out_free_mem: lpfc_mem_free(phba); out_destroy_workqueue: destroy_workqueue(phba->wq); phba->wq = NULL; return rc; } /** * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the driver internal resources set up * specific for supporting the SLI-4 HBA device it attached to. **/ static void lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; free_percpu(phba->sli4_hba.eq_info); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS free_percpu(phba->sli4_hba.c_stat); #endif free_percpu(phba->cmf_stat); kfree(phba->sli4_hba.idle_stat); /* Free memory allocated for msi-x interrupt vector to CPU mapping */ kfree(phba->sli4_hba.cpu_map); phba->sli4_hba.num_possible_cpu = 0; phba->sli4_hba.num_present_cpu = 0; phba->sli4_hba.curr_disp_cpu = 0; cpumask_clear(&phba->sli4_hba.irq_aff_mask); /* Free memory allocated for fast-path work queue handles */ kfree(phba->sli4_hba.hba_eq_hdl); /* Free the allocated rpi headers. */ lpfc_sli4_remove_rpi_hdrs(phba); lpfc_sli4_remove_rpis(phba); /* Free eligible FCF index bmask */ kfree(phba->fcf.fcf_rr_bmask); /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); lpfc_free_els_sgl_list(phba); lpfc_free_nvmet_sgl_list(phba); /* Free the completion queue EQ event pool */ lpfc_sli4_cq_event_release_all(phba); lpfc_sli4_cq_event_pool_destroy(phba); /* Release resource identifiers. */ lpfc_sli4_dealloc_resource_identifiers(phba); /* Free the bsmbx region. */ lpfc_destroy_bootstrap_mbox(phba); /* Free the SLI Layer memory with SLI4 HBAs */ lpfc_mem_free_all(phba); /* Free the current connect table */ list_for_each_entry_safe(conn_entry, next_conn_entry, &phba->fcf_conn_rec_list, list) { list_del_init(&conn_entry->list); kfree(conn_entry); } return; } /** * lpfc_init_api_table_setup - Set up init api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * * This routine sets up the device INIT interface API function jump table * in @phba struct. * * Returns: 0 - success, -ENODEV - failure. 
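 * -ENODEV is returned only for an unrecognized PCI device group.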
**/ int lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { phba->lpfc_hba_init_link = lpfc_hba_init_link; phba->lpfc_hba_down_link = lpfc_hba_down_link; phba->lpfc_selective_reset = lpfc_selective_reset; switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; phba->lpfc_stop_port = lpfc_stop_port_s3; break; case LPFC_PCI_DEV_OC: phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; phba->lpfc_stop_port = lpfc_stop_port_s4; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1431 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; } return 0; } /** * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the driver internal resources after the * device specific resource setup to support the HBA device it attached to. * * Return codes * 0 - successful * other values - error **/ static int lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) { int error; /* Startup the kernel thread for this host adapter. */ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); if (IS_ERR(phba->worker_thread)) { error = PTR_ERR(phba->worker_thread); return error; } return 0; } /** * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the driver internal resources set up after * the device specific resource setup for supporting the HBA device it * attached to. **/ static void lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) { if (phba->wq) { destroy_workqueue(phba->wq); phba->wq = NULL; } /* Stop kernel worker thread */ if (phba->worker_thread) kthread_stop(phba->worker_thread); } /** * lpfc_free_iocb_list - Free iocb list. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the driver's IOCB list and memory. **/ void lpfc_free_iocb_list(struct lpfc_hba *phba) { struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocbq_entry, iocbq_next, &phba->lpfc_iocb_list, list) { list_del(&iocbq_entry->list); kfree(iocbq_entry); phba->total_iocbq_bufs--; } spin_unlock_irq(&phba->hbalock); return; } /** * lpfc_init_iocb_list - Allocate and initialize iocb list. * @phba: pointer to lpfc hba data structure. * @iocb_count: number of requested iocbs * * This routine is invoked to allocate and initizlize the driver's IOCB * list and set up the IOCB tag array accordingly. * * Return codes * 0 - successful * other values - error **/ int lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) { struct lpfc_iocbq *iocbq_entry = NULL; uint16_t iotag; int i; /* Initialize and populate the iocb list per host. */ INIT_LIST_HEAD(&phba->lpfc_iocb_list); for (i = 0; i < iocb_count; i++) { iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); if (iocbq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d iocbs of " "expected %d count. Unloading driver.\n", __func__, i, iocb_count); goto out_free_iocbq; } iotag = lpfc_sli_next_iotag(phba, iocbq_entry); if (iotag == 0) { kfree(iocbq_entry); printk(KERN_ERR "%s: failed to allocate IOTAG. 
" "Unloading driver.\n", __func__); goto out_free_iocbq; } iocbq_entry->sli4_lxritag = NO_XRI; iocbq_entry->sli4_xritag = NO_XRI; spin_lock_irq(&phba->hbalock); list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); phba->total_iocbq_bufs++; spin_unlock_irq(&phba->hbalock); } return 0; out_free_iocbq: lpfc_free_iocb_list(phba); return -ENOMEM; } /** * lpfc_free_sgl_list - Free a given sgl list. * @phba: pointer to lpfc hba data structure. * @sglq_list: pointer to the head of sgl list. * * This routine is invoked to free a give sgl list and memory. **/ void lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { list_del(&sglq_entry->list); lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); kfree(sglq_entry); } } /** * lpfc_free_els_sgl_list - Free els sgl list. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the driver's els sgl list and memory. **/ static void lpfc_free_els_sgl_list(struct lpfc_hba *phba) { LIST_HEAD(sglq_list); /* Retrieve all els sgls from driver list */ spin_lock_irq(&phba->sli4_hba.sgl_list_lock); list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); /* Now free the sgl list */ lpfc_free_sgl_list(phba, &sglq_list); } /** * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the driver's nvmet sgl list and memory. **/ static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; LIST_HEAD(sglq_list); /* Retrieve all nvmet sgls from driver list */ spin_lock_irq(&phba->hbalock); spin_lock(&phba->sli4_hba.sgl_list_lock); list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); /* Now free the sgl list */ list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { list_del(&sglq_entry->list); lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); kfree(sglq_entry); } /* Update the nvmet_xri_cnt to reflect no current sgls. * The next initialization cycle sets the count and allocates * the sgls over again. */ phba->sli4_hba.nvmet_xri_cnt = 0; } /** * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate the driver's active sgl memory. * This array will hold the sglq_entry's for active IOs. **/ static int lpfc_init_active_sgl_array(struct lpfc_hba *phba) { int size; size = sizeof(struct lpfc_sglq *); size *= phba->sli4_hba.max_cfg_param.max_xri; phba->sli4_hba.lpfc_sglq_active_list = kzalloc(size, GFP_KERNEL); if (!phba->sli4_hba.lpfc_sglq_active_list) return -ENOMEM; return 0; } /** * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to walk through the array of active sglq entries * and free all of the resources. * This is just a place holder for now. **/ static void lpfc_free_active_sgl(struct lpfc_hba *phba) { kfree(phba->sli4_hba.lpfc_sglq_active_list); } /** * lpfc_init_sgl_list - Allocate and initialize sgl list. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate and initizlize the driver's sgl * list and set up the sgl xritag tag array accordingly. 
* **/ static void lpfc_init_sgl_list(struct lpfc_hba *phba) { /* Initialize and populate the sglq list per host/VF. */ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); /* els xri-sgl book keeping */ phba->sli4_hba.els_xri_cnt = 0; /* nvme xri-buffer book keeping */ phba->sli4_hba.io_xri_cnt = 0; } /** * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port * @phba: pointer to lpfc hba data structure. * * This routine is invoked to post rpi header templates to the * port for those SLI4 ports that do not support extents. This routine * posts a PAGE_SIZE memory region to the port to hold up to * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine * and should be called only when interrupts are disabled. * * Return codes * 0 - successful * -ERROR - otherwise. **/ int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) { int rc = 0; struct lpfc_rpi_hdr *rpi_hdr; INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); if (!phba->sli4_hba.rpi_hdrs_in_use) return rc; if (phba->sli4_hba.extents_in_use) return -EIO; rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0391 Error during rpi post operation\n"); lpfc_sli4_remove_rpis(phba); rc = -ENODEV; } return rc; } /** * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate a single 4KB memory region to * support rpis and stores them in the phba. This single region * provides support for up to 64 rpis. The region is used globally * by the device. * * Returns: * A valid rpi hdr on success. * A NULL pointer on any failure. **/ struct lpfc_rpi_hdr * lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) { uint16_t rpi_limit, curr_rpi_range; struct lpfc_dmabuf *dmabuf; struct lpfc_rpi_hdr *rpi_hdr; /* * If the SLI4 port supports extents, posting the rpi header isn't * required. Set the expected maximum count and let the actual value * get set when extents are fully allocated. */ if (!phba->sli4_hba.rpi_hdrs_in_use) return NULL; if (phba->sli4_hba.extents_in_use) return NULL; /* The limit on the logical index is just the max_rpi count. */ rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; spin_lock_irq(&phba->hbalock); /* * Establish the starting RPI in this header block. The starting * rpi is normalized to a zero base because the physical rpi is * port based. */ curr_rpi_range = phba->sli4_hba.next_rpi; spin_unlock_irq(&phba->hbalock); /* Reached full RPI range */ if (curr_rpi_range == rpi_limit) return NULL; /* * First allocate the protocol header region for the port. The * port expects a 4KB DMA-mapped memory region that is 4K aligned. */ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) return NULL; dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { rpi_hdr = NULL; goto err_free_dmabuf; } if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { rpi_hdr = NULL; goto err_free_coherent; } /* Save the rpi header data for cleanup later. */ rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); if (!rpi_hdr) goto err_free_coherent; rpi_hdr->dmabuf = dmabuf; rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; rpi_hdr->page_count = 1; spin_lock_irq(&phba->hbalock); /* The rpi_hdr stores the logical index only. 
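 * next_rpi marks the end of the LPFC_RPI_HDR_COUNT window covered by this header.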
*/ rpi_hdr->start_rpi = curr_rpi_range; rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); spin_unlock_irq(&phba->hbalock); return rpi_hdr; err_free_coherent: dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, dmabuf->virt, dmabuf->phys); err_free_dmabuf: kfree(dmabuf); return NULL; } /** * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions * @phba: pointer to lpfc hba data structure. * * This routine is invoked to remove all memory resources allocated * to support rpis for SLI4 ports not supporting extents. This routine * presumes the caller has released all rpis consumed by fabric or port * logins and is prepared to have the header pages removed. **/ void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) { struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; if (!phba->sli4_hba.rpi_hdrs_in_use) goto exit; list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { list_del(&rpi_hdr->list); dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); kfree(rpi_hdr->dmabuf); kfree(rpi_hdr); } exit: /* There are no rpis available to the port now. */ phba->sli4_hba.next_rpi = 0; } /** * lpfc_hba_alloc - Allocate driver hba data structure for a device. * @pdev: pointer to pci device data structure. * * This routine is invoked to allocate the driver hba data structure for an * HBA device. If the allocation is successful, the phba reference to the * PCI device data structure is set. * * Return codes * pointer to @phba - successful * NULL - error **/ static struct lpfc_hba * lpfc_hba_alloc(struct pci_dev *pdev) { struct lpfc_hba *phba; /* Allocate memory for HBA structure */ phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); if (!phba) { dev_err(&pdev->dev, "failed to allocate hba struct\n"); return NULL; } /* Set reference to PCI device in HBA structure */ phba->pcidev = pdev; /* Assign an unused board number */ phba->brd_no = lpfc_get_instance(); if (phba->brd_no < 0) { kfree(phba); return NULL; } phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; spin_lock_init(&phba->ct_ev_lock); INIT_LIST_HEAD(&phba->ct_ev_waiters); return phba; } /** * lpfc_hba_free - Free driver hba data structure with a device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the driver hba data structure with an * HBA device. **/ static void lpfc_hba_free(struct lpfc_hba *phba) { if (phba->sli_rev == LPFC_SLI_REV4) kfree(phba->sli4_hba.hdwq); /* Release the driver assigned board number */ idr_remove(&lpfc_hba_index, phba->brd_no); /* Free memory allocated with sli3 rings */ kfree(phba->sli.sli3_ring); phba->sli.sli3_ring = NULL; kfree(phba); return; } /** * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes * @vport: pointer to lpfc vport data structure. * * This routine is will setup initial FDMI attribute masks for * FDMI2 or SmartSAN depending on module parameters. 
The driver will attempt * to get these attributes first before falling back, the attribute * fallback hierarchy is SmartSAN -> FDMI2 -> FMDI1 **/ void lpfc_setup_fdmi_mask(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; vport->load_flag |= FC_ALLOW_FDMI; if (phba->cfg_enable_SmartSAN || phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) { /* Setup appropriate attribute masks */ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; if (phba->cfg_enable_SmartSAN) vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; else vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; } lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, "6077 Setup FDMI mask: hba x%x port x%x\n", vport->fdmi_hba_mask, vport->fdmi_port_mask); } /** * lpfc_create_shost - Create hba physical port with associated scsi host. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to create HBA physical port and associate a SCSI * host with it. * * Return codes * 0 - successful * other values - error **/ static int lpfc_create_shost(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct Scsi_Host *shost; /* Initialize HBA FC structure */ phba->fc_edtov = FF_DEF_EDTOV; phba->fc_ratov = FF_DEF_RATOV; phba->fc_altov = FF_DEF_ALTOV; phba->fc_arbtov = FF_DEF_ARBTOV; atomic_set(&phba->sdev_cnt, 0); vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); if (!vport) return -ENODEV; shost = lpfc_shost_from_vport(vport); phba->pport = vport; if (phba->nvmet_support) { /* Only 1 vport (pport) will support NVME target */ phba->targetport = NULL; phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, "6076 NVME Target Found\n"); } lpfc_debugfs_initialize(vport); /* Put reference to SCSI host to driver's device private data */ pci_set_drvdata(phba->pcidev, shost); lpfc_setup_fdmi_mask(vport); /* * At this point we are fully registered with PSA. In addition, * any initial discovery should be completed. */ return 0; } /** * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to destroy HBA physical port and the associated * SCSI host. **/ static void lpfc_destroy_shost(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; /* Destroy physical port that associated with the SCSI host */ destroy_port(vport); return; } /** * lpfc_setup_bg - Setup Block guard structures and debug areas. * @phba: pointer to lpfc hba data structure. * @shost: the shost to be used to detect Block guard settings. * * This routine sets up the local Block guard protocol settings for @shost. * This routine also allocates memory for debugging bg buffers. 
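 * Unsupported DIF/DIX mask and guard bits are filtered out before the values are registered with the SCSI midlayer via scsi_host_set_prot() and scsi_host_set_guard().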
**/ static void lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) { uint32_t old_mask; uint32_t old_guard; if (phba->cfg_prot_mask && phba->cfg_prot_guard) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1478 Registering BlockGuard with the " "SCSI layer\n"); old_mask = phba->cfg_prot_mask; old_guard = phba->cfg_prot_guard; /* Only allow supported values */ phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | SHOST_DIX_TYPE1_PROTECTION); phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); /* DIF Type 1 protection for profiles AST1/C1 is end to end */ if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; if (phba->cfg_prot_mask && phba->cfg_prot_guard) { if ((old_mask != phba->cfg_prot_mask) || (old_guard != phba->cfg_prot_guard)) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1475 Registering BlockGuard with the " "SCSI layer: mask %d guard %d\n", phba->cfg_prot_mask, phba->cfg_prot_guard); scsi_host_set_prot(shost, phba->cfg_prot_mask); scsi_host_set_guard(shost, phba->cfg_prot_guard); } else lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1479 Not Registering BlockGuard with the SCSI " "layer, Bad protection parameters: %d %d\n", old_mask, old_guard); } } /** * lpfc_post_init_setup - Perform necessary device post initialization setup. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to perform all the necessary post initialization * setup for the device. **/ static void lpfc_post_init_setup(struct lpfc_hba *phba) { struct Scsi_Host *shost; struct lpfc_adapter_event_header adapter_event; /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); /* * hba setup may have changed the hba_queue_depth so we need to * adjust the value of can_queue. */ shost = pci_get_drvdata(phba->pcidev); shost->can_queue = phba->cfg_hba_queue_depth - 10; lpfc_host_attrib_init(shost); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(shost->host_lock); lpfc_poll_start_timer(phba); spin_unlock_irq(shost->host_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0428 Perform SCSI scan\n"); /* Send board arrival event to upper layer */ adapter_event.event_type = FC_REG_ADAPTER_EVENT; adapter_event.subcategory = LPFC_EVENT_ARRIVAL; fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(adapter_event), (char *) &adapter_event, LPFC_NL_VENDOR_ID); return; } /** * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the PCI device memory space for device * with SLI-3 interface spec. * * Return codes * 0 - successful * other values - error **/ static int lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) { struct pci_dev *pdev = phba->pcidev; unsigned long bar0map_len, bar2map_len; int i, hbq_count; void *ptr; int error; if (!pdev) return -ENODEV; /* Set the device DMA mask size */ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (error) error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (error) return error; error = -ENODEV; /* Get the bus address of Bar0 and Bar2 and the number of bytes * required by each mapping. */ phba->pci_bar0_map = pci_resource_start(pdev, 0); bar0map_len = pci_resource_len(pdev, 0); phba->pci_bar2_map = pci_resource_start(pdev, 2); bar2map_len = pci_resource_len(pdev, 2); /* Map HBA SLIM to a kernel virtual address. 
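 * SLIM lives in BAR0; the control registers mapped below come from BAR2.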
*/ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); if (!phba->slim_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLIM memory.\n"); goto out; } /* Map HBA Control Registers to a kernel virtual address. */ phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->ctrl_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for HBA control registers.\n"); goto out_iounmap_slim; } /* Allocate memory for SLI-2 structures */ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, &phba->slim2p.phys, GFP_KERNEL); if (!phba->slim2p.virt) goto out_iounmap; phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); phba->mbox_ext = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx_ext_words)); phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); phba->IOCBs = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, IOCBs)); phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, lpfc_sli_hbq_size(), &phba->hbqslimp.phys, GFP_KERNEL); if (!phba->hbqslimp.virt) goto out_free_slim; hbq_count = lpfc_sli_hbq_count(); ptr = phba->hbqslimp.virt; for (i = 0; i < hbq_count; ++i) { phba->hbqs[i].hbq_virt = ptr; INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); ptr += (lpfc_hbq_defs[i]->entry_count * sizeof(struct lpfc_hbq_entry)); } phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); phba->MBslimaddr = phba->slim_memmap_p; phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; return 0; out_free_slim: dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p.virt, phba->slim2p.phys); out_iounmap: iounmap(phba->ctrl_regs_memmap_p); out_iounmap_slim: iounmap(phba->slim_memmap_p); out: return error; } /** * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the PCI device memory space for device * with SLI-3 interface spec. **/ static void lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) { struct pci_dev *pdev; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; /* Free coherent DMA memory allocated */ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, phba->hbqslimp.phys); dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p.virt, phba->slim2p.phys); /* I/O memory unmap */ iounmap(phba->ctrl_regs_memmap_p); iounmap(phba->slim_memmap_p); return; } /** * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status * @phba: pointer to lpfc hba data structure. * * This routine is invoked to wait for SLI4 device Power On Self Test (POST) * done and check status. * * Return 0 if successful, otherwise -ENODEV. 
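 *
 * The wait is a simple polling loop: the port semaphore register is read up
 * to 3000 times with a 10 ms sleep between reads (roughly 30 seconds total),
 * and the loop exits early if the port-error bit is set or the port reports
 * the LPFC_POST_STAGE_PORT_READY stage.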
**/ int lpfc_sli4_post_status_check(struct lpfc_hba *phba) { struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; struct lpfc_register reg_data; int i, port_error = 0; uint32_t if_type; memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); memset(&reg_data, 0, sizeof(reg_data)); if (!phba->sli4_hba.PSMPHRregaddr) return -ENODEV; /* Wait up to 30 seconds for the SLI Port POST done and ready */ for (i = 0; i < 3000; i++) { if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, &portsmphr_reg.word0) || (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { /* Port has a fatal POST error, break out */ port_error = -ENODEV; break; } if (LPFC_POST_STAGE_PORT_READY == bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) break; msleep(10); } /* * If there was a port error during POST, then don't proceed with * other register reads as the data may not be valid. Just exit. */ if (port_error) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1408 Port Failed POST - portsmphr=0x%x, " "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " "scr2=x%x, hscratch=x%x, pstatus=x%x\n", portsmphr_reg.word0, bf_get(lpfc_port_smphr_perr, &portsmphr_reg), bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), bf_get(lpfc_port_smphr_nip, &portsmphr_reg), bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); } else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2534 Device Info: SLIFamily=0x%x, " "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " "SLIHint_2=0x%x, FT=0x%x\n", bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf), bf_get(lpfc_sli_intf_slirev, &phba->sli4_hba.sli_intf), bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf), bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf), bf_get(lpfc_sli_intf_sli_hint2, &phba->sli4_hba.sli_intf), bf_get(lpfc_sli_intf_func_type, &phba->sli4_hba.sli_intf)); /* * Check for other Port errors during the initialization * process. Fail the load if the port did not come up * correctly. */ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); uerrlo_reg.word0 = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); uerrhi_reg.word0 = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1422 Unrecoverable Error " "Detected during POST " "uerr_lo_reg=0x%x, " "uerr_hi_reg=0x%x, " "ue_mask_lo_reg=0x%x, " "ue_mask_hi_reg=0x%x\n", uerrlo_reg.word0, uerrhi_reg.word0, phba->sli4_hba.ue_mask_lo, phba->sli4_hba.ue_mask_hi); port_error = -ENODEV; } break; case LPFC_SLI_INTF_IF_TYPE_2: case LPFC_SLI_INTF_IF_TYPE_6: /* Final checks. The port status should be clean. */ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &reg_data.word0) || lpfc_sli4_unrecoverable_port(&reg_data)) { phba->work_status[0] = readl(phba->sli4_hba.u.if_type2. ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2. 
ERR2regaddr); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2888 Unrecoverable port error " "following POST: port status reg " "0x%x, port_smphr reg 0x%x, " "error 1=0x%x, error 2=0x%x\n", reg_data.word0, portsmphr_reg.word0, phba->work_status[0], phba->work_status[1]); port_error = -ENODEV; break; } if (lpfc_pldv_detect && bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_FAMILY_G6) pci_write_config_byte(phba->pcidev, LPFC_SLI_INTF, CFG_PLD); break; case LPFC_SLI_INTF_IF_TYPE_1: default: break; } } return port_error; } /** * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. * @phba: pointer to lpfc hba data structure. * @if_type: The SLI4 interface type getting configured. * * This routine is invoked to set up SLI4 BAR0 PCI config space register * memory map. **/ static void lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) { switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: phba->sli4_hba.u.if_type0.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; phba->sli4_hba.u.if_type0.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; phba->sli4_hba.u.if_type0.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; phba->sli4_hba.u.if_type0.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; break; case LPFC_SLI_INTF_IF_TYPE_2: phba->sli4_hba.u.if_type2.EQDregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_EQ_DELAY_OFFSET; phba->sli4_hba.u.if_type2.ERR1regaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET; phba->sli4_hba.u.if_type2.ERR2regaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER2_OFFSET; phba->sli4_hba.u.if_type2.CTRLregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_CTL_OFFSET; phba->sli4_hba.u.if_type2.STATUSregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_STA_OFFSET; phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET; phba->sli4_hba.RQDBregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_ULP0_RQ_DOORBELL; phba->sli4_hba.WQDBregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_ULP0_WQ_DOORBELL; phba->sli4_hba.CQDBregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; phba->sli4_hba.MQDBregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; phba->sli4_hba.BMBXregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; break; case LPFC_SLI_INTF_IF_TYPE_6: phba->sli4_hba.u.if_type2.EQDregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_EQ_DELAY_OFFSET; phba->sli4_hba.u.if_type2.ERR1regaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET; phba->sli4_hba.u.if_type2.ERR2regaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER2_OFFSET; phba->sli4_hba.u.if_type2.CTRLregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_CTL_OFFSET; phba->sli4_hba.u.if_type2.STATUSregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_STA_OFFSET; phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET; phba->sli4_hba.BMBXregaddr = phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; break; case LPFC_SLI_INTF_IF_TYPE_1: default: dev_printk(KERN_ERR, &phba->pcidev->dev, "FATAL - unsupported SLI4 interface type - %d\n", if_type); break; } } /** * 
lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. * @phba: pointer to lpfc hba data structure. * @if_type: sli if type to operate on. * * This routine is invoked to set up SLI4 BAR1 register memory map. **/ static void lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) { switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_SLIPORT_IF0_SMPHR; phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_HST_ISR0; phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_HST_IMR0; phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + LPFC_HST_ISCR0; break; case LPFC_SLI_INTF_IF_TYPE_6: phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + LPFC_IF6_RQ_DOORBELL; phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + LPFC_IF6_WQ_DOORBELL; phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + LPFC_IF6_CQ_DOORBELL; phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + LPFC_IF6_EQ_DOORBELL; phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + LPFC_IF6_MQ_DOORBELL; break; case LPFC_SLI_INTF_IF_TYPE_2: case LPFC_SLI_INTF_IF_TYPE_1: default: dev_err(&phba->pcidev->dev, "FATAL - unsupported SLI4 interface type - %d\n", if_type); break; } } /** * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. * @phba: pointer to lpfc hba data structure. * @vf: virtual function number * * This routine is invoked to set up SLI4 BAR2 doorbell register memory map * based on the given viftual function number, @vf. * * Return 0 if successful, otherwise -ENODEV. **/ static int lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) { if (vf > LPFC_VIR_FUNC_MAX) return -ENODEV; phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL); phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_WQ_DOORBELL); phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); return 0; } /** * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox * @phba: pointer to lpfc hba data structure. * * This routine is invoked to create the bootstrap mailbox * region consistent with the SLI-4 interface spec. This * routine allocates all memory necessary to communicate * mailbox commands to the port and sets up all alignment * needs. No locks are expected to be held when calling * this routine. * * Return codes * 0 - successful * -ENOMEM - could not allocated memory. **/ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) { uint32_t bmbx_size; struct lpfc_dmabuf *dmabuf; struct dma_address *dma_address; uint32_t pa_addr; uint64_t phys_addr; dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) return -ENOMEM; /* * The bootstrap mailbox region is comprised of 2 parts * plus an alignment restriction of 16 bytes. 
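	 *
	 * The buffer is deliberately over-allocated by LPFC_ALIGN_16_BYTE - 1
	 * bytes so that a 16-byte-aligned virtual/physical pair can be carved
	 * out of it with PTR_ALIGN()/ALIGN() below.  The aligned physical
	 * address is then split for the BMBX register: addr_hi carries
	 * address bits 63:34 and addr_lo carries bits 33:4 (the low four bits
	 * are zero by construction), each shifted left by two and tagged with
	 * its hi/lo marker bit.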
*/ bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return -ENOMEM; } /* * Initialize the bootstrap mailbox pointers now so that the register * operations are simple later. The mailbox dma address is required * to be 16-byte aligned. Also align the virtual memory as each * maibox is copied into the bmbx mailbox region before issuing the * command to the port. */ phba->sli4_hba.bmbx.dmabuf = dmabuf; phba->sli4_hba.bmbx.bmbx_size = bmbx_size; phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, LPFC_ALIGN_16_BYTE); phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, LPFC_ALIGN_16_BYTE); /* * Set the high and low physical addresses now. The SLI4 alignment * requirement is 16 bytes and the mailbox is posted to the port * as two 30-bit addresses. The other data is a bit marking whether * the 30-bit address is the high or low address. * Upcast bmbx aphys to 64bits so shift instruction compiles * clean on 32 bit machines. */ dma_address = &phba->sli4_hba.bmbx.dma_address; phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | LPFC_BMBX_BIT1_ADDR_HI); pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | LPFC_BMBX_BIT1_ADDR_LO); return 0; } /** * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources * @phba: pointer to lpfc hba data structure. * * This routine is invoked to teardown the bootstrap mailbox * region and release all host resources. This routine requires * the caller to ensure all mailbox commands recovered, no * additional mailbox comands are sent, and interrupts are disabled * before calling this routine. * **/ static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) { dma_free_coherent(&phba->pcidev->dev, phba->sli4_hba.bmbx.bmbx_size, phba->sli4_hba.bmbx.dmabuf->virt, phba->sli4_hba.bmbx.dmabuf->phys); kfree(phba->sli4_hba.bmbx.dmabuf); memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); } static const char * const lpfc_topo_to_str[] = { "Loop then P2P", "Loopback", "P2P Only", "Unsupported", "Loop Only", "Unsupported", "P2P then Loop", }; #define LINK_FLAGS_DEF 0x0 #define LINK_FLAGS_P2P 0x1 #define LINK_FLAGS_LOOP 0x2 /** * lpfc_map_topology - Map the topology read from READ_CONFIG * @phba: pointer to lpfc hba data structure. * @rd_config: pointer to read config data * * This routine is invoked to map the topology values as read * from the read config mailbox command. 
If the persistent * topology feature is supported, the firmware will provide the * saved topology information to be used in INIT_LINK **/ static void lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) { u8 ptv, tf, pt; ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", ptv, tf, pt); if (!ptv) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2019 FW does not support persistent topology " "Using driver parameter defined value [%s]", lpfc_topo_to_str[phba->cfg_topology]); return; } /* FW supports persistent topology - override module parameter value */ phba->hba_flag |= HBA_PERSISTENT_TOPO; /* if ASIC_GEN_NUM >= 0xC) */ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_6) || (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_FAMILY_G6)) { if (!tf) { phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) ? FLAGS_TOPOLOGY_MODE_LOOP : FLAGS_TOPOLOGY_MODE_PT_PT); } else { phba->hba_flag &= ~HBA_PERSISTENT_TOPO; } } else { /* G5 */ if (tf) { /* If topology failover set - pt is '0' or '1' */ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : FLAGS_TOPOLOGY_MODE_LOOP_PT); } else { phba->cfg_topology = ((pt == LINK_FLAGS_P2P) ? FLAGS_TOPOLOGY_MODE_PT_PT : FLAGS_TOPOLOGY_MODE_LOOP); } } if (phba->hba_flag & HBA_PERSISTENT_TOPO) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2020 Using persistent topology value [%s]", lpfc_topo_to_str[phba->cfg_topology]); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2021 Invalid topology values from FW " "Using driver parameter defined value [%s]", lpfc_topo_to_str[phba->cfg_topology]); } } /** * lpfc_sli4_read_config - Get the config parameters. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to read the configuration parameters from the HBA. * The configuration parameters are used to set the base and maximum values * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource * allocation for the port. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. 
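 *
 * The READ_CONFIG mailbox is issued in polled mode (MBX_POLL); on success
 * the harvested limits are sanity-capped (max_vpi is limited to
 * LPFC_MAX_VPORTS and, in a kdump kernel, max_xri to 512) before they are
 * used to size the driver's queues.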
**/ int lpfc_sli4_read_config(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmb; struct lpfc_mbx_read_config *rd_config; union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; struct lpfc_mbx_get_func_cfg *get_func_cfg; struct lpfc_rsrc_desc_fcfcoe *desc; char *pdesc_0; uint16_t forced_link_speed; uint32_t if_type, qmin, fawwpn; int length, i, rc = 0, rc2; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2011 Unable to allocate memory for issuing " "SLI_CONFIG_SPECIAL mailbox command\n"); return -ENOMEM; } lpfc_read_config(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2012 Mailbox failed , mbxCmd x%x " "READ_CONFIG, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &pmb->u.mqe), bf_get(lpfc_mqe_status, &pmb->u.mqe)); rc = -EIO; } else { rd_config = &pmb->u.mqe.un.rd_config; if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; phba->sli4_hba.lnk_info.lnk_tp = bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); phba->sli4_hba.lnk_info.lnk_no = bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3081 lnk_type:%d, lnk_numb:%d\n", phba->sli4_hba.lnk_info.lnk_tp, phba->sli4_hba.lnk_info.lnk_no); } else lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3082 Mailbox (x%x) returned ldv:x0\n", bf_get(lpfc_mqe_command, &pmb->u.mqe)); if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { phba->bbcredit_support = 1; phba->sli4_hba.bbscn_params.word0 = rd_config->word8; } fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config); if (fawwpn) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_DISCOVERY, "2702 READ_CONFIG: FA-PWWN is " "configured on\n"); phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG; } else { /* Clear FW configured flag, preserve driver flag */ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG; } phba->sli4_hba.conf_trunk = bf_get(lpfc_mbx_rd_conf_trunk, rd_config); phba->sli4_hba.extents_in_use = bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); phba->sli4_hba.max_cfg_param.max_xri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); /* Reduce resource usage in kdump environment */ if (is_kdump_kernel() && phba->sli4_hba.max_cfg_param.max_xri > 512) phba->sli4_hba.max_cfg_param.max_xri = 512; phba->sli4_hba.max_cfg_param.xri_base = bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); phba->sli4_hba.max_cfg_param.max_vpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); /* Limit the max we support */ if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; phba->sli4_hba.max_cfg_param.vpi_base = bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); phba->sli4_hba.max_cfg_param.max_rpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); phba->sli4_hba.max_cfg_param.rpi_base = bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); phba->sli4_hba.max_cfg_param.max_vfi = bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); phba->sli4_hba.max_cfg_param.vfi_base = bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); phba->sli4_hba.max_cfg_param.max_fcfi = bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); phba->sli4_hba.max_cfg_param.max_eq = bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); phba->sli4_hba.max_cfg_param.max_rq = bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); phba->sli4_hba.max_cfg_param.max_wq = bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); phba->sli4_hba.max_cfg_param.max_cq = bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 
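		/*
		 * The base/count pairs captured above describe the XRI, VPI,
		 * RPI, VFI and FCFI resource ranges this function may use,
		 * while the EQ/CQ/WQ/RQ counts bound how many queues can be
		 * created later on.
		 */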
phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; /* Next decide on FPIN or Signal E2E CGN support * For congestion alarms and warnings valid combination are: * 1. FPIN alarms / FPIN warnings * 2. Signal alarms / Signal warnings * 3. FPIN alarms / Signal warnings * 4. Signal alarms / FPIN warnings * * Initialize the adapter frequency to 100 mSecs */ phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; if (lpfc_use_cgn_signal) { if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; } if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { /* MUST support both alarm and warning * because EDC does not support alarm alone. */ if (phba->cgn_reg_signal != EDC_CG_SIG_WARN_ONLY) { /* Must support both or none */ phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; } else { phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; } } } /* Set the congestion initial signal and fpin values. */ phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; phba->cgn_init_reg_signal = phba->cgn_reg_signal; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", phba->cgn_reg_signal, phba->cgn_reg_fpin); lpfc_map_topology(phba, rd_config); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2003 cfg params Extents? %d " "XRI(B:%d M:%d), " "VPI(B:%d M:%d) " "VFI(B:%d M:%d) " "RPI(B:%d M:%d) " "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", phba->sli4_hba.extents_in_use, phba->sli4_hba.max_cfg_param.xri_base, phba->sli4_hba.max_cfg_param.max_xri, phba->sli4_hba.max_cfg_param.vpi_base, phba->sli4_hba.max_cfg_param.max_vpi, phba->sli4_hba.max_cfg_param.vfi_base, phba->sli4_hba.max_cfg_param.max_vfi, phba->sli4_hba.max_cfg_param.rpi_base, phba->sli4_hba.max_cfg_param.max_rpi, phba->sli4_hba.max_cfg_param.max_fcfi, phba->sli4_hba.max_cfg_param.max_eq, phba->sli4_hba.max_cfg_param.max_cq, phba->sli4_hba.max_cfg_param.max_wq, phba->sli4_hba.max_cfg_param.max_rq, phba->lmt); /* * Calculate queue resources based on how * many WQ/CQ/EQs are available. */ qmin = phba->sli4_hba.max_cfg_param.max_wq; if (phba->sli4_hba.max_cfg_param.max_cq < qmin) qmin = phba->sli4_hba.max_cfg_param.max_cq; /* * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and * the remainder can be used for NVME / FCP. 
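		 *
		 * Purely illustrative figures (not values read from any
		 * particular adapter): with max_wq = 160, max_cq = 144 and
		 * max_eq = 64, qmin becomes min(160, 144) - 4 = 140 and is
		 * then clamped to the 64 EQs, so cfg_irq_chann and
		 * cfg_hdw_queue are each capped at 64.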
*/ qmin -= 4; if (phba->sli4_hba.max_cfg_param.max_eq < qmin) qmin = phba->sli4_hba.max_cfg_param.max_eq; /* Check to see if there is enough for default cfg */ if ((phba->cfg_irq_chann > qmin) || (phba->cfg_hdw_queue > qmin)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2005 Reducing Queues - " "FW resource limitation: " "WQ %d CQ %d EQ %d: min %d: " "IRQ %d HDWQ %d\n", phba->sli4_hba.max_cfg_param.max_wq, phba->sli4_hba.max_cfg_param.max_cq, phba->sli4_hba.max_cfg_param.max_eq, qmin, phba->cfg_irq_chann, phba->cfg_hdw_queue); if (phba->cfg_irq_chann > qmin) phba->cfg_irq_chann = qmin; if (phba->cfg_hdw_queue > qmin) phba->cfg_hdw_queue = qmin; } } if (rc) goto read_cfg_out; /* Update link speed if forced link speed is supported */ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { forced_link_speed = bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); if (forced_link_speed) { phba->hba_flag |= HBA_FORCED_LINK_SPEED; switch (forced_link_speed) { case LINK_SPEED_1G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_1G; break; case LINK_SPEED_2G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_2G; break; case LINK_SPEED_4G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_4G; break; case LINK_SPEED_8G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_8G; break; case LINK_SPEED_10G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_10G; break; case LINK_SPEED_16G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_16G; break; case LINK_SPEED_32G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_32G; break; case LINK_SPEED_64G: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_64G; break; case 0xffff: phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0047 Unrecognized link " "speed : %d\n", forced_link_speed); phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; } } } /* Reset the DFT_HBA_Q_DEPTH to the max xri */ length = phba->sli4_hba.max_cfg_param.max_xri - lpfc_sli4_get_els_iocb_cnt(phba); if (phba->cfg_hba_queue_depth > length) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "3361 HBA queue depth changed from %d to %d\n", phba->cfg_hba_queue_depth, length); phba->cfg_hba_queue_depth = length; } if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) goto read_cfg_out; /* get the pf# and vf# for SLI4 if_type 2 port */ length = (sizeof(struct lpfc_mbx_get_func_cfg) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, length, LPFC_SLI4_MBX_EMBED); rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &pmb->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (rc2 || shdr_status || shdr_add_status) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3026 Mailbox failed , mbxCmd x%x " "GET_FUNCTION_CONFIG, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &pmb->u.mqe), bf_get(lpfc_mqe_status, &pmb->u.mqe)); goto read_cfg_out; } /* search for fc_fcoe resrouce descriptor */ get_func_cfg = &pmb->u.mqe.un.get_func_cfg; pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) goto read_cfg_out; for (i = 0; i < 
LPFC_RSRC_DESC_MAX_NUM; i++) { desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); if (LPFC_RSRC_DESC_TYPE_FCFCOE == bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { phba->sli4_hba.iov.pf_number = bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); phba->sli4_hba.iov.vf_number = bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); break; } } if (i < LPFC_RSRC_DESC_MAX_NUM) lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3027 GET_FUNCTION_CONFIG: pf_number:%d, " "vf_number:%d\n", phba->sli4_hba.iov.pf_number, phba->sli4_hba.iov.vf_number); else lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3028 GET_FUNCTION_CONFIG: failed to find " "Resource Descriptor:x%x\n", LPFC_RSRC_DESC_TYPE_FCFCOE); read_cfg_out: mempool_free(pmb, phba->mbox_mem_pool); return rc; } /** * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to setup the port-side endian order when * the port if_type is 0. This routine has no function for other * if_types. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ static int lpfc_setup_endian_order(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; uint32_t if_type, rc = 0; uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, HOST_ENDIAN_HIGH_WORD1}; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0492 Unable to allocate memory for " "issuing SLI_CONFIG_SPECIAL mailbox " "command\n"); return -ENOMEM; } /* * The SLI4_CONFIG_SPECIAL mailbox command requires the first * two words to contain special data values and no other data. */ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0493 SLI_CONFIG_SPECIAL mailbox " "failed with status x%x\n", rc); rc = -EIO; } mempool_free(mboxq, phba->mbox_mem_pool); break; case LPFC_SLI_INTF_IF_TYPE_6: case LPFC_SLI_INTF_IF_TYPE_2: case LPFC_SLI_INTF_IF_TYPE_1: default: break; } return rc; } /** * lpfc_sli4_queue_verify - Verify and update EQ counts * @phba: pointer to lpfc hba data structure. * * This routine is invoked to check the user settable queue counts for EQs. * After this routine is called the counts will be set to valid values that * adhere to the constraints of the system's interrupt vectors and the port's * queue resources. 
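 *
 * For NVMET the MRQ count is clamped to the number of hardware queues and
 * to LPFC_NVMET_MRQ_MAX; the EQ and CQ entry sizes and counts are then set
 * to their defaults.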
* * Return codes * 0 - successful * -ENOMEM - No available memory **/ static int lpfc_sli4_queue_verify(struct lpfc_hba *phba) { /* * Sanity check for configured queue parameters against the run-time * device parameters */ if (phba->nvmet_support) { if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", phba->cfg_hdw_queue, phba->cfg_irq_chann, phba->cfg_nvmet_mrq); /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; /* Get CQ depth from module parameter, fake the default for now */ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; return 0; } static int lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) { struct lpfc_queue *qdesc; u32 wqesize; int cpu; cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); /* Create Fast Path IO CQs */ if (phba->enab_exp_wqcq_pages) /* Increase the CQ size when WQEs contain an embedded cdb */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, phba->sli4_hba.cq_esize, LPFC_CQE_EXP_COUNT, cpu); else qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0499 Failed allocate fast-path IO CQ (%d)\n", idx); return 1; } qdesc->qe_valid = 1; qdesc->hdwq = idx; qdesc->chann = cpu; phba->sli4_hba.hdwq[idx].io_cq = qdesc; /* Create Fast Path IO WQs */ if (phba->enab_exp_wqcq_pages) { /* Increase the WQ size when WQEs contain an embedded cdb */ wqesize = (phba->fcp_embed_io) ? LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, wqesize, LPFC_WQE_EXP_COUNT, cpu); } else qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.wq_esize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0503 Failed allocate fast-path IO WQ (%d)\n", idx); return 1; } qdesc->hdwq = idx; qdesc->chann = cpu; phba->sli4_hba.hdwq[idx].io_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; } /** * lpfc_sli4_queue_create - Create all the SLI4 queues * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA * operation. For each SLI4 queue type, the parameters such as queue entry * count (queue depth) shall be taken from the module parameter. For now, * we just use some constant number as place holder. * * Return codes * 0 - successful * -ENOMEM - No availble memory * -EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; int idx, cpu, eqcpu; struct lpfc_sli4_hdw_queue *qp; struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *eqcpup; struct lpfc_eq_intr_info *eqi; /* * Create HBA Record arrays. 
* Both NVME and FCP will share that same vectors / EQs */ phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; if (!phba->sli4_hba.hdwq) { phba->sli4_hba.hdwq = kcalloc( phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), GFP_KERNEL); if (!phba->sli4_hba.hdwq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6427 Failed allocate memory for " "fast-path Hardware Queue array\n"); goto out_error; } /* Prepare hardware queues to take IO buffers */ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; spin_lock_init(&qp->io_buf_list_get_lock); spin_lock_init(&qp->io_buf_list_put_lock); INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); qp->get_io_bufs = 0; qp->put_io_bufs = 0; qp->total_io_bufs = 0; spin_lock_init(&qp->abts_io_buf_list_lock); INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); qp->abts_scsi_io_bufs = 0; qp->abts_nvme_io_bufs = 0; INIT_LIST_HEAD(&qp->sgl_list); INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); spin_lock_init(&qp->hdwq_lock); } } if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->nvmet_support) { phba->sli4_hba.nvmet_cqset = kcalloc( phba->cfg_nvmet_mrq, sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.nvmet_cqset) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3121 Fail allocate memory for " "fast-path CQ set array\n"); goto out_error; } phba->sli4_hba.nvmet_mrq_hdr = kcalloc( phba->cfg_nvmet_mrq, sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.nvmet_mrq_hdr) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3122 Fail allocate memory for " "fast-path RQ set hdr array\n"); goto out_error; } phba->sli4_hba.nvmet_mrq_data = kcalloc( phba->cfg_nvmet_mrq, sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.nvmet_mrq_data) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3124 Fail allocate memory for " "fast-path RQ set data array\n"); goto out_error; } } } INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); /* Create HBA Event Queues (EQs) */ for_each_present_cpu(cpu) { /* We only want to create 1 EQ per vector, even though * multiple CPUs might be using that vector. so only * selects the CPUs that are LPFC_CPU_FIRST_IRQ. */ cpup = &phba->sli4_hba.cpu_map[cpu]; if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) continue; /* Get a ptr to the Hardware Queue associated with this CPU */ qp = &phba->sli4_hba.hdwq[cpup->hdwq]; /* Allocate an EQ */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0497 Failed allocate EQ (%d)\n", cpup->hdwq); goto out_error; } qdesc->qe_valid = 1; qdesc->hdwq = cpup->hdwq; qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ qdesc->last_cpu = qdesc->chann; /* Save the allocated EQ in the Hardware Queue */ qp->hba_eq = qdesc; eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); list_add(&qdesc->cpu_list, &eqi->list); } /* Now we need to populate the other Hardware Queues, that share * an IRQ vector, with the associated EQ ptr. 
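	 * The first pass above created exactly one EQ per IRQ vector (only
	 * CPUs flagged LPFC_CPU_FIRST_IRQ allocate one); this second pass
	 * walks the remaining CPUs and points any hardware queue that still
	 * has no EQ at the EQ owned by the first CPU on the same vector.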
*/ for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* Check for EQ already allocated in previous loop */ if (cpup->flag & LPFC_CPU_FIRST_IRQ) continue; /* Check for multiple CPUs per hdwq */ qp = &phba->sli4_hba.hdwq[cpup->hdwq]; if (qp->hba_eq) continue; /* We need to share an EQ for this hdwq */ eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; } /* Allocate IO Path SLI4 CQ/WQs */ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { if (lpfc_alloc_io_wq_cq(phba, idx)) goto out_error; } if (phba->nvmet_support) { for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3142 Failed allocate NVME " "CQ Set (%d)\n", idx); goto out_error; } qdesc->qe_valid = 1; qdesc->hdwq = idx; qdesc->chann = cpu; phba->sli4_hba.nvmet_cqset[idx] = qdesc; } } /* * Create Slow Path Completion Queues (CQs) */ cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); /* Create slow-path Mailbox Command Complete Queue */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0500 Failed allocate slow-path mailbox CQ\n"); goto out_error; } qdesc->qe_valid = 1; phba->sli4_hba.mbx_cq = qdesc; /* Create slow-path ELS Complete Queue */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0501 Failed allocate slow-path ELS CQ\n"); goto out_error; } qdesc->qe_valid = 1; qdesc->chann = cpu; phba->sli4_hba.els_cq = qdesc; /* * Create Slow Path Work Queues (WQs) */ /* Create Mailbox Command Queue */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.mq_esize, phba->sli4_hba.mq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0505 Failed allocate slow-path MQ\n"); goto out_error; } qdesc->chann = cpu; phba->sli4_hba.mbx_wq = qdesc; /* * Create ELS Work Queues */ /* Create slow-path ELS Work Queue */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.wq_esize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0504 Failed allocate slow-path ELS WQ\n"); goto out_error; } qdesc->chann = cpu; phba->sli4_hba.els_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Create NVME LS Complete Queue */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6079 Failed allocate NVME LS CQ\n"); goto out_error; } qdesc->chann = cpu; qdesc->qe_valid = 1; phba->sli4_hba.nvmels_cq = qdesc; /* Create NVME LS Work Queue */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.wq_esize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6080 Failed allocate NVME LS WQ\n"); goto out_error; } qdesc->chann = cpu; phba->sli4_hba.nvmels_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); } /* * Create Receive Queue (RQ) */ /* Create Receive Queue for 
header */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.rq_esize, phba->sli4_hba.rq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0506 Failed allocate receive HRQ\n"); goto out_error; } phba->sli4_hba.hdr_rq = qdesc; /* Create Receive Queue for data */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.rq_esize, phba->sli4_hba.rq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0507 Failed allocate receive DRQ\n"); goto out_error; } phba->sli4_hba.dat_rq = qdesc; if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && phba->nvmet_support) { for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); /* Create NVMET Receive Queue for header */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.rq_esize, LPFC_NVMET_RQE_DEF_COUNT, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3146 Failed allocate " "receive HRQ\n"); goto out_error; } qdesc->hdwq = idx; phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; /* Only needed for header of RQ pair */ qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), GFP_KERNEL, cpu_to_node(cpu)); if (qdesc->rqbp == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6131 Failed allocate " "Header RQBP\n"); goto out_error; } /* Put list in known state in case driver load fails. */ INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); /* Create NVMET Receive Queue for data */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.rq_esize, LPFC_NVMET_RQE_DEF_COUNT, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3156 Failed allocate " "receive DRQ\n"); goto out_error; } qdesc->hdwq = idx; phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; } } /* Clear NVME stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); } } /* Clear SCSI stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); } } return 0; out_error: lpfc_sli4_queue_destroy(phba); return -ENOMEM; } static inline void __lpfc_sli4_release_queue(struct lpfc_queue **qp) { if (*qp != NULL) { lpfc_sli4_queue_free(*qp); *qp = NULL; } } static inline void lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) { int idx; if (*qs == NULL) return; for (idx = 0; idx < max; idx++) __lpfc_sli4_release_queue(&(*qs)[idx]); kfree(*qs); *qs = NULL; } static inline void lpfc_sli4_release_hdwq(struct lpfc_hba *phba) { struct lpfc_sli4_hdw_queue *hdwq; struct lpfc_queue *eq; uint32_t idx; hdwq = phba->sli4_hba.hdwq; /* Loop thru all Hardware Queues */ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { /* Free the CQ/WQ corresponding to the Hardware Queue */ lpfc_sli4_queue_free(hdwq[idx].io_cq); lpfc_sli4_queue_free(hdwq[idx].io_wq); hdwq[idx].hba_eq = NULL; hdwq[idx].io_cq = NULL; hdwq[idx].io_wq = NULL; if (phba->cfg_xpsgl && !phba->nvmet_support) lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); } /* Loop thru all IRQ vectors */ for (idx = 0; idx < phba->cfg_irq_chann; idx++) { /* Free the EQ corresponding to the IRQ vector */ eq = phba->sli4_hba.hba_eq_hdl[idx].eq; lpfc_sli4_queue_free(eq); phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; } } /** * lpfc_sli4_queue_destroy 
- Destroy all the SLI4 queues * @phba: pointer to lpfc hba data structure. * * This routine is invoked to release all the SLI4 queues with the FCoE HBA * operation. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { /* * Set FREE_INIT before beginning to free the queues. * Wait until the users of queues to acknowledge to * release queues by clearing FREE_WAIT. */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { spin_unlock_irq(&phba->hbalock); msleep(20); spin_lock_irq(&phba->hbalock); } spin_unlock_irq(&phba->hbalock); lpfc_sli4_cleanup_poll_list(phba); /* Release HBA eqs */ if (phba->sli4_hba.hdwq) lpfc_sli4_release_hdwq(phba); if (phba->nvmet_support) { lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, phba->cfg_nvmet_mrq); lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, phba->cfg_nvmet_mrq); lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, phba->cfg_nvmet_mrq); } /* Release mailbox command work queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); /* Release ELS work queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); /* Release ELS work queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); /* Release unsolicited receive queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); /* Release ELS complete queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); /* Release NVME LS complete queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); /* Release mailbox command complete queue */ __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); /* Everything on this list has been freed */ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); /* Done with freeing the queues */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; spin_unlock_irq(&phba->hbalock); } int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) { struct lpfc_rqb *rqbp; struct lpfc_dmabuf *h_buf; struct rqb_dmabuf *rqb_buffer; rqbp = rq->rqbp; while (!list_empty(&rqbp->rqb_buffer_list)) { list_remove_head(&rqbp->rqb_buffer_list, h_buf, struct lpfc_dmabuf, list); rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); (rqbp->rqb_free_buffer)(phba, rqb_buffer); rqbp->buffer_count--; } return 1; } static int lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, int qidx, uint32_t qtype) { struct lpfc_sli_ring *pring; int rc; if (!eq || !cq || !wq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6085 Fast-path %s (%d) not allocated\n", ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); return -ENOMEM; } /* create the Cq first */ rc = lpfc_cq_create(phba, cq, eq, (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6086 Failed setup of CQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); return rc; } if (qtype != LPFC_MBOX) { /* Setup cq_map for fast lookup */ if (cq_map) *cq_map = cq->queue_id; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", qidx, cq->queue_id, qidx, eq->queue_id); /* create the wq */ rc = lpfc_wq_create(phba, wq, cq, qtype); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); /* no need to tear down cq - caller will do so */ return rc; } /* Bind this CQ/WQ to the NVME ring */ pring = wq->pring; pring->sli.sli4.wqp = (void *)wq; cq->pring = pring; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); } else { rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0539 Failed setup of slow-path MQ: " "rc = 0x%x\n", rc); /* no need to tear down cq - caller will do so */ return rc; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.mbx_wq->queue_id, phba->sli4_hba.mbx_cq->queue_id); } return 0; } /** * lpfc_setup_cq_lookup - Setup the CQ lookup table * @phba: pointer to lpfc hba data structure. * * This routine will populate the cq_lookup table by all * available CQ queue_id's. **/ static void lpfc_setup_cq_lookup(struct lpfc_hba *phba) { struct lpfc_queue *eq, *childq; int qidx; memset(phba->sli4_hba.cq_lookup, 0, (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); /* Loop thru all IRQ vectors */ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { /* Get the EQ corresponding to the IRQ vector */ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; if (!eq) continue; /* Loop through all CQs associated with that EQ */ list_for_each_entry(childq, &eq->child_list, list) { if (childq->queue_id > phba->sli4_hba.cq_max) continue; if (childq->subtype == LPFC_IO) phba->sli4_hba.cq_lookup[childq->queue_id] = childq; } } } /** * lpfc_sli4_queue_setup - Set up all the SLI4 queues * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up all the SLI4 queues for the FCoE HBA * operation. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. 
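 *
 * Setup starts with a QUERY_FW_CFG mailbox to learn the function/ULP mode,
 * then creates the EQs (one per IRQ vector) before the CQ/WQ pairs, since
 * each CQ names its parent EQ at create time.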
**/ int lpfc_sli4_queue_setup(struct lpfc_hba *phba) { uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_vector_map_info *cpup; struct lpfc_sli4_hdw_queue *qp; LPFC_MBOXQ_t *mboxq; int qidx, cpu; uint32_t length, usdelay; int rc = -ENOMEM; /* Check for dual-ULP support */ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3249 Unable to allocate memory for " "QUERY_FW_CFG mailbox command\n"); return -ENOMEM; } length = (sizeof(struct lpfc_mbx_query_fw_config) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_QUERY_FW_CFG, length, LPFC_SLI4_MBX_EMBED); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3250 QUERY_FW_CFG mailbox failed with status " "x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); mempool_free(mboxq, phba->mbox_mem_pool); rc = -ENXIO; goto out_error; } phba->sli4_hba.fw_func_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; phba->sli4_hba.physical_port = mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); mempool_free(mboxq, phba->mbox_mem_pool); /* * Set up HBA Event Queues (EQs) */ qp = phba->sli4_hba.hdwq; /* Set up HBA event queue */ if (!qp) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; goto out_error; } /* Loop thru all IRQ vectors */ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { /* Create HBA Event Queues (EQs) in order */ for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* Look for the CPU thats using that vector with * LPFC_CPU_FIRST_IRQ set. 
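			 * Only the first CPU on a vector carries that flag,
			 * so this inner loop sets up exactly one EQ per IRQ
			 * vector and records it in hba_eq_hdl[] for later
			 * lookup.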
*/ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) continue; if (qidx != cpup->eq) continue; /* Create an EQ for that vector */ rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, phba->cfg_fcp_imax); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0523 Failed setup of fast-path" " EQ (%d), rc = 0x%x\n", cpup->eq, (uint32_t)rc); goto out_destroy; } /* Save the EQ for that vector in the hba_eq_hdl */ phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = qp[cpup->hdwq].hba_eq; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2584 HBA EQ setup: queue[%d]-id=%d\n", cpup->eq, qp[cpup->hdwq].hba_eq->queue_id); } } /* Loop thru all Hardware Queues */ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); cpup = &phba->sli4_hba.cpu_map[cpu]; /* Create the CQ/WQ corresponding to the Hardware Queue */ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, qp[qidx].io_cq, qp[qidx].io_wq, &phba->sli4_hba.hdwq[qidx].io_cq_map, qidx, LPFC_IO); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0535 Failed to setup fastpath " "IO WQ/CQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); goto out_destroy; } } /* * Set up Slow Path Complete Queues (CQs) */ /* Set up slow-path MBOX CQ/MQ */ if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0528 %s not allocated\n", phba->sli4_hba.mbx_cq ? "Mailbox WQ" : "Mailbox CQ"); rc = -ENOMEM; goto out_destroy; } rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, phba->sli4_hba.mbx_cq, phba->sli4_hba.mbx_wq, NULL, 0, LPFC_MBOX); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } if (phba->nvmet_support) { if (!phba->sli4_hba.nvmet_cqset) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3165 Fast-path NVME CQ Set " "array not allocated\n"); rc = -ENOMEM; goto out_destroy; } if (phba->cfg_nvmet_mrq > 1) { rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset, qp, LPFC_WCQ, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3164 Failed setup of NVME CQ " "Set, rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } } else { /* Set up NVMET Receive Complete Queue */ rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], qp[0].hba_eq, LPFC_WCQ, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6089 Failed setup NVMET CQ: " "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } phba->sli4_hba.nvmet_cqset[0]->chann = 0; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "6090 NVMET CQ setup: cq-id=%d, " "parent eq-id=%d\n", phba->sli4_hba.nvmet_cqset[0]->queue_id, qp[0].hba_eq->queue_id); } } /* Set up slow-path ELS WQ/CQ */ if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0530 ELS %s not allocated\n", phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); rc = -ENOMEM; goto out_destroy; } rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, phba->sli4_hba.els_cq, phba->sli4_hba.els_wq, NULL, 0, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Set up NVME LS Complete Queue */ if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6091 LS %s not allocated\n", phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); rc = -ENOMEM; goto out_destroy; } rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, phba->sli4_hba.nvmels_cq, phba->sli4_hba.nvmels_wq, NULL, 0, LPFC_NVME_LS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0526 Failed setup of NVVME LS WQ/CQ: " "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "6096 ELS WQ setup: wq-id=%d, " "parent cq-id=%d\n", phba->sli4_hba.nvmels_wq->queue_id, phba->sli4_hba.nvmels_cq->queue_id); } /* * Create NVMET Receive Queue (RQ) */ if (phba->nvmet_support) { if ((!phba->sli4_hba.nvmet_cqset) || (!phba->sli4_hba.nvmet_mrq_hdr) || (!phba->sli4_hba.nvmet_mrq_data)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6130 MRQ CQ Queues not " "allocated\n"); rc = -ENOMEM; goto out_destroy; } if (phba->cfg_nvmet_mrq > 1) { rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr, phba->sli4_hba.nvmet_mrq_data, phba->sli4_hba.nvmet_cqset, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6098 Failed setup of NVMET " "MRQ: rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } } else { rc = lpfc_rq_create(phba, phba->sli4_hba.nvmet_mrq_hdr[0], phba->sli4_hba.nvmet_mrq_data[0], phba->sli4_hba.nvmet_cqset[0], LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6057 Failed setup of NVMET " "Receive Queue: rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } lpfc_printf_log( phba, KERN_INFO, LOG_INIT, "6099 NVMET RQ setup: hdr-rq-id=%d, " "dat-rq-id=%d parent cq-id=%d\n", phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, phba->sli4_hba.nvmet_mrq_data[0]->queue_id, phba->sli4_hba.nvmet_cqset[0]->queue_id); } } if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0540 Receive Queue not allocated\n"); rc = -ENOMEM; goto out_destroy; } rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, phba->sli4_hba.els_cq, LPFC_USOL); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0541 Failed setup of Receive Queue: " "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " "parent cq-id=%d\n", phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); if (phba->cfg_fcp_imax) usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; else usdelay = 0; for (qidx = 0; qidx < phba->cfg_irq_chann; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, usdelay); if (phba->sli4_hba.cq_max) { kfree(phba->sli4_hba.cq_lookup); phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.cq_lookup) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0549 Failed setup of CQ Lookup table: " "size 0x%x\n", 
phba->sli4_hba.cq_max); rc = -ENOMEM; goto out_destroy; } lpfc_setup_cq_lookup(phba); } return 0; out_destroy: lpfc_sli4_queue_unset(phba); out_error: return rc; } /** * lpfc_sli4_queue_unset - Unset all the SLI4 queues * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset all the SLI4 queues with the FCoE HBA * operation. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ void lpfc_sli4_queue_unset(struct lpfc_hba *phba) { struct lpfc_sli4_hdw_queue *qp; struct lpfc_queue *eq; int qidx; /* Unset mailbox command work queue */ if (phba->sli4_hba.mbx_wq) lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); /* Unset NVME LS work queue */ if (phba->sli4_hba.nvmels_wq) lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); /* Unset ELS work queue */ if (phba->sli4_hba.els_wq) lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); /* Unset unsolicited receive queue */ if (phba->sli4_hba.hdr_rq) lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); /* Unset mailbox command complete queue */ if (phba->sli4_hba.mbx_cq) lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); /* Unset ELS complete queue */ if (phba->sli4_hba.els_cq) lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); /* Unset NVME LS complete queue */ if (phba->sli4_hba.nvmels_cq) lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); if (phba->nvmet_support) { /* Unset NVMET MRQ queue */ if (phba->sli4_hba.nvmet_mrq_hdr) { for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) lpfc_rq_destroy( phba, phba->sli4_hba.nvmet_mrq_hdr[qidx], phba->sli4_hba.nvmet_mrq_data[qidx]); } /* Unset NVMET CQ Set complete queue */ if (phba->sli4_hba.nvmet_cqset) { for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) lpfc_cq_destroy( phba, phba->sli4_hba.nvmet_cqset[qidx]); } } /* Unset fast-path SLI4 queues */ if (phba->sli4_hba.hdwq) { /* Loop thru all Hardware Queues */ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { /* Destroy the CQ/WQ corresponding to Hardware Queue */ qp = &phba->sli4_hba.hdwq[qidx]; lpfc_wq_destroy(phba, qp->io_wq); lpfc_cq_destroy(phba, qp->io_cq); } /* Loop thru all IRQ vectors */ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { /* Destroy the EQ corresponding to the IRQ vector */ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; lpfc_eq_destroy(phba, eq); } } kfree(phba->sli4_hba.cq_lookup); phba->sli4_hba.cq_lookup = NULL; phba->sli4_hba.cq_max = 0; } /** * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool * @phba: pointer to lpfc hba data structure. * * This routine is invoked to allocate and set up a pool of completion queue * events. The body of the completion queue event is a completion queue entry * CQE. For now, this pool is used for the interrupt service routine to queue * the following HBA completion queue events for the worker thread to process: * - Mailbox asynchronous events * - Receive queue completion unsolicited events * Later, this can be used for all the slow-path events. 
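 *
 * A minimal usage sketch of the pool helpers defined below (the CQE
 * copy step is only indicated, not real driver code):
 *
 *   struct lpfc_cq_event *evt;
 *
 *   evt = lpfc_sli4_cq_event_alloc(phba);
 *   if (evt) {
 *           ... copy the hardware CQE into the event ...
 *           lpfc_sli4_cq_event_release(phba, evt);
 *   }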
* * Return codes * 0 - successful * -ENOMEM - No available memory **/ static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; int i; for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); if (!cq_event) goto out_pool_create_fail; list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); } return 0; out_pool_create_fail: lpfc_sli4_cq_event_pool_destroy(phba); return -ENOMEM; } /** * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool * @phba: pointer to lpfc hba data structure. * * This routine is invoked to free the pool of completion queue events at * driver unload time. Note that, it is the responsibility of the driver * cleanup routine to free all the outstanding completion-queue events * allocated from this pool back into the pool before invoking this routine * to destroy the pool. **/ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event, *next_cq_event; list_for_each_entry_safe(cq_event, next_cq_event, &phba->sli4_hba.sp_cqe_event_pool, list) { list_del(&cq_event->list); kfree(cq_event); } } /** * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool * @phba: pointer to lpfc hba data structure. * * This routine is the lock free version of the API invoked to allocate a * completion-queue event from the free pool. * * Return: Pointer to the newly allocated completion-queue event if successful * NULL otherwise. **/ struct lpfc_cq_event * __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event = NULL; list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, struct lpfc_cq_event, list); return cq_event; } /** * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool * @phba: pointer to lpfc hba data structure. * * This routine is the lock version of the API invoked to allocate a * completion-queue event from the free pool. * * Return: Pointer to the newly allocated completion-queue event if successful * NULL otherwise. **/ struct lpfc_cq_event * lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); cq_event = __lpfc_sli4_cq_event_alloc(phba); spin_unlock_irqrestore(&phba->hbalock, iflags); return cq_event; } /** * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool * @phba: pointer to lpfc hba data structure. * @cq_event: pointer to the completion queue event to be freed. * * This routine is the lock free version of the API invoked to release a * completion-queue event back into the free pool. **/ void __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, struct lpfc_cq_event *cq_event) { list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); } /** * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool * @phba: pointer to lpfc hba data structure. * @cq_event: pointer to the completion queue event to be freed. * * This routine is the lock version of the API invoked to release a * completion-queue event back into the free pool. 
**/ void lpfc_sli4_cq_event_release(struct lpfc_hba *phba, struct lpfc_cq_event *cq_event) { unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_sli4_cq_event_release(phba, cq_event); spin_unlock_irqrestore(&phba->hbalock, iflags); } /** * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool * @phba: pointer to lpfc hba data structure. * * This routine is to free all the pending completion-queue events to the * back into the free pool for device reset. **/ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) { LIST_HEAD(cq_event_list); struct lpfc_cq_event *cq_event; unsigned long iflags; /* Retrieve all the pending WCQEs from pending WCQE lists */ /* Pending ELS XRI abort events */ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, &cq_event_list); spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); /* Pending asynnc events */ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, &cq_event_list); spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); while (!list_empty(&cq_event_list)) { list_remove_head(&cq_event_list, cq_event, struct lpfc_cq_event, list); lpfc_sli4_cq_event_release(phba, cq_event); } } /** * lpfc_pci_function_reset - Reset pci function. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to request a PCI function reset. It will destroys * all resources assigned to the PCI function which originates this request. * * Return codes * 0 - successful * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ int lpfc_pci_function_reset(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; uint32_t rc = 0, if_type; uint32_t shdr_status, shdr_add_status; uint32_t rdy_chk; uint32_t port_reset = 0; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_register reg_data; uint16_t devid; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0494 Unable to allocate memory for " "issuing SLI_FUNCTION_RESET mailbox " "command\n"); return -ENOMEM; } /* Setup PCI function reset mailbox-ioctl command */ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, LPFC_SLI4_MBX_EMBED); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); mempool_free(mboxq, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0495 SLI_FUNCTION_RESET mailbox " "failed with status x%x add_status x%x," " mbx status x%x\n", shdr_status, shdr_add_status, rc); rc = -ENXIO; } break; case LPFC_SLI_INTF_IF_TYPE_2: case LPFC_SLI_INTF_IF_TYPE_6: wait: /* * Poll the Port Status Register and wait for RDY for * up to 30 seconds. If the port doesn't respond, treat * it as an error. */ for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { if (lpfc_readl(phba->sli4_hba.u.if_type2. 
STATUSregaddr, &reg_data.word0)) { rc = -ENODEV; goto out; } if (bf_get(lpfc_sliport_status_rdy, &reg_data)) break; msleep(20); } if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) { phba->work_status[0] = readl( phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl( phba->sli4_hba.u.if_type2.ERR2regaddr); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2890 Port not ready, port status reg " "0x%x error 1=0x%x, error 2=0x%x\n", reg_data.word0, phba->work_status[0], phba->work_status[1]); rc = -ENODEV; goto out; } if (bf_get(lpfc_sliport_status_pldv, &reg_data)) lpfc_pldv_detect = true; if (!port_reset) { /* * Reset the port now */ reg_data.word0 = 0; bf_set(lpfc_sliport_ctrl_end, &reg_data, LPFC_SLIPORT_LITTLE_ENDIAN); bf_set(lpfc_sliport_ctrl_ip, &reg_data, LPFC_SLIPORT_INIT_PORT); writel(reg_data.word0, phba->sli4_hba.u.if_type2. CTRLregaddr); /* flush */ pci_read_config_word(phba->pcidev, PCI_DEVICE_ID, &devid); port_reset = 1; msleep(20); goto wait; } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) { rc = -ENODEV; goto out; } break; case LPFC_SLI_INTF_IF_TYPE_1: default: break; } out: /* Catch the not-ready port failure after a port reset. */ if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3317 HBA not functional: IP Reset Failed " "try: echo fw_reset > board_mode\n"); rc = -ENODEV; } return rc; } /** * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to set up the PCI device memory space for device * with SLI-4 interface spec. * * Return codes * 0 - successful * other values - error **/ static int lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) { struct pci_dev *pdev = phba->pcidev; unsigned long bar0map_len, bar1map_len, bar2map_len; int error; uint32_t if_type; if (!pdev) return -ENODEV; /* Set the device DMA mask size */ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (error) error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (error) return error; /* * The BARs and register set definitions and offset locations are * dependent on the if_type. */ if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &phba->sli4_hba.sli_intf.word0)) { return -ENODEV; } /* There is no SLI3 failback for SLI4 devices. */ if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_VALID) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2894 SLI_INTF reg contents invalid " "sli_intf reg 0x%x\n", phba->sli4_hba.sli_intf.word0); return -ENODEV; } if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); /* * Get the bus address of SLI4 device Bar regions and the * number of bytes required by each mapping. The mapping of the * particular PCI BARs regions is dependent on the type of * SLI4 device. 
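 *
 * Each mapping below follows the same three-step pattern (sketch only;
 * NULL checks and the if_type-specific register setup are omitted):
 *
 *   map  = pci_resource_start(pdev, bar);
 *   len  = pci_resource_len(pdev, bar);
 *   virt = ioremap(map, len);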
*/ if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); /* * Map SLI4 PCI Config Space Register base to a kernel virtual * addr */ phba->sli4_hba.conf_regs_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); if (!phba->sli4_hba.conf_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 PCI config " "registers.\n"); return -ENODEV; } phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; /* Set up BAR0 PCI config space register memory map */ lpfc_sli4_bar0_register_memmap(phba, if_type); } else { phba->pci_bar0_map = pci_resource_start(pdev, 1); bar0map_len = pci_resource_len(pdev, 1); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { dev_printk(KERN_ERR, &pdev->dev, "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); return -ENODEV; } phba->sli4_hba.conf_regs_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); if (!phba->sli4_hba.conf_regs_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 PCI config " "registers.\n"); return -ENODEV; } lpfc_sli4_bar0_register_memmap(phba, if_type); } if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { /* * Map SLI4 if type 0 HBA Control Register base to a * kernel virtual address and setup the registers. */ phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); phba->sli4_hba.ctrl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.ctrl_regs_memmap_p) { dev_err(&pdev->dev, "ioremap failed for SLI4 HBA " "control registers.\n"); error = -ENOMEM; goto out_iounmap_conf; } phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; lpfc_sli4_bar1_register_memmap(phba, if_type); } else { error = -ENOMEM; goto out_iounmap_conf; } } if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && (pci_resource_start(pdev, PCI_64BIT_BAR2))) { /* * Map SLI4 if type 6 HBA Doorbell Register base to a kernel * virtual address and setup the registers. */ phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { dev_err(&pdev->dev, "ioremap failed for SLI4 HBA doorbell registers.\n"); error = -ENOMEM; goto out_iounmap_conf; } phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; lpfc_sli4_bar1_register_memmap(phba, if_type); } if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { /* * Map SLI4 if type 0 HBA Doorbell Register base to * a kernel virtual address and setup the registers. */ phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { dev_err(&pdev->dev, "ioremap failed for SLI4 HBA" " doorbell registers.\n"); error = -ENOMEM; goto out_iounmap_ctrl; } phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); if (error) goto out_iounmap_all; } else { error = -ENOMEM; goto out_iounmap_ctrl; } } if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && pci_resource_start(pdev, PCI_64BIT_BAR4)) { /* * Map SLI4 if type 6 HBA DPP Register base to a kernel * virtual address and setup the registers. 
*/ phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); phba->sli4_hba.dpp_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.dpp_regs_memmap_p) { dev_err(&pdev->dev, "ioremap failed for SLI4 HBA dpp registers.\n"); error = -ENOMEM; goto out_iounmap_all; } phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; } /* Set up the EQ/CQ register handeling functions now */ switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: case LPFC_SLI_INTF_IF_TYPE_2: phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; break; case LPFC_SLI_INTF_IF_TYPE_6: phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; break; default: break; } return 0; out_iounmap_all: if (phba->sli4_hba.drbl_regs_memmap_p) iounmap(phba->sli4_hba.drbl_regs_memmap_p); out_iounmap_ctrl: if (phba->sli4_hba.ctrl_regs_memmap_p) iounmap(phba->sli4_hba.ctrl_regs_memmap_p); out_iounmap_conf: iounmap(phba->sli4_hba.conf_regs_memmap_p); return error; } /** * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the PCI device memory space for device * with SLI-4 interface spec. **/ static void lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) { uint32_t if_type; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: iounmap(phba->sli4_hba.drbl_regs_memmap_p); iounmap(phba->sli4_hba.ctrl_regs_memmap_p); iounmap(phba->sli4_hba.conf_regs_memmap_p); break; case LPFC_SLI_INTF_IF_TYPE_2: iounmap(phba->sli4_hba.conf_regs_memmap_p); break; case LPFC_SLI_INTF_IF_TYPE_6: iounmap(phba->sli4_hba.drbl_regs_memmap_p); iounmap(phba->sli4_hba.conf_regs_memmap_p); if (phba->sli4_hba.dpp_regs_memmap_p) iounmap(phba->sli4_hba.dpp_regs_memmap_p); break; case LPFC_SLI_INTF_IF_TYPE_1: break; default: dev_printk(KERN_ERR, &phba->pcidev->dev, "FATAL - unsupported SLI4 interface type - %d\n", if_type); break; } } /** * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device * with SLI-3 interface specs. 
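 *
 * The SLI-3 path uses two MSI-X messages, mirroring the calls made
 * below (arguments abbreviated):
 *
 *   pci_alloc_irq_vectors(pcidev, LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS,
 *                         PCI_IRQ_MSIX);
 *   request_irq(pci_irq_vector(pcidev, 0), lpfc_sli_sp_intr_handler, ...);
 *   request_irq(pci_irq_vector(pcidev, 1), lpfc_sli_fp_intr_handler, ...);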
* * Return codes * 0 - successful * other values - error **/ static int lpfc_sli_enable_msix(struct lpfc_hba *phba) { int rc; LPFC_MBOXQ_t *pmb; /* Set up MSI-X multi-message vectors */ rc = pci_alloc_irq_vectors(phba->pcidev, LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0420 PCI enable MSI-X failed (%d)\n", rc); goto vec_fail_out; } /* * Assign MSI-X vectors to interrupt handlers */ /* vector-0 is associated to slow-path handler */ rc = request_irq(pci_irq_vector(phba->pcidev, 0), &lpfc_sli_sp_intr_handler, 0, LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0421 MSI-X slow-path request_irq failed " "(%d)\n", rc); goto msi_fail_out; } /* vector-1 is associated to fast-path handler */ rc = request_irq(pci_irq_vector(phba->pcidev, 1), &lpfc_sli_fp_intr_handler, 0, LPFC_FP_DRIVER_HANDLER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0429 MSI-X fast-path request_irq failed " "(%d)\n", rc); goto irq_fail_out; } /* * Configure HBA MSI-X attention conditions to messages */ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { rc = -ENOMEM; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0474 Unable to allocate memory for issuing " "MBOX_CONFIG_MSI command\n"); goto mem_fail_out; } rc = lpfc_config_msi(phba, pmb); if (rc) goto mbx_fail_out; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "0351 Config MSI mailbox command failed, " "mbxCmd x%x, mbxStatus x%x\n", pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); goto mbx_fail_out; } /* Free memory allocated for mailbox command */ mempool_free(pmb, phba->mbox_mem_pool); return rc; mbx_fail_out: /* Free memory allocated for mailbox command */ mempool_free(pmb, phba->mbox_mem_pool); mem_fail_out: /* free the irq already requested */ free_irq(pci_irq_vector(phba->pcidev, 1), phba); irq_fail_out: /* free the irq already requested */ free_irq(pci_irq_vector(phba->pcidev, 0), phba); msi_fail_out: /* Unconfigure MSI-X capability structure */ pci_free_irq_vectors(phba->pcidev); vec_fail_out: return rc; } /** * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI interrupt mode to device with * SLI-3 interface spec. The kernel function pci_enable_msi() is called to * enable the MSI vector. The device driver is responsible for calling the * request_irq() to register MSI vector with a interrupt the handler, which * is done in this function. * * Return codes * 0 - successful * other values - error */ static int lpfc_sli_enable_msi(struct lpfc_hba *phba) { int rc; rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0012 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0471 PCI enable MSI mode failed (%d)\n", rc); return rc; } rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 0, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0478 MSI request_irq failed (%d)\n", rc); } return rc; } /** * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 
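 *
 * cfg_mode is interpreted as follows (summary of the fallback logic
 * below):
 *   2 - try MSI-X, then fall back to MSI, then INTx
 *   1 - try MSI, then fall back to INTx
 *   0 - INTx only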
* * This routine is invoked to enable device interrupt and associate driver's * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface * spec. Depends on the interrupt mode configured to the driver, the driver * will try to fallback from the configured interrupt mode to an interrupt * mode which is supported by the platform, kernel, and device in the order * of: * MSI-X -> MSI -> IRQ. * * Return codes * 0 - successful * other values - error **/ static uint32_t lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { uint32_t intr_mode = LPFC_INTR_ERROR; int retval; /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); if (retval) return intr_mode; phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; if (cfg_mode == 2) { /* Now, try to enable MSI-X interrupt mode */ retval = lpfc_sli_enable_msix(phba); if (!retval) { /* Indicate initialization to MSI-X mode */ phba->intr_type = MSIX; intr_mode = 2; } } /* Fallback to MSI if MSI-X initialization failed */ if (cfg_mode >= 1 && phba->intr_type == NONE) { retval = lpfc_sli_enable_msi(phba); if (!retval) { /* Indicate initialization to MSI mode */ phba->intr_type = MSI; intr_mode = 1; } } /* Fallback to INTx if both MSI-X/MSI initalization failed */ if (phba->intr_type == NONE) { retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; } } return intr_mode; } /** * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable device interrupt and disassociate the * driver's interrupt handler(s) from interrupt vector(s) to device with * SLI-3 interface spec. Depending on the interrupt mode, the driver will * release the interrupt vector(s) for the message signaled interrupt. **/ static void lpfc_sli_disable_intr(struct lpfc_hba *phba) { int nr_irqs, i; if (phba->intr_type == MSIX) nr_irqs = LPFC_MSIX_VECTORS; else nr_irqs = 1; for (i = 0; i < nr_irqs; i++) free_irq(pci_irq_vector(phba->pcidev, i), phba); pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; } /** * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue * @phba: pointer to lpfc hba data structure. * @id: EQ vector index or Hardware Queue index * @match: LPFC_FIND_BY_EQ = match by EQ * LPFC_FIND_BY_HDWQ = match by Hardware Queue * Return the CPU that matches the selection criteria */ static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) { struct lpfc_vector_map_info *cpup; int cpu; /* Loop through all CPUs */ for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* If we are matching by EQ, there may be multiple CPUs using * using the same vector, so select the one with * LPFC_CPU_FIRST_IRQ set. */ if ((match == LPFC_FIND_BY_EQ) && (cpup->flag & LPFC_CPU_FIRST_IRQ) && (cpup->eq == id)) return cpu; /* If matching by HDWQ, select the first CPU that matches */ if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) return cpu; } return 0; } #ifdef CONFIG_X86 /** * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded * @phba: pointer to lpfc hba data structure. 
* @cpu: CPU map index * @phys_id: CPU package physical id * @core_id: CPU core id */ static int lpfc_find_hyper(struct lpfc_hba *phba, int cpu, uint16_t phys_id, uint16_t core_id) { struct lpfc_vector_map_info *cpup; int idx; for_each_present_cpu(idx) { cpup = &phba->sli4_hba.cpu_map[idx]; /* Does the cpup match the one we are looking for */ if ((cpup->phys_id == phys_id) && (cpup->core_id == core_id) && (cpu != idx)) return 1; } return 0; } #endif /* * lpfc_assign_eq_map_info - Assigns eq for vector_map structure * @phba: pointer to lpfc hba data structure. * @eqidx: index for eq and irq vector * @flag: flags to set for vector_map structure * @cpu: cpu used to index vector_map structure * * The routine assigns eq info into vector_map structure */ static inline void lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, unsigned int cpu) { struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); cpup->eq = eqidx; cpup->flag |= flag; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", cpu, eqhdl->irq, cpup->eq, cpup->flag); } /** * lpfc_cpu_map_array_init - Initialize cpu_map structure * @phba: pointer to lpfc hba data structure. * * The routine initializes the cpu_map array structure */ static void lpfc_cpu_map_array_init(struct lpfc_hba *phba) { struct lpfc_vector_map_info *cpup; struct lpfc_eq_intr_info *eqi; int cpu; for_each_possible_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; cpup->core_id = LPFC_VECTOR_MAP_EMPTY; cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; cpup->eq = LPFC_VECTOR_MAP_EMPTY; cpup->flag = 0; eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); INIT_LIST_HEAD(&eqi->list); eqi->icnt = 0; } } /** * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure * @phba: pointer to lpfc hba data structure. * * The routine initializes the hba_eq_hdl array structure */ static void lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) { struct lpfc_hba_eq_hdl *eqhdl; int i; for (i = 0; i < phba->cfg_irq_chann; i++) { eqhdl = lpfc_get_eq_hdl(i); eqhdl->irq = LPFC_IRQ_EMPTY; eqhdl->phba = phba; } } /** * lpfc_cpu_affinity_check - Check vector CPU affinity mappings * @phba: pointer to lpfc hba data structure. * @vectors: number of msix vectors allocated. * * The routine will figure out the CPU affinity assignment for every * MSI-X vector allocated for the HBA. * In addition, the CPU to IO channel mapping will be calculated * and the phba->sli4_hba.cpu_map array will reflect this. 
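 *
 * Assignment order (summary of the passes in the body below):
 *   1. Record phys_id/core_id for every present CPU.
 *   2. Give any CPU left without an EQ the EQ of a peer on the same
 *      phys_id, then of any peer if that fails.
 *   3. Assign each LPFC_CPU_FIRST_IRQ CPU a unique hdwq index.
 *   4. Map the remaining CPUs to any still-unused hdwq first, then reuse
 *      a hdwq whose EQ shares their phys_id/core_id, falling back to a
 *      round-robin on cfg_hdw_queue.
 *   5. Round-robin hdwq indices for not-present CPUs (hot-add case).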
*/ static void lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) { int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; int max_phys_id, min_phys_id; int max_core_id, min_core_id; struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *new_cpup; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; #endif #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_hdwq_stat *c_stat; #endif max_phys_id = 0; min_phys_id = LPFC_VECTOR_MAP_EMPTY; max_core_id = 0; min_core_id = LPFC_VECTOR_MAP_EMPTY; /* Update CPU map with physical id and core id of each CPU */ for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; #ifdef CONFIG_X86 cpuinfo = &cpu_data(cpu); cpup->phys_id = cpuinfo->phys_proc_id; cpup->core_id = cpuinfo->cpu_core_id; if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) cpup->flag |= LPFC_CPU_MAP_HYPER; #else /* No distinction between CPUs for other platforms */ cpup->phys_id = 0; cpup->core_id = cpu; #endif lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3328 CPU %d physid %d coreid %d flag x%x\n", cpu, cpup->phys_id, cpup->core_id, cpup->flag); if (cpup->phys_id > max_phys_id) max_phys_id = cpup->phys_id; if (cpup->phys_id < min_phys_id) min_phys_id = cpup->phys_id; if (cpup->core_id > max_core_id) max_core_id = cpup->core_id; if (cpup->core_id < min_core_id) min_core_id = cpup->core_id; } /* After looking at each irq vector assigned to this pcidev, its * possible to see that not ALL CPUs have been accounted for. * Next we will set any unassigned (unaffinitized) cpu map * entries to a IRQ on the same phys_id. */ first_cpu = cpumask_first(cpu_present_mask); start_cpu = first_cpu; for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* Is this CPU entry unassigned */ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { /* Mark CPU as IRQ not assigned by the kernel */ cpup->flag |= LPFC_CPU_MAP_UNASSIGN; /* If so, find a new_cpup that is on the SAME * phys_id as cpup. start_cpu will start where we * left off so all unassigned entries don't get assgined * the IRQ of the first entry. */ new_cpu = start_cpu; for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && (new_cpup->phys_id == cpup->phys_id)) goto found_same; new_cpu = lpfc_next_present_cpu(new_cpu); } /* At this point, we leave the CPU as unassigned */ continue; found_same: /* We found a matching phys_id, so copy the IRQ info */ cpup->eq = new_cpup->eq; /* Bump start_cpu to the next slot to minmize the * chance of having multiple unassigned CPU entries * selecting the same IRQ. */ start_cpu = lpfc_next_present_cpu(new_cpu); lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3337 Set Affinity: CPU %d " "eq %d from peer cpu %d same " "phys_id (%d)\n", cpu, cpup->eq, new_cpu, cpup->phys_id); } } /* Set any unassigned cpu map entries to a IRQ on any phys_id */ start_cpu = first_cpu; for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* Is this entry unassigned */ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { /* Mark it as IRQ not assigned by the kernel */ cpup->flag |= LPFC_CPU_MAP_UNASSIGN; /* If so, find a new_cpup thats on ANY phys_id * as the cpup. start_cpu will start where we * left off so all unassigned entries don't get * assigned the IRQ of the first entry. 
*/ new_cpu = start_cpu; for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) goto found_any; new_cpu = lpfc_next_present_cpu(new_cpu); } /* We should never leave an entry unassigned */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3339 Set Affinity: CPU %d " "eq %d UNASSIGNED\n", cpup->hdwq, cpup->eq); continue; found_any: /* We found an available entry, copy the IRQ info */ cpup->eq = new_cpup->eq; /* Bump start_cpu to the next slot to minmize the * chance of having multiple unassigned CPU entries * selecting the same IRQ. */ start_cpu = lpfc_next_present_cpu(new_cpu); lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3338 Set Affinity: CPU %d " "eq %d from peer cpu %d (%d/%d)\n", cpu, cpup->eq, new_cpu, new_cpup->phys_id, new_cpup->core_id); } } /* Assign hdwq indices that are unique across all cpus in the map * that are also FIRST_CPUs. */ idx = 0; for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* Only FIRST IRQs get a hdwq index assignment. */ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) continue; /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ cpup->hdwq = idx; idx++; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3333 Set Affinity: CPU %d (phys %d core %d): " "hdwq %d eq %d flg x%x\n", cpu, cpup->phys_id, cpup->core_id, cpup->hdwq, cpup->eq, cpup->flag); } /* Associate a hdwq with each cpu_map entry * This will be 1 to 1 - hdwq to cpu, unless there are less * hardware queues then CPUs. For that case we will just round-robin * the available hardware queues as they get assigned to CPUs. * The next_idx is the idx from the FIRST_CPU loop above to account * for irq_chann < hdwq. The idx is used for round-robin assignments * and needs to start at 0. */ next_idx = idx; start_cpu = 0; idx = 0; for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* FIRST cpus are already mapped. */ if (cpup->flag & LPFC_CPU_FIRST_IRQ) continue; /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq * of the unassigned cpus to the next idx so that all * hdw queues are fully utilized. */ if (next_idx < phba->cfg_hdw_queue) { cpup->hdwq = next_idx; next_idx++; continue; } /* Not a First CPU and all hdw_queues are used. Reuse a * Hardware Queue for another CPU, so be smart about it * and pick one that has its IRQ/EQ mapped to the same phys_id * (CPU package) and core_id. */ new_cpu = start_cpu; for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && new_cpup->phys_id == cpup->phys_id && new_cpup->core_id == cpup->core_id) { goto found_hdwq; } new_cpu = lpfc_next_present_cpu(new_cpu); } /* If we can't match both phys_id and core_id, * settle for just a phys_id match. 
*/ new_cpu = start_cpu; for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && new_cpup->phys_id == cpup->phys_id) goto found_hdwq; new_cpu = lpfc_next_present_cpu(new_cpu); } /* Otherwise just round robin on cfg_hdw_queue */ cpup->hdwq = idx % phba->cfg_hdw_queue; idx++; goto logit; found_hdwq: /* We found an available entry, copy the IRQ info */ start_cpu = lpfc_next_present_cpu(new_cpu); cpup->hdwq = new_cpup->hdwq; logit: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3335 Set Affinity: CPU %d (phys %d core %d): " "hdwq %d eq %d flg x%x\n", cpu, cpup->phys_id, cpup->core_id, cpup->hdwq, cpup->eq, cpup->flag); } /* * Initialize the cpu_map slots for not-present cpus in case * a cpu is hot-added. Perform a simple hdwq round robin assignment. */ idx = 0; for_each_possible_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); c_stat->hdwq_no = cpup->hdwq; #endif if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) continue; cpup->hdwq = idx++ % phba->cfg_hdw_queue; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS c_stat->hdwq_no = cpup->hdwq; #endif lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3340 Set Affinity: not present " "CPU %d hdwq %d\n", cpu, cpup->hdwq); } /* The cpu_map array will be used later during initialization * when EQ / CQ / WQs are allocated and configured. */ return; } /** * lpfc_cpuhp_get_eq * * @phba: pointer to lpfc hba data structure. * @cpu: cpu going offline * @eqlist: eq list to append to */ static int lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, struct list_head *eqlist) { const struct cpumask *maskp; struct lpfc_queue *eq; struct cpumask *tmp; u16 idx; tmp = kzalloc(cpumask_size(), GFP_KERNEL); if (!tmp) return -ENOMEM; for (idx = 0; idx < phba->cfg_irq_chann; idx++) { maskp = pci_irq_get_affinity(phba->pcidev, idx); if (!maskp) continue; /* * if irq is not affinitized to the cpu going * then we don't need to poll the eq attached * to it. */ if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) continue; /* get the cpus that are online and are affini- * tized to this irq vector. If the count is * more than 1 then cpuhp is not going to shut- * down this vector. Since this cpu has not * gone offline yet, we need >1. */ cpumask_and(tmp, maskp, cpu_online_mask); if (cpumask_weight(tmp) > 1) continue; /* Now that we have an irq to shutdown, get the eq * mapped to this irq. Note: multiple hdwq's in * the software can share an eq, but eventually * only eq will be mapped to this vector */ eq = phba->sli4_hba.hba_eq_hdl[idx].eq; list_add(&eq->_poll_list, eqlist); } kfree(tmp); return 0; } static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) { if (phba->sli_rev != LPFC_SLI_REV4) return; cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); /* * unregistering the instance doesn't stop the polling * timer. Wait for the poll timer to retire. 
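 * The synchronize_rcu() and del_timer_sync() calls below ensure no
 * poll-timer callback is still running once this routine returns.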
*/ synchronize_rcu(); del_timer_sync(&phba->cpuhp_poll_timer); } static void lpfc_cpuhp_remove(struct lpfc_hba *phba) { if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE)) return; __lpfc_cpuhp_remove(phba); } static void lpfc_cpuhp_add(struct lpfc_hba *phba) { if (phba->sli_rev != LPFC_SLI_REV4) return; rcu_read_lock(); if (!list_empty(&phba->poll_list)) mod_timer(&phba->cpuhp_poll_timer, jiffies + msecs_to_jiffies(LPFC_POLL_HB)); rcu_read_unlock(); cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); } static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) { if (phba->pport->load_flag & FC_UNLOADING) { *retval = -EAGAIN; return true; } if (phba->sli_rev != LPFC_SLI_REV4) { *retval = 0; return true; } /* proceed with the hotplug */ return false; } /** * lpfc_irq_set_aff - set IRQ affinity * @eqhdl: EQ handle * @cpu: cpu to set affinity * **/ static inline void lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) { cpumask_clear(&eqhdl->aff_mask); cpumask_set_cpu(cpu, &eqhdl->aff_mask); irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); } /** * lpfc_irq_clear_aff - clear IRQ affinity * @eqhdl: EQ handle * **/ static inline void lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) { cpumask_clear(&eqhdl->aff_mask); irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); } /** * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event * @phba: pointer to HBA context object. * @cpu: cpu going offline/online * @offline: true, cpu is going offline. false, cpu is coming online. * * If cpu is going offline, we'll try our best effort to find the next * online cpu on the phba's original_mask and migrate all offlining IRQ * affinities. * * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. * * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. * **/ static void lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) { struct lpfc_vector_map_info *cpup; struct cpumask *aff_mask; unsigned int cpu_select, cpu_next, idx; const struct cpumask *orig_mask; if (phba->irq_chann_mode == NORMAL_MODE) return; orig_mask = &phba->sli4_hba.irq_aff_mask; if (!cpumask_test_cpu(cpu, orig_mask)) return; cpup = &phba->sli4_hba.cpu_map[cpu]; if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) return; if (offline) { /* Find next online CPU on original mask */ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); /* Found a valid CPU */ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { /* Go through each eqhdl and ensure offlining * cpu aff_mask is migrated */ for (idx = 0; idx < phba->cfg_irq_chann; idx++) { aff_mask = lpfc_get_aff_mask(idx); /* Migrate affinity */ if (cpumask_test_cpu(cpu, aff_mask)) lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), cpu_select); } } else { /* Rely on irqbalance if no online CPUs left on NUMA */ for (idx = 0; idx < phba->cfg_irq_chann; idx++) lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); } } else { /* Migrate affinity back to this CPU */ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); } } static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) { struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); struct lpfc_queue *eq, *next; LIST_HEAD(eqlist); int retval; if (!phba) { WARN_ONCE(!phba, "cpu: %u. 
phba:NULL", raw_smp_processor_id()); return 0; } if (__lpfc_cpuhp_checks(phba, &retval)) return retval; lpfc_irq_rebalance(phba, cpu, true); retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); if (retval) return retval; /* start polling on these eq's */ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { list_del_init(&eq->_poll_list); lpfc_sli4_start_polling(eq); } return 0; } static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) { struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); struct lpfc_queue *eq, *next; unsigned int n; int retval; if (!phba) { WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); return 0; } if (__lpfc_cpuhp_checks(phba, &retval)) return retval; lpfc_irq_rebalance(phba, cpu, false); list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); if (n == cpu) lpfc_sli4_stop_polling(eq); } return 0; } /** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them * to cpus on the system. * * When cfg_irq_numa is enabled, the adapter will only allocate vectors for * the number of cpus on the same numa node as this adapter. The vectors are * allocated without requesting OS affinity mapping. A vector will be * allocated and assigned to each online and offline cpu. If the cpu is * online, then affinity will be set to that cpu. If the cpu is offline, then * affinity will be set to the nearest peer cpu within the numa node that is * online. If there are no online cpus within the numa node, affinity is not * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping * is consistent with the way cpu online/offline is handled when cfg_irq_numa is * configured. * * If numa mode is not enabled and there is more than 1 vector allocated, then * the driver relies on the managed irq interface where the OS assigns vector to * cpu affinity. The driver will then use that affinity mapping to setup its * cpu mapping table. 
* * Return codes * 0 - successful * other values - error **/ static int lpfc_sli4_enable_msix(struct lpfc_hba *phba) { int vectors, rc, index; char *name; const struct cpumask *aff_mask = NULL; unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; struct lpfc_vector_map_info *cpup; struct lpfc_hba_eq_hdl *eqhdl; const struct cpumask *maskp; unsigned int flags = PCI_IRQ_MSIX; /* Set up MSI-X multi-message vectors */ vectors = phba->cfg_irq_chann; if (phba->irq_chann_mode != NORMAL_MODE) aff_mask = &phba->sli4_hba.irq_aff_mask; if (aff_mask) { cpu_cnt = cpumask_weight(aff_mask); vectors = min(phba->cfg_irq_chann, cpu_cnt); /* cpu: iterates over aff_mask including offline or online * cpu_select: iterates over online aff_mask to set affinity */ cpu = cpumask_first(aff_mask); cpu_select = lpfc_next_online_cpu(aff_mask, cpu); } else { flags |= PCI_IRQ_AFFINITY; } rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); goto vec_fail_out; } vectors = rc; /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { eqhdl = lpfc_get_eq_hdl(index); name = eqhdl->handler_name; memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); eqhdl->idx = index; rc = pci_irq_vector(phba->pcidev, index); if (rc < 0) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0489 MSI-X fast-path (%d) " "pci_irq_vec failed (%d)\n", index, rc); goto cfg_fail_out; } eqhdl->irq = rc; rc = request_threaded_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, &lpfc_sli4_hba_intr_handler_th, IRQF_ONESHOT, name, eqhdl); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " "request_irq failed (%d)\n", index, rc); goto cfg_fail_out; } if (aff_mask) { /* If found a neighboring online cpu, set affinity */ if (cpu_select < nr_cpu_ids) lpfc_irq_set_aff(eqhdl, cpu_select); /* Assign EQ to cpu_map */ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, cpu); /* Iterate to next offline or online cpu in aff_mask */ cpu = cpumask_next(cpu, aff_mask); /* Find next online cpu in aff_mask to set affinity */ cpu_select = lpfc_next_online_cpu(aff_mask, cpu); } else if (vectors == 1) { cpu = cpumask_first(cpu_present_mask); lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, cpu); } else { maskp = pci_irq_get_affinity(phba->pcidev, index); /* Loop through all CPUs associated with vector index */ for_each_cpu_and(cpu, maskp, cpu_present_mask) { cpup = &phba->sli4_hba.cpu_map[cpu]; /* If this is the first CPU thats assigned to * this vector, set LPFC_CPU_FIRST_IRQ. * * With certain platforms its possible that irq * vectors are affinitized to all the cpu's. * This can result in each cpu_map.eq to be set * to the last vector, resulting in overwrite * of all the previous cpu_map.eq. Ensure that * each vector receives a place in cpu_map. * Later call to lpfc_cpu_affinity_check will * ensure we are nicely balanced out. 
*/ if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) continue; lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, cpu); break; } } } if (vectors != phba->cfg_irq_chann) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3238 Reducing IO channels to match number of " "MSI-X vectors, requested %d got %d\n", phba->cfg_irq_chann, vectors); if (phba->cfg_irq_chann > vectors) phba->cfg_irq_chann = vectors; } return rc; cfg_fail_out: /* free the irq already requested */ for (--index; index >= 0; index--) { eqhdl = lpfc_get_eq_hdl(index); lpfc_irq_clear_aff(eqhdl); free_irq(eqhdl->irq, eqhdl); } /* Unconfigure MSI-X capability structure */ pci_free_irq_vectors(phba->pcidev); vec_fail_out: return rc; } /** * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI interrupt mode to device with * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is * called to enable the MSI vector. The device driver is responsible for * calling the request_irq() to register MSI vector with a interrupt the * handler, which is done in this function. * * Return codes * 0 - successful * other values - error **/ static int lpfc_sli4_enable_msi(struct lpfc_hba *phba) { int rc, index; unsigned int cpu; struct lpfc_hba_eq_hdl *eqhdl; rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_AFFINITY); if (rc > 0) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0487 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0488 PCI enable MSI mode failed (%d)\n", rc); return rc ? rc : -1; } rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 0, LPFC_DRIVER_NAME, phba); if (rc) { pci_free_irq_vectors(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0490 MSI request_irq failed (%d)\n", rc); return rc; } eqhdl = lpfc_get_eq_hdl(0); rc = pci_irq_vector(phba->pcidev, 0); if (rc < 0) { pci_free_irq_vectors(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0496 MSI pci_irq_vec failed (%d)\n", rc); return rc; } eqhdl->irq = rc; cpu = cpumask_first(cpu_present_mask); lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); for (index = 0; index < phba->cfg_irq_chann; index++) { eqhdl = lpfc_get_eq_hdl(index); eqhdl->idx = index; } return 0; } /** * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). * * This routine is invoked to enable device interrupt and associate driver's * interrupt handler(s) to interrupt vector(s) to device with SLI-4 * interface spec. Depends on the interrupt mode configured to the driver, * the driver will try to fallback from the configured interrupt mode to an * interrupt mode which is supported by the platform, kernel, and device in * the order of: * MSI-X -> MSI -> IRQ. 
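 *
 * The returned value encodes the mode actually enabled:
 * 2 = MSI-X, 1 = MSI, 0 = INTx.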
* * Return codes * Interrupt mode (2, 1, 0) - successful * LPFC_INTR_ERROR - error **/ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { uint32_t intr_mode = LPFC_INTR_ERROR; int retval, idx; if (cfg_mode == 2) { /* Preparation before conf_msi mbox cmd */ retval = 0; if (!retval) { /* Now, try to enable MSI-X interrupt mode */ retval = lpfc_sli4_enable_msix(phba); if (!retval) { /* Indicate initialization to MSI-X mode */ phba->intr_type = MSIX; intr_mode = 2; } } } /* Fallback to MSI if MSI-X initialization failed */ if (cfg_mode >= 1 && phba->intr_type == NONE) { retval = lpfc_sli4_enable_msi(phba); if (!retval) { /* Indicate initialization to MSI mode */ phba->intr_type = MSI; intr_mode = 1; } } /* Fallback to INTx if both MSI-X/MSI initalization failed */ if (phba->intr_type == NONE) { retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { struct lpfc_hba_eq_hdl *eqhdl; unsigned int cpu; /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; eqhdl = lpfc_get_eq_hdl(0); retval = pci_irq_vector(phba->pcidev, 0); if (retval < 0) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0502 INTR pci_irq_vec failed (%d)\n", retval); return LPFC_INTR_ERROR; } eqhdl->irq = retval; cpu = cpumask_first(cpu_present_mask); lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); for (idx = 0; idx < phba->cfg_irq_chann; idx++) { eqhdl = lpfc_get_eq_hdl(idx); eqhdl->idx = idx; } } } return intr_mode; } /** * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to disable device interrupt and disassociate * the driver's interrupt handler(s) from interrupt vector(s) to device * with SLI-4 interface spec. Depending on the interrupt mode, the driver * will release the interrupt vector(s) for the message signaled interrupt. **/ static void lpfc_sli4_disable_intr(struct lpfc_hba *phba) { /* Disable the currently initialized interrupt mode */ if (phba->intr_type == MSIX) { int index; struct lpfc_hba_eq_hdl *eqhdl; /* Free up MSI-X multi-message vectors */ for (index = 0; index < phba->cfg_irq_chann; index++) { eqhdl = lpfc_get_eq_hdl(index); lpfc_irq_clear_aff(eqhdl); free_irq(eqhdl->irq, eqhdl); } } else { free_irq(phba->pcidev->irq, phba); } pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; phba->sli.slistat.sli_intr = 0; } /** * lpfc_unset_hba - Unset SLI3 hba device initialization * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unset the HBA device initialization steps to * a device with SLI-3 interface spec. **/ static void lpfc_unset_hba(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(shost->host_lock); kfree(phba->vpi_bmask); kfree(phba->vpi_ids); lpfc_stop_hba_timers(phba); phba->pport->work_port_events = 0; lpfc_sli_hba_down(phba); lpfc_sli_brdrestart(phba); lpfc_sli_disable_intr(phba); return; } /** * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy * @phba: Pointer to HBA context object. * * This function is called in the SLI4 code path to wait for completion * of device's XRIs exchange busy. 
It will check the XRI exchange busy * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after * that, it will check the XRI exchange busy on outstanding FCP and ELS * I/Os every 30 seconds, log error message, and wait forever. Only when * all XRI exchange busy complete, the driver unload shall proceed with * invoking the function reset ioctl mailbox command to the CNA and the * the rest of the driver unload resource release. **/ static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { struct lpfc_sli4_hdw_queue *qp; int idx, ccnt; int wait_time = 0; int io_xri_cmpl = 1; int nvmet_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); /* Driver just aborted IOs during the hba_unset process. Pause * here to give the HBA time to complete the IO and get entries * into the abts lists. */ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); /* Wait for NVME pending IO to flush back to transport. */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_nvme_wait_for_io_drain(phba); ccnt = 0; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); if (!io_xri_cmpl) /* if list is NOT empty */ ccnt++; } if (ccnt) io_xri_cmpl = 0; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { nvmet_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!nvmet_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6424 NVMET XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); if (!io_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6100 IO XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); if (!els_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2878 ELS XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; } else { msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } ccnt = 0; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; io_xri_cmpl = list_empty( &qp->lpfc_abts_io_buf_list); if (!io_xri_cmpl) /* if list is NOT empty */ ccnt++; } if (ccnt) io_xri_cmpl = 0; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { nvmet_xri_cmpl = list_empty( &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); } } /** * lpfc_sli4_hba_unset - Unset the fcoe hba * @phba: Pointer to HBA context object. * * This function is called in the SLI4 code path to reset the HBA's FCoE * function. The caller is not required to hold any lock. This routine * issues PCI function reset mailbox command to reset the FCoE function. * At the end of the function, it calls lpfc_hba_down_post function to * free any pending commands. **/ static void lpfc_sli4_hba_unset(struct lpfc_hba *phba) { int wait_cnt = 0; LPFC_MBOXQ_t *mboxq; struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); hrtimer_cancel(&phba->cmf_stats_timer); hrtimer_cancel(&phba->cmf_timer); if (phba->pport) phba->sli4_hba.intr_enable = 0; /* * Gracefully wait out the potential current outstanding asynchronous * mailbox command. 
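 * The loop below polls in 10 ms steps, up to LPFC_ACTIVE_MBOX_WAIT_CNT
 * iterations, before the active mailbox is forcefully completed.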
*/ /* First, block any pending async mailbox command from posted */ spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); /* Now, trying to wait it out if we can */ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { msleep(10); if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) break; } /* Forcefully release the outstanding mailbox command if timed out */ if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { spin_lock_irq(&phba->hbalock); mboxq = phba->sli.mbox_active; mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; __lpfc_mbox_cmpl_put(phba, mboxq); phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; phba->sli.mbox_active = NULL; spin_unlock_irq(&phba->hbalock); } /* Abort all iocbs associated with the hba */ lpfc_sli_hba_iocb_abort(phba); if (!pci_channel_offline(phba->pcidev)) /* Wait for completion of device XRI exchange busy */ lpfc_sli4_xri_exchange_busy_wait(phba); /* per-phba callback de-registration for hotplug event */ if (phba->pport) lpfc_cpuhp_remove(phba); /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); /* Disable SR-IOV if enabled */ if (phba->cfg_sriov_nr_virtfn) pci_disable_sriov(pdev); /* Stop kthread signal shall trigger work_done one more time */ kthread_stop(phba->worker_thread); /* Disable FW logging to host memory */ lpfc_ras_stop_fwlog(phba); /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); /* release all queue allocated resources. */ lpfc_sli4_queue_destroy(phba); /* Free RAS DMA memory */ if (phba->ras_fwlog.ras_enabled) lpfc_sli4_ras_dma_free(phba); /* Stop the SLI4 device port */ if (phba->pport) phba->pport->work_port_events = 0; } static uint32_t lpfc_cgn_crc32(uint32_t crc, u8 byte) { uint32_t msb = 0; uint32_t bit; for (bit = 0; bit < 8; bit++) { msb = (crc >> 31) & 1; crc <<= 1; if (msb ^ (byte & 1)) { crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; crc |= 1; } byte >>= 1; } return crc; } static uint32_t lpfc_cgn_reverse_bits(uint32_t wd) { uint32_t result = 0; uint32_t i; for (i = 0; i < 32; i++) { result <<= 1; result |= (1 & (wd >> i)); } return result; } /* * The routine corresponds with the algorithm the HBA firmware * uses to validate the data integrity. 
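 *
 * A minimal usage sketch, mirroring lpfc_init_congestion_buf() further
 * below:
 *
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *   cp->cgn_info_crc = cpu_to_le32(crc);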
*/ uint32_t lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) { uint32_t i; uint32_t result; uint8_t *data = (uint8_t *)ptr; for (i = 0; i < byteLen; ++i) crc = lpfc_cgn_crc32(crc, data[i]); result = ~lpfc_cgn_reverse_bits(crc); return result; } void lpfc_init_congestion_buf(struct lpfc_hba *phba) { struct lpfc_cgn_info *cp; uint16_t size; uint32_t crc; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6235 INIT Congestion Buffer %p\n", phba->cgn_i); if (!phba->cgn_i) return; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; atomic_set(&phba->cgn_fabric_warn_cnt, 0); atomic_set(&phba->cgn_fabric_alarm_cnt, 0); atomic_set(&phba->cgn_sync_alarm_cnt, 0); atomic_set(&phba->cgn_sync_warn_cnt, 0); atomic_set(&phba->cgn_driver_evt_cnt, 0); atomic_set(&phba->cgn_latency_evt_cnt, 0); atomic64_set(&phba->cgn_latency_evt, 0); phba->cgn_evt_minute = 0; memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); cp->cgn_info_version = LPFC_CGN_INFO_V4; /* cgn parameters */ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; lpfc_cgn_update_tstamp(phba, &cp->base_time); /* Fill in default LUN qdepth */ if (phba->pport) { size = (uint16_t)(phba->pport->cfg_lun_queue_depth); cp->cgn_lunq = cpu_to_le16(size); } /* last used Index initialized to 0xff already */ cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(crc); phba->cgn_evt_timestamp = jiffies + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); } void lpfc_init_congestion_stat(struct lpfc_hba *phba) { struct lpfc_cgn_info *cp; uint32_t crc; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6236 INIT Congestion Stat %p\n", phba->cgn_i); if (!phba->cgn_i) return; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); lpfc_cgn_update_tstamp(phba, &cp->stat_start); crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(crc); } /** * __lpfc_reg_congestion_buf - register congestion info buffer with HBA * @phba: Pointer to hba context object. * @reg: flag to determine register or unregister. 
*/ static int __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) { struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; LPFC_MBOXQ_t *mboxq; int length, rc; if (!phba->cgn_i) return -ENXIO; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2641 REG_CONGESTION_BUF mbox allocation fail: " "HBA state x%x reg %d\n", phba->pport->port_state, reg); return -ENOMEM; } length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, LPFC_SLI4_MBX_EMBED); reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); if (reg > 0) bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); else bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); reg_congestion_buf->addr_lo = putPaddrLow(phba->cgn_i->phys); reg_congestion_buf->addr_hi = putPaddrHigh(phba->cgn_i->phys); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); mempool_free(mboxq, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2642 REG_CONGESTION_BUF mailbox " "failed with status x%x add_status x%x," " mbx status x%x reg %d\n", shdr_status, shdr_add_status, rc, reg); return -ENXIO; } return 0; } int lpfc_unreg_congestion_buf(struct lpfc_hba *phba) { lpfc_cmf_stop(phba); return __lpfc_reg_congestion_buf(phba, 0); } int lpfc_reg_congestion_buf(struct lpfc_hba *phba) { return __lpfc_reg_congestion_buf(phba, 1); } /** * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. * @phba: Pointer to HBA context object. * @mboxq: Pointer to the mailboxq memory for the mailbox command response. * * This function is called in the SLI4 code path to read the port's * sli4 capabilities. * * This function may be be called from any context that can block-wait * for the completion. The expectation is that this routine is called * typically from probe_one or from the online routine. **/ int lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { int rc; struct lpfc_mqe *mqe = &mboxq->u.mqe; struct lpfc_pc_sli4_params *sli4_params; uint32_t mbox_tmo; int length; bool exp_wqcq_pages = true; struct lpfc_sli4_parameters *mbx_sli4_parameters; /* * By default, the driver assumes the SLI4 port requires RPI * header postings. The SLI4_PARAM response will correct this * assumption. 
*/ phba->sli4_hba.rpi_hdrs_in_use = 1; /* Read the port's SLI4 Config Parameters */ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, length, LPFC_SLI4_MBX_EMBED); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); } if (unlikely(rc)) return rc; sli4_params = &phba->sli4_hba.pc_sli4_params; mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, mbx_sli4_parameters); sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, mbx_sli4_parameters); if (bf_get(cfg_phwq, mbx_sli4_parameters)) phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; else phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, mbx_sli4_parameters); sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, mbx_sli4_parameters); phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); /* Check for Extended Pre-Registered SGL support */ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); /* Check for firmware nvme support */ rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && bf_get(cfg_xib, mbx_sli4_parameters)); if (rc) { /* Save this to indicate the Firmware supports NVME */ sli4_params->nvme = 1; /* Firmware NVME support, check driver FC4 NVME support */ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, "6133 Disabling NVME support: " "FC4 type not supported: x%x\n", phba->cfg_enable_fc4_type); goto fcponly; } } else { /* No firmware NVME support, check driver FC4 NVME support */ sli4_params->nvme = 0; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, "6101 Disabling NVME support: Not " "supported by firmware (%d %d) x%x\n", bf_get(cfg_nvme, mbx_sli4_parameters), bf_get(cfg_xib, mbx_sli4_parameters), phba->cfg_enable_fc4_type); fcponly: phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; phba->cfg_nvme_seg_cnt = 0; /* If no FC4 type support, move to just SCSI support */ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return -ENODEV; phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; } } /* If the NVME FC4 type is enabled, scale the 
sg_seg_cnt to * accommodate 512K and 1M IOs in a single nvme buf. */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; /* Enable embedded Payload BDE if support is indicated */ if (bf_get(cfg_pbde, mbx_sli4_parameters)) phba->cfg_enable_pbde = 1; else phba->cfg_enable_pbde = 0; /* * To support Suppress Response feature we must satisfy 3 conditions. * lpfc_suppress_rsp module parameter must be set (default). * In SLI4-Parameters Descriptor: * Extended Inline Buffers (XIB) must be supported. * Suppress Response IU Not Supported (SRIUNS) must NOT be supported * (double negative). */ if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && !(bf_get(cfg_nosr, mbx_sli4_parameters))) phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; else phba->cfg_suppress_rsp = 0; if (bf_get(cfg_eqdr, mbx_sli4_parameters)) phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; /* Make sure that sge_supp_len can be handled by the driver */ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "6400 Can't set dma maximum segment size\n"); return rc; } /* * Check whether the adapter supports an embedded copy of the * FCP CMD IU within the WQE for FCP_Ixxx commands. In order * to use this option, 128-byte WQEs must be used. */ if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) phba->fcp_embed_io = 1; else phba->fcp_embed_io = 0; lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", bf_get(cfg_xib, mbx_sli4_parameters), phba->cfg_enable_pbde, phba->fcp_embed_io, sli4_params->nvme, phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_2) && (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_FAMILY_LNCR_A0)) exp_wqcq_pages = false; if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && exp_wqcq_pages && (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) phba->enab_exp_wqcq_pages = 1; else phba->enab_exp_wqcq_pages = 0; /* * Check if the SLI port supports MDS Diagnostics */ if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) phba->mds_diags_support = 1; else phba->mds_diags_support = 0; /* * Check if the SLI port supports NSLER */ if (bf_get(cfg_nsler, mbx_sli4_parameters)) phba->nsler = 1; else phba->nsler = 0; return 0; } /** * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. * @pdev: pointer to PCI device * @pid: pointer to PCI device identifier * * This routine is to be called to attach a device with SLI-3 interface spec * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific * information of the device and driver to see if the driver state that it can * support this kind of device. If the match is successful, the driver core * invokes this routine. If this routine determines it can claim the HBA, it * does all the initialization that it needs to do to handle the HBA properly. 
* * Return code * 0 - driver can claim the device * negative value - driver can not claim the device **/ static int lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; struct Scsi_Host *shost = NULL; int error; uint32_t cfg_mode, intr_mode; /* Allocate memory for HBA structure */ phba = lpfc_hba_alloc(pdev); if (!phba) return -ENOMEM; /* Perform generic PCI device enabling operation */ error = lpfc_enable_pci_dev(phba); if (error) goto out_free_phba; /* Set up SLI API function jump table for PCI-device group-0 HBAs */ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); if (error) goto out_disable_pci_dev; /* Set up SLI-3 specific device PCI memory space */ error = lpfc_sli_pci_mem_setup(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1402 Failed to set up pci memory space.\n"); goto out_disable_pci_dev; } /* Set up SLI-3 specific device driver resources */ error = lpfc_sli_driver_resource_setup(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1404 Failed to set up driver resource.\n"); goto out_unset_pci_mem_s3; } /* Initialize and populate the iocb list per host */ error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1405 Failed to initialize iocb list.\n"); goto out_unset_driver_resource_s3; } /* Set up common device driver resources */ error = lpfc_setup_driver_resource_phase2(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1406 Failed to set up driver resource.\n"); goto out_free_iocb_list; } /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); /* Create SCSI host to the physical port */ error = lpfc_create_shost(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1407 Failed to create scsi host.\n"); goto out_unset_driver_resource; } /* Configure sysfs attributes */ vport = phba->pport; error = lpfc_alloc_sysfs_attr(vport); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1476 Failed to allocate sysfs attr\n"); goto out_destroy_shost; } shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; while (true) { /* Put device to a known state before enabling interrupt */ lpfc_stop_port(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0431 Failed to enable interrupt.\n"); error = -ENODEV; goto out_free_sysfs_attr; } /* SLI-3 HBA setup */ if (lpfc_sli_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1477 Failed to set up hba\n"); error = -ENODEV; goto out_remove_device; } /* Wait 50ms for the interrupts of previous mailbox commands */ msleep(50); /* Check active interrupts on message signaled interrupts */ if (intr_mode == 0 || phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { /* Log the current active interrupt mode */ phba->intr_mode = intr_mode; lpfc_log_intr_mode(phba, intr_mode); break; } else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0447 Configure interrupt mode (%d) " "failed active interrupt test.\n", intr_mode); /* Disable the current interrupt mode */ lpfc_sli_disable_intr(phba); /* Try next level of interrupt mode */ cfg_mode = --intr_mode; } } /* Perform post initialization setup */ lpfc_post_init_setup(phba); /* Check if there are static vports to 
be created. */ lpfc_create_static_vport(phba); return 0; out_remove_device: lpfc_unset_hba(phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(vport); out_destroy_shost: lpfc_destroy_shost(phba); out_unset_driver_resource: lpfc_unset_driver_resource_phase2(phba); out_free_iocb_list: lpfc_free_iocb_list(phba); out_unset_driver_resource_s3: lpfc_sli_driver_resource_unset(phba); out_unset_pci_mem_s3: lpfc_sli_pci_mem_unset(phba); out_disable_pci_dev: lpfc_disable_pci_dev(phba); if (shost) scsi_host_put(shost); out_free_phba: lpfc_hba_free(phba); return error; } /** * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. * @pdev: pointer to PCI device * * This routine is to be called to disattach a device with SLI-3 interface * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is * removed from PCI bus, it performs all the necessary cleanup for the HBA * device to be removed from the PCI subsystem properly. **/ static void lpfc_pci_remove_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_vport **vports; struct lpfc_hba *phba = vport->phba; int i; spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); lpfc_free_sysfs_attr(vport); /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->port_type == LPFC_PHYSICAL_PORT) continue; fc_vport_terminate(vports[i]->fc_vport); } lpfc_destroy_vport_work_array(phba, vports); /* Remove FC host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); /* Clean up all nodes, mailboxes and IOs. */ lpfc_cleanup(vport); /* * Bring down the SLI Layer. This step disable all interrupts, * clears the rings, discards all mailbox commands, and resets * the HBA. */ /* HBA interrupt will be disabled after this call */ lpfc_sli_hba_down(phba); /* Stop kthread signal shall trigger work_done one more time */ kthread_stop(phba->worker_thread); /* Final cleanup of txcmplq and reset the HBA */ lpfc_sli_brdrestart(phba); kfree(phba->vpi_bmask); kfree(phba->vpi_ids); lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->port_list_lock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->port_list_lock); lpfc_debugfs_terminate(vport); /* Disable SR-IOV if enabled */ if (phba->cfg_sriov_nr_virtfn) pci_disable_sriov(pdev); /* Disable interrupt */ lpfc_sli_disable_intr(phba); scsi_host_put(shost); /* * Call scsi_free before mem_free since scsi bufs are released to their * corresponding pools here. */ lpfc_scsi_free(phba); lpfc_free_iocb_list(phba); lpfc_mem_free_all(phba); dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, phba->hbqslimp.phys); /* Free resources associated with SLI2 interface */ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p.virt, phba->slim2p.phys); /* unmap adapter SLIM and Control Registers */ iounmap(phba->ctrl_regs_memmap_p); iounmap(phba->slim_memmap_p); lpfc_hba_free(phba); pci_release_mem_regions(pdev); pci_disable_device(pdev); } /** * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt * @dev_d: pointer to device * * This routine is to be called from the kernel's PCI subsystem to support * system Power Management (PM) to device with SLI-3 interface spec. 
When * PM invokes this method, it quiesces the device by stopping the driver's * worker thread for the device, turning off device's interrupt and DMA, * and bring the device offline. Note that as the driver implements the * minimum PM requirements to a power-aware driver's PM support for the * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) * to the suspend() method call will be treated as SUSPEND and the driver will * fully reinitialize its device during resume() method call, the driver will * set device to PCI_D3hot state in PCI config space instead of setting it * according to the @msg provided by the PM. * * Return code * 0 - driver suspended the device * Error otherwise **/ static int __maybe_unused lpfc_pci_suspend_one_s3(struct device *dev_d) { struct Scsi_Host *shost = dev_get_drvdata(dev_d); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0473 PCI device Power Management suspend.\n"); /* Bring down the device */ lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); kthread_stop(phba->worker_thread); /* Disable interrupt from device */ lpfc_sli_disable_intr(phba); return 0; } /** * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt * @dev_d: pointer to device * * This routine is to be called from the kernel's PCI subsystem to support * system Power Management (PM) to device with SLI-3 interface spec. When PM * invokes this method, it restores the device's PCI config space state and * fully reinitializes the device and brings it online. Note that as the * driver implements the minimum PM requirements to a power-aware driver's * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, * FREEZE) to the suspend() method call will be treated as SUSPEND and the * driver will fully reinitialize its device during resume() method call, * the device will be set to PCI_D0 directly in PCI config space before * restoring the state. * * Return code * 0 - driver suspended the device * Error otherwise **/ static int __maybe_unused lpfc_pci_resume_one_s3(struct device *dev_d) { struct Scsi_Host *shost = dev_get_drvdata(dev_d); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; uint32_t intr_mode; int error; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0452 PCI device Power Management resume.\n"); /* Startup the kernel thread for this host adapter. */ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); if (IS_ERR(phba->worker_thread)) { error = PTR_ERR(phba->worker_thread); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0434 PM resume failed to start worker " "thread: error=x%x.\n", error); return error; } /* Init cpu_map array */ lpfc_cpu_map_array_init(phba); /* Init hba_eq_hdl array */ lpfc_hba_eq_hdl_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0430 PM resume Failed to enable interrupt\n"); return -EIO; } else phba->intr_mode = intr_mode; /* Restart HBA and bring it online */ lpfc_sli_brdrestart(phba); lpfc_online(phba); /* Log the current active interrupt mode */ lpfc_log_intr_mode(phba, phba->intr_mode); return 0; } /** * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot recover. 
It * aborts all the outstanding SCSI I/Os to the pci device. **/ static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2723 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. */ lpfc_sli_abort_fcp_rings(phba); } /** * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot reset. It * disables the device interrupt and pci device, and aborts the internal FCP * pending I/Os. **/ static void lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2710 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); /* Flush all driver's outstanding SCSI I/Os as we are to reset */ lpfc_sli_flush_io_rings(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); } /** * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot permanently * disabling. It blocks the SCSI transport layer traffic and flushes the FCP * pending I/Os. **/ static void lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2711 PCI channel permanent disable for failure\n"); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); lpfc_sli4_prep_dev_for_reset(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); /* Clean up all driver's outstanding SCSI I/Os */ lpfc_sli_flush_io_rings(phba); } /** * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error * @pdev: pointer to PCI device. * @state: the current PCI connection state. * * This routine is called from the PCI subsystem for I/O error handling to * device with SLI-3 interface spec. This function is called by the PCI * subsystem after a PCI bus error affecting this device has been detected. * When this function is invoked, it will need to stop all the I/Os and * interrupt(s) to the device. Once that is done, it will return * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery * as desired. 
* * Return codes * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; switch (state) { case pci_channel_io_normal: /* Non-fatal error, prepare for recovery */ lpfc_sli_prep_dev_for_recover(phba); return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: /* Fatal error, prepare for slot reset */ lpfc_sli_prep_dev_for_reset(phba); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent failure, prepare for device down */ lpfc_sli_prep_dev_for_perm_failure(phba); return PCI_ERS_RESULT_DISCONNECT; default: /* Unknown state, prepare and request slot reset */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0472 Unknown PCI error state: x%x\n", state); lpfc_sli_prep_dev_for_reset(phba); return PCI_ERS_RESULT_NEED_RESET; } } /** * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. * @pdev: pointer to PCI device. * * This routine is called from the PCI subsystem for error handling to * device with SLI-3 interface spec. This is called after PCI bus has been * reset to restart the PCI card from scratch, as if from a cold-boot. * During the PCI subsystem error recovery, after driver returns * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error * recovery and then call this routine before calling the .resume method * to recover the device. This function will initialize the HBA device, * enable the interrupt, but it will just put the HBA to offline state * without passing any I/O traffic. * * Return codes * PCI_ERS_RESULT_RECOVERED - the device has been recovered * PCI_ERS_RESULT_DISCONNECT - device could not be recovered */ static pci_ers_result_t lpfc_io_slot_reset_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; struct lpfc_sli *psli = &phba->sli; uint32_t intr_mode; dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); if (pci_enable_device_mem(pdev)) { printk(KERN_ERR "lpfc: Cannot re-enable " "PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_restore_state(pdev); /* * As the new kernel behavior of pci_restore_state() API call clears * device saved_state flag, need to save the restored state again. */ pci_save_state(pdev); if (pdev->is_busmaster) pci_set_master(pdev); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0427 Cannot re-enable interrupt after " "slot reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } else phba->intr_mode = intr_mode; /* Take device offline, it will perform cleanup */ lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); /* Log the current active interrupt mode */ lpfc_log_intr_mode(phba, phba->intr_mode); return PCI_ERS_RESULT_RECOVERED; } /** * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. * @pdev: pointer to PCI device * * This routine is called from the PCI subsystem for error handling to device * with SLI-3 interface spec. 
It is called when kernel error recovery tells * the lpfc driver that it is ok to resume normal PCI operation after PCI bus * error recovery. After this call, traffic can start to flow from this device * again. */ static void lpfc_io_resume_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; /* Bring device online, it will be no-op for non-fatal error resume */ lpfc_online(phba); } /** * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve * @phba: pointer to lpfc hba data structure. * * returns the number of ELS/CT IOCBs to reserve **/ int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) { int max_xri = phba->sli4_hba.max_cfg_param.max_xri; if (phba->sli_rev == LPFC_SLI_REV4) { if (max_xri <= 100) return 10; else if (max_xri <= 256) return 25; else if (max_xri <= 512) return 50; else if (max_xri <= 1024) return 100; else if (max_xri <= 1536) return 150; else if (max_xri <= 2048) return 200; else return 250; } else return 0; } /** * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve * @phba: pointer to lpfc hba data structure. * * returns the number of ELS/CT + NVMET IOCBs to reserve **/ int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) { int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); if (phba->nvmet_support) max_xri += LPFC_NVMET_BUF_POST; return max_xri; } static int lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, const struct firmware *fw) { int rc; u8 sli_family; sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); /* Three cases: (1) FW was not supported on the detected adapter. * (2) FW update has been locked out administratively. * (3) Some other error during FW update. * In each case, an unmaskable message is written to the console * for admin diagnosis. */ if (offset == ADD_STATUS_FW_NOT_SUPPORTED || (sli_family == LPFC_SLI_INTF_FAMILY_G6 && magic_number != MAGIC_NUMBER_G6) || (sli_family == LPFC_SLI_INTF_FAMILY_G7 && magic_number != MAGIC_NUMBER_G7) || (sli_family == LPFC_SLI_INTF_FAMILY_G7P && magic_number != MAGIC_NUMBER_G7P)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3030 This firmware version is not supported on" " this HBA model. Device:%x Magic:%x Type:%x " "ID:%x Size %d %zd\n", phba->pcidev->device, magic_number, ftype, fid, fsize, fw->size); rc = -EINVAL; } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3021 Firmware downloads have been prohibited " "by a system configuration setting on " "Device:%x Magic:%x Type:%x ID:%x Size %d " "%zd\n", phba->pcidev->device, magic_number, ftype, fid, fsize, fw->size); rc = -EACCES; } else { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3022 FW Download failed. Add Status x%x " "Device:%x Magic:%x Type:%x ID:%x Size %d " "%zd\n", offset, phba->pcidev->device, magic_number, ftype, fid, fsize, fw->size); rc = -EIO; } return rc; } /** * lpfc_write_firmware - attempt to write a firmware image to the port * @fw: pointer to firmware image returned from request_firmware. * @context: pointer to firmware image returned from request_firmware. 
* **/ static void lpfc_write_firmware(const struct firmware *fw, void *context) { struct lpfc_hba *phba = (struct lpfc_hba *)context; char fwrev[FW_REV_STR_SIZE]; struct lpfc_grp_hdr *image; struct list_head dma_buffer_list; int i, rc = 0; struct lpfc_dmabuf *dmabuf, *next; uint32_t offset = 0, temp_offset = 0; uint32_t magic_number, ftype, fid, fsize; /* It can be null in no-wait mode, sanity check */ if (!fw) { rc = -ENXIO; goto out; } image = (struct lpfc_grp_hdr *)fw->data; magic_number = be32_to_cpu(image->magic_number); ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); fid = bf_get_be32(lpfc_grp_hdr_id, image); fsize = be32_to_cpu(image->size); INIT_LIST_HEAD(&dma_buffer_list); lpfc_decode_firmware_rev(phba, fwrev, 1); if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, "3023 Updating Firmware, Current Version:%s " "New Version:%s\n", fwrev, image->revision); for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) { rc = -ENOMEM; goto release_out; } dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); rc = -ENOMEM; goto release_out; } list_add_tail(&dmabuf->list, &dma_buffer_list); } while (offset < fw->size) { temp_offset = offset; list_for_each_entry(dmabuf, &dma_buffer_list, list) { if (temp_offset + SLI4_PAGE_SIZE > fw->size) { memcpy(dmabuf->virt, fw->data + temp_offset, fw->size - temp_offset); temp_offset = fw->size; break; } memcpy(dmabuf->virt, fw->data + temp_offset, SLI4_PAGE_SIZE); temp_offset += SLI4_PAGE_SIZE; } rc = lpfc_wr_object(phba, &dma_buffer_list, (fw->size - offset), &offset); if (rc) { rc = lpfc_log_write_firmware_error(phba, offset, magic_number, ftype, fid, fsize, fw); goto release_out; } } rc = offset; } else lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, "3029 Skipped Firmware update, Current " "Version:%s New Version:%s\n", fwrev, image->revision); release_out: list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { list_del(&dmabuf->list); dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, dmabuf->virt, dmabuf->phys); kfree(dmabuf); } release_firmware(fw); out: if (rc < 0) lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI, "3062 Firmware update error, status %d.\n", rc); else lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, "3024 Firmware update success: size %d.\n", rc); } /** * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade * @phba: pointer to lpfc hba data structure. * @fw_upgrade: which firmware to update. * * This routine is called to perform Linux generic firmware upgrade on device * that supports such feature. 
**/ int lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) { uint8_t file_name[ELX_MODEL_NAME_SIZE]; int ret; const struct firmware *fw; /* Only supported on SLI4 interface type 2 for now */ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) return -EPERM; snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); if (fw_upgrade == INT_FW_UPGRADE) { ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, file_name, &phba->pcidev->dev, GFP_KERNEL, (void *)phba, lpfc_write_firmware); } else if (fw_upgrade == RUN_FW_UPGRADE) { ret = request_firmware(&fw, file_name, &phba->pcidev->dev); if (!ret) lpfc_write_firmware(fw, (void *)phba); } else { ret = -EINVAL; } return ret; } /** * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys * @pdev: pointer to PCI device * @pid: pointer to PCI device identifier * * This routine is called from the kernel's PCI subsystem to device with * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific * information of the device and driver to see if the driver state that it * can support this kind of device. If the match is successful, the driver * core invokes this routine. If this routine determines it can claim the HBA, * it does all the initialization that it needs to do to handle the HBA * properly. * * Return code * 0 - driver can claim the device * negative value - driver can not claim the device **/ static int lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; struct Scsi_Host *shost = NULL; int error; uint32_t cfg_mode, intr_mode; /* Allocate memory for HBA structure */ phba = lpfc_hba_alloc(pdev); if (!phba) return -ENOMEM; INIT_LIST_HEAD(&phba->poll_list); /* Perform generic PCI device enabling operation */ error = lpfc_enable_pci_dev(phba); if (error) goto out_free_phba; /* Set up SLI API function jump table for PCI-device group-1 HBAs */ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); if (error) goto out_disable_pci_dev; /* Set up SLI-4 specific device PCI memory space */ error = lpfc_sli4_pci_mem_setup(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1410 Failed to set up pci memory space.\n"); goto out_disable_pci_dev; } /* Set up SLI-4 Specific device driver resources */ error = lpfc_sli4_driver_resource_setup(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1412 Failed to set up driver resource.\n"); goto out_unset_pci_mem_s4; } INIT_LIST_HEAD(&phba->active_rrq_list); INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); /* Set up common device driver resources */ error = lpfc_setup_driver_resource_phase2(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1414 Failed to set up driver resource.\n"); goto out_unset_driver_resource_s4; } /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; /* Put device to a known state before enabling interrupt */ phba->pport = NULL; lpfc_stop_port(phba); /* Init cpu_map array */ lpfc_cpu_map_array_init(phba); /* Init hba_eq_hdl array */ lpfc_hba_eq_hdl_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0426 Failed 
to enable interrupt.\n"); error = -ENODEV; goto out_unset_driver_resource; } /* Default to single EQ for non-MSI-X */ if (phba->intr_type != MSIX) { phba->cfg_irq_chann = 1; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->nvmet_support) phba->cfg_nvmet_mrq = 1; } } lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); /* Create SCSI host to the physical port */ error = lpfc_create_shost(phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1415 Failed to create scsi host.\n"); goto out_disable_intr; } vport = phba->pport; shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ /* Configure sysfs attributes */ error = lpfc_alloc_sysfs_attr(vport); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1416 Failed to allocate sysfs attr\n"); goto out_destroy_shost; } /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1421 Failed to set up hba\n"); error = -ENODEV; goto out_free_sysfs_attr; } /* Log the current active interrupt mode */ phba->intr_mode = intr_mode; lpfc_log_intr_mode(phba, intr_mode); /* Perform post initialization setup */ lpfc_post_init_setup(phba); /* NVME support in FW earlier in the driver load corrects the * FC4 type making a check for nvme_support unnecessary. */ if (phba->nvmet_support == 0) { if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Create NVME binding with nvme_fc_transport. This * ensures the vport is initialized. If the localport * create fails, it should not unload the driver to * support field issues. */ error = lpfc_nvme_create_localport(vport); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6004 NVME registration " "failed, error x%x\n", error); } } } /* check for firmware upgrade or downgrade */ if (phba->cfg_request_firmware_upgrade) lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); /* Check if there are static vports to be created. */ lpfc_create_static_vport(phba); timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); return 0; out_free_sysfs_attr: lpfc_free_sysfs_attr(vport); out_destroy_shost: lpfc_destroy_shost(phba); out_disable_intr: lpfc_sli4_disable_intr(phba); out_unset_driver_resource: lpfc_unset_driver_resource_phase2(phba); out_unset_driver_resource_s4: lpfc_sli4_driver_resource_unset(phba); out_unset_pci_mem_s4: lpfc_sli4_pci_mem_unset(phba); out_disable_pci_dev: lpfc_disable_pci_dev(phba); if (shost) scsi_host_put(shost); out_free_phba: lpfc_hba_free(phba); return error; } /** * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem * @pdev: pointer to PCI device * * This routine is called from the kernel's PCI subsystem to device with * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is * removed from PCI bus, it performs all the necessary cleanup for the HBA * device to be removed from the PCI subsystem properly. 
**/ static void lpfc_pci_remove_one_s4(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_vport **vports; struct lpfc_hba *phba = vport->phba; int i; /* Mark the device unloading flag */ spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); if (phba->cgn_i) lpfc_unreg_congestion_buf(phba); lpfc_free_sysfs_attr(vport); /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->port_type == LPFC_PHYSICAL_PORT) continue; fc_vport_terminate(vports[i]->fc_vport); } lpfc_destroy_vport_work_array(phba, vports); /* Remove FC host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); /* Perform ndlp cleanup on the physical port. The nvme and nvmet * localports are destroyed after to cleanup all transport memory. */ lpfc_cleanup(vport); lpfc_nvmet_destroy_targetport(phba); lpfc_nvme_destroy_localport(vport); /* De-allocate multi-XRI pools */ if (phba->cfg_xri_rebalancing) lpfc_destroy_multixri_pools(phba); /* * Bring down the SLI Layer. This step disables all interrupts, * clears the rings, discards all mailbox commands, and resets * the HBA FCoE function. */ lpfc_debugfs_terminate(vport); lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->port_list_lock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->port_list_lock); /* Perform scsi free before driver resource_unset since scsi * buffers are released to their corresponding pools here. */ lpfc_io_free(phba); lpfc_free_iocb_list(phba); lpfc_sli4_hba_unset(phba); lpfc_unset_driver_resource_phase2(phba); lpfc_sli4_driver_resource_unset(phba); /* Unmap adapter Control and Doorbell registers */ lpfc_sli4_pci_mem_unset(phba); /* Release PCI resources and disable device's PCI function */ scsi_host_put(shost); lpfc_disable_pci_dev(phba); /* Finally, free the driver's device data structure */ lpfc_hba_free(phba); return; } /** * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt * @dev_d: pointer to device * * This routine is called from the kernel's PCI subsystem to support system * Power Management (PM) to device with SLI-4 interface spec. When PM invokes * this method, it quiesces the device by stopping the driver's worker * thread for the device, turning off device's interrupt and DMA, and bring * the device offline. Note that as the driver implements the minimum PM * requirements to a power-aware driver's PM support for suspend/resume -- all * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() * method call will be treated as SUSPEND and the driver will fully * reinitialize its device during resume() method call, the driver will set * device to PCI_D3hot state in PCI config space instead of setting it * according to the @msg provided by the PM. 
* * Return code * 0 - driver suspended the device * Error otherwise **/ static int __maybe_unused lpfc_pci_suspend_one_s4(struct device *dev_d) { struct Scsi_Host *shost = dev_get_drvdata(dev_d); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2843 PCI device Power Management suspend.\n"); /* Bring down the device */ lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); kthread_stop(phba->worker_thread); /* Disable interrupt from device */ lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); return 0; } /** * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt * @dev_d: pointer to device * * This routine is called from the kernel's PCI subsystem to support system * Power Management (PM) to device with SLI-4 interface spac. When PM invokes * this method, it restores the device's PCI config space state and fully * reinitializes the device and brings it online. Note that as the driver * implements the minimum PM requirements to a power-aware driver's PM for * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) * to the suspend() method call will be treated as SUSPEND and the driver * will fully reinitialize its device during resume() method call, the device * will be set to PCI_D0 directly in PCI config space before restoring the * state. * * Return code * 0 - driver suspended the device * Error otherwise **/ static int __maybe_unused lpfc_pci_resume_one_s4(struct device *dev_d) { struct Scsi_Host *shost = dev_get_drvdata(dev_d); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; uint32_t intr_mode; int error; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0292 PCI device Power Management resume.\n"); /* Startup the kernel thread for this host adapter. */ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); if (IS_ERR(phba->worker_thread)) { error = PTR_ERR(phba->worker_thread); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0293 PM resume failed to start worker " "thread: error=x%x.\n", error); return error; } /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0294 PM resume Failed to enable interrupt\n"); return -EIO; } else phba->intr_mode = intr_mode; /* Restart HBA and bring it online */ lpfc_sli_brdrestart(phba); lpfc_online(phba); /* Log the current active interrupt mode */ lpfc_log_intr_mode(phba, phba->intr_mode); return 0; } /** * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI4 device for PCI slot recover. It * aborts all the outstanding SCSI I/Os to the pci device. **/ static void lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2828 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. */ lpfc_sli_abort_fcp_rings(phba); } /** * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI4 device for PCI slot reset. It * disables the device interrupt and pci device, and aborts the internal FCP * pending I/Os. 
**/ static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) { int offline = pci_channel_offline(phba->pcidev); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2826 PCI channel disable preparing for reset offline" " %d\n", offline); /* Block any management I/Os to the device */ lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); /* HBA_PCI_ERR was set in io_error_detect */ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); /* Flush all driver's outstanding I/Os as we are to reset */ lpfc_sli_flush_io_rings(phba); lpfc_offline(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); lpfc_sli4_queue_destroy(phba); /* Disable interrupt and pci device */ lpfc_sli4_disable_intr(phba); pci_disable_device(phba->pcidev); } /** * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI4 device for PCI slot permanently * disabling. It blocks the SCSI transport layer traffic and flushes the FCP * pending I/Os. **/ static void lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2827 PCI channel permanent disable for failure\n"); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); /* Clean up all driver's outstanding I/Os */ lpfc_sli_flush_io_rings(phba); } /** * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device * @pdev: pointer to PCI device. * @state: the current PCI connection state. * * This routine is called from the PCI subsystem for error handling to device * with SLI-4 interface spec. This function is called by the PCI subsystem * after a PCI bus error affecting this device has been detected. When this * function is invoked, it will need to stop all the I/Os and interrupt(s) * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET * for the PCI subsystem to perform proper recovery as desired. * * Return codes * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; bool hba_pci_err; switch (state) { case pci_channel_io_normal: /* Non-fatal error, prepare for recovery */ lpfc_sli4_prep_dev_for_recover(phba); return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); /* Fatal error, prepare for slot reset */ if (!hba_pci_err) lpfc_sli4_prep_dev_for_reset(phba); else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2832 Already handling PCI error " "state: x%x\n", state); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: set_bit(HBA_PCI_ERR, &phba->bit_flags); /* Permanent failure, prepare for device down */ lpfc_sli4_prep_dev_for_perm_failure(phba); return PCI_ERS_RESULT_DISCONNECT; default: hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); if (!hba_pci_err) lpfc_sli4_prep_dev_for_reset(phba); /* Unknown state, prepare and request slot reset */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2825 Unknown PCI error state: x%x\n", state); lpfc_sli4_prep_dev_for_reset(phba); return PCI_ERS_RESULT_NEED_RESET; } } /** * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch * @pdev: pointer to PCI device. 
* * This routine is called from the PCI subsystem for error handling to device * with SLI-4 interface spec. It is called after PCI bus has been reset to * restart the PCI card from scratch, as if from a cold-boot. During the * PCI subsystem error recovery, after the driver returns * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error * recovery and then call this routine before calling the .resume method to * recover the device. This function will initialize the HBA device, enable * the interrupt, but it will just put the HBA to offline state without * passing any I/O traffic. * * Return codes * PCI_ERS_RESULT_RECOVERED - the device has been recovered * PCI_ERS_RESULT_DISCONNECT - device could not be recovered */ static pci_ers_result_t lpfc_io_slot_reset_s4(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; struct lpfc_sli *psli = &phba->sli; uint32_t intr_mode; bool hba_pci_err; dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); if (pci_enable_device_mem(pdev)) { printk(KERN_ERR "lpfc: Cannot re-enable " "PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_restore_state(pdev); hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); if (!hba_pci_err) dev_info(&pdev->dev, "hba_pci_err was not set, recovering slot reset.\n"); /* * As the new kernel behavior of pci_restore_state() API call clears * device saved_state flag, need to save the restored state again. */ pci_save_state(pdev); if (pdev->is_busmaster) pci_set_master(pdev); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* Init cpu_map array */ lpfc_cpu_map_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2824 Cannot re-enable interrupt after " "slot reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } else phba->intr_mode = intr_mode; lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); /* Log the current active interrupt mode */ lpfc_log_intr_mode(phba, phba->intr_mode); return PCI_ERS_RESULT_RECOVERED; } /** * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device * @pdev: pointer to PCI device * * This routine is called from the PCI subsystem for error handling to device * with SLI-4 interface spec. It is called when kernel error recovery tells * the lpfc driver that it is ok to resume normal PCI operation after PCI bus * error recovery. After this call, traffic can start to flow from this device * again. **/ static void lpfc_io_resume_s4(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; /* * In case of slot reset, as function reset is performed through * mailbox command which needs DMA to be enabled, this operation * has to be moved to the io resume phase. Taking device offline * will perform the necessary cleanup. */ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { /* Perform device reset */ lpfc_sli_brdrestart(phba); /* Bring the device back online */ lpfc_online(phba); } } /** * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem * @pdev: pointer to PCI device * @pid: pointer to PCI device identifier * * This routine is to be registered to the kernel's PCI subsystem. 
When an * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks * at PCI device-specific information of the device and driver to see if the * driver state that it can support this kind of device. If the match is * successful, the driver core invokes this routine. This routine dispatches * the action to the proper SLI-3 or SLI-4 device probing routine, which will * do all the initialization that it needs to do to handle the HBA device * properly. * * Return code * 0 - driver can claim the device * negative value - driver can not claim the device **/ static int lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { int rc; struct lpfc_sli_intf intf; if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) return -ENODEV; if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) rc = lpfc_pci_probe_one_s4(pdev, pid); else rc = lpfc_pci_probe_one_s3(pdev, pid); return rc; } /** * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem * @pdev: pointer to PCI device * * This routine is to be registered to the kernel's PCI subsystem. When an * Emulex HBA is removed from PCI bus, the driver core invokes this routine. * This routine dispatches the action to the proper SLI-3 or SLI-4 device * remove routine, which will perform all the necessary cleanup for the * device to be removed from the PCI subsystem properly. **/ static void lpfc_pci_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: lpfc_pci_remove_one_s3(pdev); break; case LPFC_PCI_DEV_OC: lpfc_pci_remove_one_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1424 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return; } /** * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management * @dev: pointer to device * * This routine is to be registered to the kernel's PCI subsystem to support * system Power Management (PM). When PM invokes this method, it dispatches * the action to the proper SLI-3 or SLI-4 device suspend routine, which will * suspend the device. * * Return code * 0 - driver suspended the device * Error otherwise **/ static int __maybe_unused lpfc_pci_suspend_one(struct device *dev) { struct Scsi_Host *shost = dev_get_drvdata(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; int rc = -ENODEV; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_pci_suspend_one_s3(dev); break; case LPFC_PCI_DEV_OC: rc = lpfc_pci_suspend_one_s4(dev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1425 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management * @dev: pointer to device * * This routine is to be registered to the kernel's PCI subsystem to support * system Power Management (PM). When PM invokes this method, it dispatches * the action to the proper SLI-3 or SLI-4 device resume routine, which will * resume the device. 
* * Return code * 0 - driver suspended the device * Error otherwise **/ static int __maybe_unused lpfc_pci_resume_one(struct device *dev) { struct Scsi_Host *shost = dev_get_drvdata(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; int rc = -ENODEV; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_pci_resume_one_s3(dev); break; case LPFC_PCI_DEV_OC: rc = lpfc_pci_resume_one_s4(dev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1426 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_io_error_detected - lpfc method for handling PCI I/O error * @pdev: pointer to PCI device. * @state: the current PCI connection state. * * This routine is registered to the PCI subsystem for error handling. This * function is called by the PCI subsystem after a PCI bus error affecting * this device has been detected. When this routine is invoked, it dispatches * the action to the proper SLI-3 or SLI-4 device error detected handling * routine, which will perform the proper error detected operation. * * Return codes * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; if (phba->link_state == LPFC_HBA_ERROR && phba->hba_flag & HBA_IOQ_FLUSH) return PCI_ERS_RESULT_NEED_RESET; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_io_error_detected_s3(pdev, state); break; case LPFC_PCI_DEV_OC: rc = lpfc_io_error_detected_s4(pdev, state); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1427 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch * @pdev: pointer to PCI device. * * This routine is registered to the PCI subsystem for error handling. This * function is called after PCI bus has been reset to restart the PCI card * from scratch, as if from a cold-boot. When this routine is invoked, it * dispatches the action to the proper SLI-3 or SLI-4 device reset handling * routine, which will perform the proper device reset. * * Return codes * PCI_ERS_RESULT_RECOVERED - the device has been recovered * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: rc = lpfc_io_slot_reset_s3(pdev); break; case LPFC_PCI_DEV_OC: rc = lpfc_io_slot_reset_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1428 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return rc; } /** * lpfc_io_resume - lpfc method for resuming PCI I/O operation * @pdev: pointer to PCI device * * This routine is registered to the PCI subsystem for error handling. It * is called when kernel error recovery tells the lpfc driver that it is * OK to resume normal PCI operation after PCI bus error recovery. When * this routine is invoked, it dispatches the action to the proper SLI-3 * or SLI-4 device io_resume routine, which will resume the device operation. 
**/ static void lpfc_io_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: lpfc_io_resume_s3(pdev); break; case LPFC_PCI_DEV_OC: lpfc_io_resume_s4(pdev); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1429 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; } return; } /** * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter * @phba: pointer to lpfc hba data structure. * * This routine checks to see if OAS is supported for this adapter. If * supported, the configure Flash Optimized Fabric flag is set. Otherwise, * the enable oas flag is cleared and the pool created for OAS device data * is destroyed. * **/ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba) { if (!phba->cfg_EnableXLane) return; if (phba->sli4_hba.pc_sli4_params.oas_supported) { phba->cfg_fof = 1; } else { phba->cfg_fof = 0; mempool_destroy(phba->device_data_mem_pool); phba->device_data_mem_pool = NULL; } return; } /** * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter * @phba: pointer to lpfc hba data structure. * * This routine checks to see if RAS is supported by the adapter. Check the * function through which RAS support enablement is to be done. **/ void lpfc_sli4_ras_init(struct lpfc_hba *phba) { /* if ASIC_GEN_NUM >= 0xC) */ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_6) || (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_FAMILY_G6)) { phba->ras_fwlog.ras_hwsupport = true; if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && phba->cfg_ras_fwlog_buffsize) phba->ras_fwlog.ras_enabled = true; else phba->ras_fwlog.ras_enabled = false; } else { phba->ras_fwlog.ras_hwsupport = false; } } MODULE_DEVICE_TABLE(pci, lpfc_id_table); static const struct pci_error_handlers lpfc_err_handler = { .error_detected = lpfc_io_error_detected, .slot_reset = lpfc_io_slot_reset, .resume = lpfc_io_resume, }; static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, lpfc_pci_suspend_one, lpfc_pci_resume_one); static struct pci_driver lpfc_driver = { .name = LPFC_DRIVER_NAME, .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, .remove = lpfc_pci_remove_one, .shutdown = lpfc_pci_remove_one, .driver.pm = &lpfc_pci_pm_ops_one, .err_handler = &lpfc_err_handler, }; static const struct file_operations lpfc_mgmt_fop = { .owner = THIS_MODULE, }; static struct miscdevice lpfc_mgmt_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "lpfcmgmt", .fops = &lpfc_mgmt_fop, }; /** * lpfc_init - lpfc module initialization routine * * This routine is to be invoked when the lpfc module is loaded into the * kernel. The special kernel macro module_init() is used to indicate the * role of this routine to the kernel as lpfc module entry point. 
* * Return codes * 0 - successful * -ENOMEM - FC attach transport failed * all others - failed */ static int __init lpfc_init(void) { int error = 0; pr_info(LPFC_MODULE_DESC "\n"); pr_info(LPFC_COPYRIGHT "\n"); error = misc_register(&lpfc_mgmt_dev); if (error) printk(KERN_ERR "Could not register lpfcmgmt device, " "misc_register returned with status %d", error); error = -ENOMEM; lpfc_transport_functions.vport_create = lpfc_vport_create; lpfc_transport_functions.vport_delete = lpfc_vport_delete; lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) goto unregister; lpfc_vport_transport_template = fc_attach_transport(&lpfc_vport_transport_functions); if (lpfc_vport_transport_template == NULL) { fc_release_transport(lpfc_transport_template); goto unregister; } lpfc_wqe_cmd_template(); lpfc_nvmet_cmd_template(); /* Initialize in case vector mapping is needed */ lpfc_present_cpu = num_present_cpus(); lpfc_pldv_detect = false; error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lpfc/sli4:online", lpfc_cpu_online, lpfc_cpu_offline); if (error < 0) goto cpuhp_failure; lpfc_cpuhp_state = error; error = pci_register_driver(&lpfc_driver); if (error) goto unwind; return error; unwind: cpuhp_remove_multi_state(lpfc_cpuhp_state); cpuhp_failure: fc_release_transport(lpfc_transport_template); fc_release_transport(lpfc_vport_transport_template); unregister: misc_deregister(&lpfc_mgmt_dev); return error; } void lpfc_dmp_dbg(struct lpfc_hba *phba) { unsigned int start_idx; unsigned int dbg_cnt; unsigned int temp_idx; int i; int j = 0; unsigned long rem_nsec; if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) return; start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); if (!dbg_cnt) goto out; temp_idx = start_idx; if (dbg_cnt >= DBG_LOG_SZ) { dbg_cnt = DBG_LOG_SZ; temp_idx -= 1; } else { if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; } else { if (start_idx < dbg_cnt) start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); else start_idx -= dbg_cnt; } } dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", start_idx, temp_idx, dbg_cnt); for (i = 0; i < dbg_cnt; i++) { if ((start_idx + i) < DBG_LOG_SZ) temp_idx = (start_idx + i) % DBG_LOG_SZ; else temp_idx = j++; rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", temp_idx, (unsigned long)phba->dbg_log[temp_idx].t_ns, rem_nsec / 1000, phba->dbg_log[temp_idx].log); } out: atomic_set(&phba->dbg_log_cnt, 0); atomic_set(&phba->dbg_log_dmping, 0); } __printf(2, 3) void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) { unsigned int idx; va_list args; int dbg_dmping = atomic_read(&phba->dbg_log_dmping); struct va_format vaf; va_start(args, fmt); if (unlikely(dbg_dmping)) { vaf.fmt = fmt; vaf.va = &args; dev_info(&phba->pcidev->dev, "%pV", &vaf); va_end(args); return; } idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % DBG_LOG_SZ; atomic_inc(&phba->dbg_log_cnt); vscnprintf(phba->dbg_log[idx].log, sizeof(phba->dbg_log[idx].log), fmt, args); va_end(args); phba->dbg_log[idx].t_ns = local_clock(); } /** * lpfc_exit - lpfc module removal routine * * This routine is invoked when the lpfc module is removed from the kernel. * The special kernel macro module_exit() is used to indicate the role of * this routine to the kernel as lpfc module exit point. 
*/ static void __exit lpfc_exit(void) { misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); cpuhp_remove_multi_state(lpfc_cpuhp_state); fc_release_transport(lpfc_transport_template); fc_release_transport(lpfc_vport_transport_template); idr_destroy(&lpfc_hba_index); } module_init(lpfc_init); module_exit(lpfc_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(LPFC_MODULE_DESC); MODULE_AUTHOR("Broadcom"); MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
linux-master
drivers/scsi/lpfc/lpfc_init.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_compat.h" /** * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory. * @phba: pointer to lpfc hba data structure. * @mbox: pointer to the driver internal queue element for mailbox command. * * A mailbox command consists of the pool memory for the command, @mbox, and * one or more DMA buffers for the data transfer. This routine provides * a standard framework for allocating the dma buffer and assigning to the * @mbox. Callers should cleanup the mbox with a call to * lpfc_mbox_rsrc_cleanup. * * The lpfc_mbuf_alloc routine acquires the hbalock so the caller is * responsible to ensure the hbalock is released. Also note that the * driver design is a single dmabuf/mbuf per mbox in the ctx_buf. * **/ int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_dmabuf *mp; mp = kmalloc(sizeof(*mp), GFP_KERNEL); if (!mp) return -ENOMEM; mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp->virt) { kfree(mp); return -ENOMEM; } memset(mp->virt, 0, LPFC_BPL_SIZE); /* Initialization only. Driver does not use a list of dmabufs. */ INIT_LIST_HEAD(&mp->list); mbox->ctx_buf = mp; return 0; } /** * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory. * @phba: pointer to lpfc hba data structure. * @mbox: pointer to the driver internal queue element for mailbox command. * @locked: value that indicates if the hbalock is held (1) or not (0). * * A mailbox command consists of the pool memory for the command, @mbox, and * possibly a DMA buffer for the data transfer. This routine provides * a standard framework for releasing any dma buffers and freeing all * memory resources in it as well as releasing the @mbox back to the @phba pool. * Callers should use this routine for cleanup for all mailboxes prepped with * lpfc_mbox_rsrc_prep. 
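 *
 * A minimal usage sketch (illustrative only, not taken from a specific
 * caller): the mailbox element comes from the phba mailbox mempool, the
 * DMA buffer is attached with lpfc_mbox_rsrc_prep, and this routine then
 * releases both. MBOX_THD_UNLOCKED is assumed here to be the
 * enum lpfc_mbox_ctx value for callers that do not hold the hbalock.
 *
 *   LPFC_MBOXQ_t *mbox;
 *
 *   mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *   if (!mbox)
 *           return -ENOMEM;
 *   if (lpfc_mbox_rsrc_prep(phba, mbox)) {
 *           mempool_free(mbox, phba->mbox_mem_pool);
 *           return -ENOMEM;
 *   }
 *   // ... build the command in mbox->u.mb and issue it ...
 *   lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);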
* **/ void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, enum lpfc_mbox_ctx locked) { struct lpfc_dmabuf *mp; mp = (struct lpfc_dmabuf *)mbox->ctx_buf; mbox->ctx_buf = NULL; /* Release the generic BPL buffer memory. */ if (mp) { if (locked == MBOX_THD_LOCKED) __lpfc_mbuf_free(phba, mp->virt, mp->phys); else lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } mempool_free(mbox, phba->mbox_mem_pool); } /** * lpfc_dump_static_vport - Dump HBA's static vport information. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @offset: offset for dumping vport info. * * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. * * This routine prepares the mailbox command for dumping list of static * vports to be created. **/ int lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset) { MAILBOX_t *mb; struct lpfc_dmabuf *mp; int rc; mb = &pmb->u.mb; /* Setup to dump vport info region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = DMP_REGION_VPORT; mb->mbxOwner = OWN_HOST; /* For SLI3 HBAs data is embedded in mailbox */ if (phba->sli_rev != LPFC_SLI_REV4) { mb->un.varDmp.cv = 1; mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); return 0; } rc = lpfc_mbox_rsrc_prep(phba, pmb); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2605 %s: memory allocation failed\n", __func__); return 1; } mp = pmb->ctx_buf; mb->un.varWords[3] = putPaddrLow(mp->phys); mb->un.varWords[4] = putPaddrHigh(mp->phys); mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); return 0; } /** * lpfc_down_link - Bring down HBAs link. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This routine prepares a mailbox command to bring down HBA link. **/ void lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb = &pmb->u.mb; mb->mbxCommand = MBX_DOWN_LINK; mb->mbxOwner = OWN_HOST; } /** * lpfc_dump_mem - Prepare a mailbox command for reading a region. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @offset: offset into the region. * @region_id: config region id. * * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. * * This routine prepares the mailbox command for dumping HBA's config region. **/ void lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset, uint16_t region_id) { MAILBOX_t *mb; void *ctx; mb = &pmb->u.mb; ctx = pmb->ctx_buf; /* Setup to dump VPD region */ memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = region_id; mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; pmb->ctx_buf = ctx; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This function create a dump memory mailbox command to dump wake up * parameters. 
*/ void lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; void *ctx; mb = &pmb->u.mb; /* Save context so that we can restore after memset */ ctx = pmb->ctx_buf; /* Setup to dump VPD region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->mbxOwner = OWN_HOST; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; if (phba->sli_rev < LPFC_SLI_REV4) mb->un.varDmp.entry_index = 0; mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; pmb->ctx_buf = ctx; return; } /** * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The read NVRAM mailbox command returns the HBA's non-volatile parameters * that are used as defaults when the Fibre Channel link is brought on-line. * * This routine prepares the mailbox command for reading information stored * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN. **/ void lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_NV; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_async - Prepare a mailbox command for enabling HBA async event * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @ring: ring number for the asynchronous event to be configured. * * The asynchronous event enable mailbox command is used to enable the * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and * specifies the default ring to which events are posted. * * This routine prepares the mailbox command for enabling HBA asynchronous * event support on a IOCB ring. **/ void lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t ring) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_ASYNCEVT_ENABLE; mb->un.varCfgAsyncEvent.ring = ring; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_heart_beat - Prepare a mailbox command for heart beat * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The heart beat mailbox command is used to detect an unresponsive HBA, which * is defined as any device where no error attention is sent and both mailbox * and rings are not processed. * * This routine prepares the mailbox command for issuing a heart beat in the * form of mailbox command to the HBA. The timely completion of the heart * beat mailbox command indicates the health of the HBA. **/ void lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_HEARTBEAT; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_topology - Prepare a mailbox command for reading HBA topology * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @mp: DMA buffer memory for reading the link attention information into. * * The read topology mailbox command is issued to read the link topology * information indicated by the HBA port when the Link Event bit of the Host * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link * Attention ACQE is received from the port (For SLI-4). 
A Link Event * Attention occurs based on an exception detected at the Fibre Channel link * interface. * * This routine prepares the mailbox command for reading HBA link topology * information. A DMA memory has been set aside and address passed to the * HBA through @mp for the HBA to DMA link attention information into the * memory as part of the execution of the mailbox command. * * Return codes * 0 - Success (currently always return 0) **/ int lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, struct lpfc_dmabuf *mp) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); INIT_LIST_HEAD(&mp->list); mb->mbxCommand = MBX_READ_TOPOLOGY; mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE; mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys); mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys); /* Save address for later completion and set the owner to host so that * the FW knows this mailbox is available for processing. */ pmb->ctx_buf = (uint8_t *)mp; mb->mbxOwner = OWN_HOST; return (0); } /** * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The clear link attention mailbox command is issued to clear the link event * attention condition indicated by the Link Event bit of the Host Attention * (HSTATT) register. The link event attention condition is cleared only if * the event tag specified matches that of the current link event counter. * The current event tag is read using the read link attention event mailbox * command. * * This routine prepares the mailbox command for clearing HBA link attention * information. **/ void lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varClearLA.eventTag = phba->fc_eventTag; mb->mbxCommand = MBX_CLEAR_LA; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure link mailbox command is used before the initialize link * mailbox command to override default value and to configure link-oriented * parameters such as DID address and various timers. Typically, this * command would be used after an F_Port login to set the returned DID address * and the fabric timeout values. This command is not valid before a configure * port command has configured the HBA port. * * This routine prepares the mailbox command for configuring link on a HBA. **/ void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { struct lpfc_vport *vport = phba->pport; MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* NEW_FEATURE * SLI-2, Coalescing Response Feature. 
*/ if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) { mb->un.varCfgLnk.cr = 1; mb->un.varCfgLnk.ci = 1; mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; } mb->un.varCfgLnk.myId = vport->fc_myDID; mb->un.varCfgLnk.edtov = phba->fc_edtov; mb->un.varCfgLnk.arbtov = phba->fc_arbtov; mb->un.varCfgLnk.ratov = phba->fc_ratov; mb->un.varCfgLnk.rttov = phba->fc_rttov; mb->un.varCfgLnk.altov = phba->fc_altov; mb->un.varCfgLnk.crtov = phba->fc_crtov; mb->un.varCfgLnk.cscn = 0; if (phba->bbcredit_support && phba->cfg_enable_bbcr) { mb->un.varCfgLnk.cscn = 1; mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def, &phba->sli4_hba.bbscn_params); } if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) mb->un.varCfgLnk.ack0_enable = 1; mb->mbxCommand = MBX_CONFIG_LINK; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_msi - Prepare a mailbox command for configuring msi-x * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure MSI-X mailbox command is used to configure the HBA's SLI-3 * MSI-X multi-message interrupt vector association to interrupt attention * conditions. * * Return codes * 0 - Success * -EINVAL - Failure **/ int lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; uint32_t attentionConditions[2]; /* Sanity check */ if (phba->cfg_use_msi != 2) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0475 Not configured for supporting MSI-X " "cfg_use_msi: 0x%x\n", phba->cfg_use_msi); return -EINVAL; } if (phba->sli_rev < 3) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0476 HBA not supporting SLI-3 or later " "SLI Revision: 0x%x\n", phba->sli_rev); return -EINVAL; } /* Clear mailbox command fields */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); /* * SLI-3, Message Signaled Interrupt Feature. */ /* Multi-message attention configuration */ attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT | HA_LATT | HA_MBATT); attentionConditions[1] = 0; mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0]; mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1]; /* * Set up message number to HA bit association */ #ifdef __BIG_ENDIAN_BITFIELD /* RA0 (FCP Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1; /* RA1 (Other Protocol Extra Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1; #else /* __LITTLE_ENDIAN_BITFIELD */ /* RA0 (FCP Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1; /* RA1 (Other Protocol Extra Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1; #endif /* Multi-message interrupt autoclear configuration*/ mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0]; mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1]; /* For now, HBA autoclear does not work reliably, disable it */ mb->un.varCfgMSI.autoClearHA[0] = 0; mb->un.varCfgMSI.autoClearHA[1] = 0; /* Set command and owner bit */ mb->mbxCommand = MBX_CONFIG_MSI; mb->mbxOwner = OWN_HOST; return 0; } /** * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @topology: the link topology for the link to be initialized to. * @linkspeed: the link speed for the link to be initialized to. * * The initialize link mailbox command is used to initialize the Fibre * Channel link. This command must follow a configure port command that * establishes the mode of operation. 
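 *
 * For illustration only, a caller that wants point-to-point topology with
 * auto-negotiated speed (both constants appear in the switch statements
 * below) could prepare the command as:
 *
 *   lpfc_init_link(phba, pmb, FLAGS_TOPOLOGY_MODE_PT_PT,
 *                  LPFC_USER_LINK_SPEED_AUTO);
 *
 * before handing @pmb to the mailbox issue path.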
* * This routine prepares the mailbox command for initializing link on a HBA * with the specified link topology and speed. **/ void lpfc_init_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) { lpfc_vpd_t *vpd; MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); switch (topology) { case FLAGS_TOPOLOGY_MODE_LOOP_PT: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; break; case FLAGS_TOPOLOGY_MODE_PT_PT: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; break; case FLAGS_TOPOLOGY_MODE_LOOP: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; break; case FLAGS_TOPOLOGY_MODE_PT_LOOP: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; break; case FLAGS_LOCAL_LB: mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB; break; } /* Topology handling for ASIC_GEN_NUM 0xC and later */ if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 || phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) && !(phba->sli4_hba.pc_sli4_params.pls) && mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; } /* Enable asynchronous ABTS responses from firmware */ if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp) mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; /* NEW_FEATURE * Setting up the link speed */ vpd = &phba->vpd; if (vpd->rev.feaLevelHigh >= 0x02){ switch(linkspeed){ case LPFC_USER_LINK_SPEED_1G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_1G; break; case LPFC_USER_LINK_SPEED_2G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_2G; break; case LPFC_USER_LINK_SPEED_4G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_4G; break; case LPFC_USER_LINK_SPEED_8G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_8G; break; case LPFC_USER_LINK_SPEED_10G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_10G; break; case LPFC_USER_LINK_SPEED_16G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_16G; break; case LPFC_USER_LINK_SPEED_32G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_32G; break; case LPFC_USER_LINK_SPEED_64G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_64G; break; case LPFC_USER_LINK_SPEED_AUTO: default: mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; break; } } else mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK; mb->mbxOwner = OWN_HOST; mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA; return; } /** * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @vpi: virtual N_Port identifier. * * The read service parameter mailbox command is used to read the HBA port * service parameters. The service parameters are read into the buffer * specified directly by a BDE in the mailbox command. These service * parameters may then be used to build the payload of an N_Port/F_POrt * login request and reply (LOGI/ACC). 
* * This routine prepares the mailbox command for reading HBA port service * parameters. The DMA memory is allocated in this function and the addresses * are populated into the mailbox command for the HBA to DMA the service * parameters into. * * Return codes * 0 - Success * 1 - DMA memory allocation failed **/ int lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) { struct lpfc_dmabuf *mp; MAILBOX_t *mb; int rc; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* Get a buffer to hold the HBAs Service Parameters */ rc = lpfc_mbox_rsrc_prep(phba, pmb); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "0301 READ_SPARAM: no buffers\n"); return 1; } mp = pmb->ctx_buf; mb = &pmb->u.mb; mb->mbxOwner = OWN_HOST; mb->mbxCommand = MBX_READ_SPARM64; mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); if (phba->sli_rev >= LPFC_SLI_REV3) mb->un.varRdSparm.vpi = phba->vpi_ids[vpi]; return (0); } /** * lpfc_unreg_did - Prepare a mailbox command for unregistering DID * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. * @pmb: pointer to the driver internal queue element for mailbox command. * * The unregister DID mailbox command is used to unregister an N_Port/F_Port * login for an unknown RPI by specifying the DID of a remote port. This * command frees an RPI context in the HBA port. This has the effect of * performing an implicit N_Port/F_Port logout. * * This routine prepares the mailbox command for unregistering a remote * N_Port/F_Port (DID) login. **/ void lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; mb->un.varUnregDID.vpi = vpi; if ((vpi != 0xffff) && (phba->sli_rev == LPFC_SLI_REV4)) mb->un.varUnregDID.vpi = phba->vpi_ids[vpi]; mb->mbxCommand = MBX_UNREG_D_ID; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_config - Prepare a mailbox command for reading HBA configuration * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The read configuration mailbox command is used to read the HBA port * configuration parameters. This mailbox command provides a method for * seeing any parameters that may have changed via various configuration * mailbox commands. * * This routine prepares the mailbox command for reading out HBA configuration * parameters. **/ void lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_CONFIG; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The read link status mailbox command is used to read the link status from * the HBA. Link status includes all link-related error counters. These * counters are maintained by the HBA and originated in the link hardware * unit. Note that all of these counters wrap. * * This routine prepares the mailbox command for reading out HBA link status. 
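 *
 * As a sketch only, modeled on the MBX_NOWAIT pattern used by
 * lpfc_sli4_unreg_all_rpis elsewhere in this file, a caller could issue
 * the prepared command asynchronously; a real caller would normally
 * install its own completion handler so the returned link counters can
 * be examined, and vport is assumed to be the caller's vport pointer:
 *
 *   LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *   if (mbox) {
 *           lpfc_read_lnk_stat(phba, mbox);
 *           mbox->vport = vport;
 *           mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *           if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 *               MBX_NOT_FINISHED)
 *                   mempool_free(mbox, phba->mbox_mem_pool);
 *   }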
**/ void lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_LNK_STAT; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_reg_rpi - Prepare a mailbox command for registering remote login * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. * @param: pointer to memory holding the server parameters. * @pmb: pointer to the driver internal queue element for mailbox command. * @rpi: the rpi to use in the registration (usually only used for SLI4. * * The registration login mailbox command is used to register an N_Port or * F_Port login. This registration allows the HBA to cache the remote N_Port * service parameters internally and thereby make the appropriate FC-2 * decisions. The remote port service parameters are handed off by the driver * to the HBA using a descriptor entry that directly identifies a buffer in * host memory. In exchange, the HBA returns an RPI identifier. * * This routine prepares the mailbox command for registering remote port login. * The function allocates DMA buffer for passing the service parameters to the * HBA with the mailbox command. * * Return codes * 0 - Success * 1 - DMA memory allocation failed **/ int lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi) { MAILBOX_t *mb = &pmb->u.mb; uint8_t *sparam; struct lpfc_dmabuf *mp; int rc; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRegLogin.rpi = 0; if (phba->sli_rev == LPFC_SLI_REV4) mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi]; if (phba->sli_rev >= LPFC_SLI_REV3) mb->un.varRegLogin.vpi = phba->vpi_ids[vpi]; mb->un.varRegLogin.did = did; mb->mbxOwner = OWN_HOST; /* Get a buffer to hold NPorts Service Parameters */ rc = lpfc_mbox_rsrc_prep(phba, pmb); if (rc) { mb->mbxCommand = MBX_REG_LOGIN64; /* REG_LOGIN: no buffers */ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " "rpi x%x\n", vpi, did, rpi); return 1; } /* Copy param's into a new buffer */ mp = pmb->ctx_buf; sparam = mp->virt; memcpy(sparam, param, sizeof (struct serv_parm)); /* Finish initializing the mailbox. */ mb->mbxCommand = MBX_REG_LOGIN64; mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); return 0; } /** * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @rpi: remote port identifier * @pmb: pointer to the driver internal queue element for mailbox command. * * The unregistration login mailbox command is used to unregister an N_Port * or F_Port login. This command frees an RPI context in the HBA. It has the * effect of performing an implicit N_Port/F_Port logout. * * This routine prepares the mailbox command for unregistering remote port * login. * * For SLI4 ports, the rpi passed to this function must be the physical * rpi value, not the logical index. 
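 *
 * For example (sketch only), an SLI4 caller that holds a logical rpi
 * index would translate it through the phba->sli4_hba.rpi_ids[] array,
 * the same mapping lpfc_reg_rpi uses above, before building the command:
 *
 *   lpfc_unreg_login(phba, vport->vpi,
 *                    phba->sli4_hba.rpi_ids[rpi_index], mbox);
 *
 * where rpi_index is a hypothetical local holding the logical rpi value.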
**/ void lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregLogin.rpi = rpi; mb->un.varUnregLogin.rsvd1 = 0; if (phba->sli_rev >= LPFC_SLI_REV3) mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi]; mb->mbxCommand = MBX_UNREG_LOGIN; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA. * @vport: pointer to a vport object. * * This routine sends mailbox command to unregister all active RPIs for * a vport. **/ void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { /* * For SLI4 functions, the rpi field is overloaded for * the vport context unreg all. This routine passes * 0 for the rpi field in lpfc_unreg_login for compatibility * with SLI3 and then overrides the rpi field with the * expected value for SLI4. */ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi], mbox); mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000; mbox->vport = vport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->ctx_ndlp = NULL; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) mempool_free(mbox, phba->mbox_mem_pool); } } /** * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier * @vport: pointer to a vport object. * @pmb: pointer to the driver internal queue element for mailbox command. * * The registration vport identifier mailbox command is used to activate a * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the * N_Port_ID against the information in the selected virtual N_Port context * block and marks it active to allow normal processing of IOCB commands and * received unsolicited exchanges. * * This routine prepares the mailbox command for registering a virtual N_Port. **/ void lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_hba *phba = vport->phba; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* * Set the re-reg VPI bit for f/w to update the MAC address. */ if ((phba->sli_rev == LPFC_SLI_REV4) && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) mb->un.varRegVpi.upd = 1; mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi]; mb->un.varRegVpi.sid = vport->fc_myDID; if (phba->sli_rev == LPFC_SLI_REV4) mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi]; else mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, sizeof(struct lpfc_name)); mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]); mb->mbxCommand = MBX_REG_VPI; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @pmb: pointer to the driver internal queue element for mailbox command. * * The unregistration vport identifier mailbox command is used to inactivate * a virtual N_Port. The driver must have logged out and unregistered all * remote N_Ports to abort any activity on the virtual N_Port. The HBA will * unregisters any default RPIs associated with the specified vpi, aborting * any active exchanges. The HBA will post the mailbox response after making * the virtual N_Port inactive. * * This routine prepares the mailbox command for unregistering a virtual * N_Port. 
**/ void lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); if (phba->sli_rev == LPFC_SLI_REV3) mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi]; else if (phba->sli_rev >= LPFC_SLI_REV4) mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi]; mb->mbxCommand = MBX_UNREG_VPI; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB) * @phba: pointer to lpfc hba data structure. * * This routine sets up and initializes the IOCB rings in the Port Control * Block (PCB). **/ static void lpfc_config_pcb_setup(struct lpfc_hba * phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; PCB_t *pcbp = phba->pcb; dma_addr_t pdma_addr; uint32_t offset; uint32_t iocbCnt = 0; int i; pcbp->maxRing = (psli->num_rings - 1); for (i = 0; i < psli->num_rings; i++) { pring = &psli->sli3_ring[i]; pring->sli.sli3.sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : SLI2_IOCB_CMD_SIZE; pring->sli.sli3.sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE : SLI2_IOCB_RSP_SIZE; /* A ring MUST have both cmd and rsp entries defined to be valid */ if ((pring->sli.sli3.numCiocb == 0) || (pring->sli.sli3.numRiocb == 0)) { pcbp->rdsc[i].cmdEntries = 0; pcbp->rdsc[i].rspEntries = 0; pcbp->rdsc[i].cmdAddrHigh = 0; pcbp->rdsc[i].rspAddrHigh = 0; pcbp->rdsc[i].cmdAddrLow = 0; pcbp->rdsc[i].rspAddrLow = 0; pring->sli.sli3.cmdringaddr = NULL; pring->sli.sli3.rspringaddr = NULL; continue; } /* Command ring setup for ring */ pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb; offset = (uint8_t *) &phba->IOCBs[iocbCnt] - (uint8_t *) phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); iocbCnt += pring->sli.sli3.numCiocb; /* Response ring setup for ring */ pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt]; pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb; offset = (uint8_t *)&phba->IOCBs[iocbCnt] - (uint8_t *)phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); iocbCnt += pring->sli.sli3.numRiocb; } } /** * lpfc_read_rev - Prepare a mailbox command for reading HBA revision * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The read revision mailbox command is used to read the revision levels of * the HBA components. These components include hardware units, resident * firmware, and available firmware. HBAs that supports SLI-3 mode of * operation provide different response information depending on the version * requested by the driver. * * This routine prepares the mailbox command for reading HBA revision * information. 
**/ void lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRdRev.cv = 1; mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ mb->mbxCommand = MBX_READ_REV; mb->mbxOwner = OWN_HOST; return; } void lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_mqe *mqe; switch (mb->mbxCommand) { case MBX_READ_REV: mqe = &pmb->u.mqe; lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name, mqe->un.read_rev.fw_name, 16); lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name, mqe->un.read_rev.ulp_fw_name, 16); break; default: break; } return; } /** * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. * @hbq_desc: pointer to the HBQ selection profile descriptor. * * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA * tests the incoming frames' R_CTL/TYPE fields with works 10:15 and performs * the Sequence Length Test using the fields in the Selection Profile 2 * extension in words 20:31. **/ static void lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, struct lpfc_hbq_init *hbq_desc) { hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt; hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen; hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; } /** * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. * @hbq_desc: pointer to the HBQ selection profile descriptor. * * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs * the Sequence Length Test and Byte Field Test using the fields in the * Selection Profile 3 extension in words 20:31. **/ static void lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, struct lpfc_hbq_init *hbq_desc) { hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt; hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen; hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff; hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff; memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch, sizeof(hbqmb->profiles.profile3.cmdmatch)); } /** * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. * @hbq_desc: pointer to the HBQ selection profile descriptor. * * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The * HBA tests the initial frame of an incoming sequence using the frame's * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test * and Byte Field Test using the fields in the Selection Profile 5 extension * words 20:31. **/ static void lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, struct lpfc_hbq_init *hbq_desc) { hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt; hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen; hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff; hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff; memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch, sizeof(hbqmb->profiles.profile5.cmdmatch)); } /** * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ * @phba: pointer to lpfc hba data structure. * @id: HBQ identifier. * @hbq_desc: pointer to the HBA descriptor data structure. * @hbq_entry_index: index of the HBQ entry data structures. 
* @pmb: pointer to the driver internal queue element for mailbox command. * * The configure HBQ (Host Buffer Queue) mailbox command is used to configure * an HBQ. The configuration binds events that require buffers to a particular * ring and HBQ based on a selection profile. * * This routine prepares the mailbox command for configuring an HBQ. **/ void lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, struct lpfc_hbq_init *hbq_desc, uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) { int i; MAILBOX_t *mb = &pmb->u.mb; struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); hbqmb->hbqId = id; hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */ hbqmb->recvNotify = hbq_desc->rn; /* Receive * Notification */ hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks * # in words 0-19 */ hbqmb->profile = hbq_desc->profile; /* Selection profile: * 0 = all, * 7 = logentry */ hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring * e.g. Ring0=b0001, * ring2=b0100 */ hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4 * or 5 */ hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this * HBQ will be used * for LogEntry * buffers */ hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) + hbq_entry_index * sizeof(struct lpfc_hbq_entry); hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys); mb->mbxCommand = MBX_CONFIG_HBQ; mb->mbxOwner = OWN_HOST; /* Copy info for profiles 2,3,5. Other * profiles this area is reserved */ if (hbq_desc->profile == 2) lpfc_build_hbq_profile2(hbqmb, hbq_desc); else if (hbq_desc->profile == 3) lpfc_build_hbq_profile3(hbqmb, hbq_desc); else if (hbq_desc->profile == 5) lpfc_build_hbq_profile5(hbqmb, hbq_desc); /* Return if no rctl / type masks for this HBQ */ if (!hbq_desc->mask_count) return; /* Otherwise we setup specific rctl / type masks for this HBQ */ for (i = 0; i < hbq_desc->mask_count; i++) { hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch; hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask; hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch; hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask; } return; } /** * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring * @phba: pointer to lpfc hba data structure. * @ring: ring number/index * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure ring mailbox command is used to configure an IOCB ring. This * configuration binds from one to six of HBA RC_CTL/TYPE mask entries to the * ring. This is used to map incoming sequences to a particular ring whose * RC_CTL/TYPE mask entry matches that of the sequence. The driver should not * attempt to configure a ring whose number is greater than the number * specified in the Port Control Block (PCB). It is an error to issue the * configure ring command more than once with the same ring number. The HBA * returns an error if the driver attempts this. * * This routine prepares the mailbox command for configuring IOCB ring. 
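 *
 * A hedged sketch of how a setup path might honor the one-command-per-ring
 * rule described above: each SLI-3 ring index is configured exactly once,
 * reusing a single mailbox element (the issue step between iterations is
 * intentionally elided here):
 *
 *   for (i = 0; i < phba->sli.num_rings; i++) {
 *           lpfc_config_ring(phba, i, pmb);
 *           // issue pmb and wait for completion before the next ring
 *   }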
**/ void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { int i; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_sli *psli; struct lpfc_sli_ring *pring; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varCfgRing.ring = ring; mb->un.varCfgRing.maxOrigXchg = 0; mb->un.varCfgRing.maxRespXchg = 0; mb->un.varCfgRing.recvNotify = 1; psli = &phba->sli; pring = &psli->sli3_ring[ring]; mb->un.varCfgRing.numMask = pring->num_mask; mb->mbxCommand = MBX_CONFIG_RING; mb->mbxOwner = OWN_HOST; /* Is this ring configured for a specific profile */ if (pring->prt[0].profile) { mb->un.varCfgRing.profile = pring->prt[0].profile; return; } /* Otherwise we setup specific rctl / type masks for this ring */ for (i = 0; i < pring->num_mask; i++) { mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) mb->un.varCfgRing.rrRegs[i].rmask = 0xff; else mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type; mb->un.varCfgRing.rrRegs[i].tmask = 0xff; } return; } /** * lpfc_config_port - Prepare a mailbox command for configuring port * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure port mailbox command is used to identify the Port Control * Block (PCB) in the driver memory. After this command is issued, the * driver must not access the mailbox in the HBA without first resetting * the HBA. The HBA may copy the PCB information to internal storage for * subsequent use; the driver can not change the PCB information unless it * resets the HBA. * * This routine prepares the mailbox command for configuring port. **/ void lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; MAILBOX_t *mb = &pmb->u.mb; dma_addr_t pdma_addr; uint32_t bar_low, bar_high; size_t offset; struct lpfc_hgp hgp; int i; uint32_t pgp_offset; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_CONFIG_PORT; mb->mbxOwner = OWN_HOST; mb->un.varCfgPort.pcbLen = sizeof(PCB_t); offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); /* Always Host Group Pointer is in SLIM */ mb->un.varCfgPort.hps = 1; /* If HBA supports SLI=3 ask for it */ if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); if (phba->max_vpi && phba->cfg_enable_npiv && phba->vpd.sli3Feat.cmv) { mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI; mb->un.varCfgPort.cmv = 1; } else mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; } else phba->sli_rev = LPFC_SLI_REV2; mb->un.varCfgPort.sli_mode = phba->sli_rev; /* If this is an SLI3 port, configure async status notification. 
*/ if (phba->sli_rev == LPFC_SLI_REV3) mb->un.varCfgPort.casabt = 1; /* Now setup pcb */ phba->pcb->type = TYPE_NATIVE_SLI2; phba->pcb->feature = FEATURE_INITIAL_SLI2; /* Setup Mailbox pointers */ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE; offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr); phba->pcb->mbAddrLow = putPaddrLow(pdma_addr); /* * Setup Host Group ring pointer. * * For efficiency reasons, the ring get/put pointers can be * placed in adapter memory (SLIM) rather than in host memory. * This allows firmware to avoid PCI reads/writes when updating * and checking pointers. * * The firmware recognizes the use of SLIM memory by comparing * the address of the get/put pointers structure with that of * the SLIM BAR (BAR0). * * Caution: be sure to use the PCI config space value of BAR0/BAR1 * (the hardware's view of the base address), not the OS's * value of pci_resource_start() as the OS value may be a cookie * for ioremap/iomap. */ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low); pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high); /* * Set up HGP - Port Memory * * The port expects the host get/put pointers to reside in memory * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes) * area of SLIM. In SLI-2 mode, there's an additional 16 reserved * words (0x40 bytes). This area is not reserved if HBQs are * configured in SLI-3. * * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80 * RR0Get 0xc4 0x84 * CR1Put 0xc8 0x88 * RR1Get 0xcc 0x8c * CR2Put 0xd0 0x90 * RR2Get 0xd4 0x94 * CR3Put 0xd8 0x98 * RR3Get 0xdc 0x9c * * Reserved 0xa0-0xbf * If HBQs configured: * HBQ 0 Put ptr 0xc0 * HBQ 1 Put ptr 0xc4 * HBQ 2 Put ptr 0xc8 * ...... 
* HBQ(M-1)Put Pointer 0xc0+(M-1)*4 * */ if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) { phba->host_gp = (struct lpfc_hgp __iomem *) &phba->mbox->us.s2.host[0]; phba->hbq_put = NULL; offset = (uint8_t *)&phba->mbox->us.s2.host - (uint8_t *)phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr); phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr); } else { /* Always Host Group Pointer is in SLIM */ mb->un.varCfgPort.hps = 1; if (phba->sli_rev == 3) { phba->host_gp = &mb_slim->us.s3.host[0]; phba->hbq_put = &mb_slim->us.s3.hbq_put[0]; } else { phba->host_gp = &mb_slim->us.s2.host[0]; phba->hbq_put = NULL; } /* mask off BAR0's flag bits 0 - 3 */ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + (void __iomem *)phba->host_gp - (void __iomem *)phba->MBslimaddr; if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) phba->pcb->hgpAddrHigh = bar_high; else phba->pcb->hgpAddrHigh = 0; /* write HGP data to SLIM at the required longword offset */ memset(&hgp, 0, sizeof(struct lpfc_hgp)); for (i = 0; i < phba->sli.num_rings; i++) { lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, sizeof(*phba->host_gp)); } } /* Setup Port Group offset */ if (phba->sli_rev == 3) pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s3_pgp.port); else pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); pdma_addr = phba->slim2p.phys + pgp_offset; phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr); /* Use callback routine to setp rings in the pcb */ lpfc_config_pcb_setup(phba); /* special handling for LC HBAs */ if (lpfc_is_LC_HBA(phba->pcidev->device)) { uint32_t hbainit[5]; lpfc_hba_init(phba, hbainit); memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20); } /* Swap PCB if needed */ lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t)); } /** * lpfc_kill_board - Prepare a mailbox command for killing board * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The kill board mailbox command is used to tell firmware to perform a * graceful shutdown of a channel on a specified board to prepare for reset. * When the kill board mailbox command is received, the ER3 bit is set to 1 * in the Host Status register and the ER Attention bit is set to 1 in the * Host Attention register of the HBA function that received the kill board * command. * * This routine prepares the mailbox command for killing the board in * preparation for a graceful shutdown. **/ void lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_KILL_BOARD; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * Driver maintains a internal mailbox command queue implemented as a linked * list. When a mailbox command is issued, it shall be put into the mailbox * command queue such that they shall be processed orderly as HBA can process * one mailbox command at a time. **/ void lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) { struct lpfc_sli *psli; psli = &phba->sli; list_add_tail(&mbq->list, &psli->mboxq); psli->mboxq_cnt++; return; } /** * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue * @phba: pointer to lpfc hba data structure. 
 * * The driver maintains an internal mailbox command queue implemented as a linked * list. When a mailbox command is issued, it is put into the mailbox * command queue so that commands are processed in order, since the HBA can * process only one mailbox command at a time. After the HBA finishes processing a mailbox * command, the driver removes the next pending mailbox command from the head of * the mailbox command queue and sends it to the HBA for processing. * * Return codes * pointer to the driver internal queue element for mailbox command. **/ LPFC_MBOXQ_t * lpfc_mbox_get(struct lpfc_hba * phba) { LPFC_MBOXQ_t *mbq = NULL; struct lpfc_sli *psli = &phba->sli; list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list); if (mbq) psli->mboxq_cnt--; return mbq; } /** * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine puts the completed mailbox command into the mailbox command * complete list. This is the unlocked version of the routine. The mailbox * complete list is used by the driver worker thread to process mailbox * complete callback functions outside the driver interrupt handler. **/ void __lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) { list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); } /** * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine puts the completed mailbox command into the mailbox command * complete list. This is the locked version of the routine. The mailbox * complete list is used by the driver worker thread to process mailbox * complete callback functions outside the driver interrupt handler. **/ void lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) { unsigned long iflag; /* This function expects to be called from interrupt context */ spin_lock_irqsave(&phba->hbalock, iflag); __lpfc_mbox_cmpl_put(phba, mbq); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } /** * lpfc_mbox_cmd_check - Check the validity of a mailbox command * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to the driver internal queue element for mailbox command. * * This routine checks whether a mailbox command is valid to be issued. * This check is performed by the mailbox issue API when a client * issues a mailbox command to the mailbox transport. * * Return 0 - pass the check, -ENODEV - fail the check **/ int lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { /* Mailbox commands that have a completion handler must also have a * vport specified. */ if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) { if (!mboxq->vport) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, "1814 Mbox x%x failed, no vport\n", mboxq->u.mb.mbxCommand); dump_stack(); return -ENODEV; } } return 0; } /** * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command * @phba: pointer to lpfc hba data structure. * * This routine checks whether the HBA device is ready for posting a * mailbox command. It is used by the mailbox transport API at the time a * mailbox command is posted to the device.
* * Return 0 - pass the check, -ENODEV - fail the check **/ int lpfc_mbox_dev_check(struct lpfc_hba *phba) { /* If the PCI channel is in offline state, do not issue mbox */ if (unlikely(pci_channel_offline(phba->pcidev))) return -ENODEV; /* If the HBA is in error state, do not issue mbox */ if (phba->link_state == LPFC_HBA_ERROR) return -ENODEV; return 0; } /** * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to the driver internal queue element for mailbox command. * * This routine retrieves the proper timeout value according to the mailbox * command code. * * Return codes * Timeout value to be used for the given mailbox command **/ int lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { MAILBOX_t *mbox = &mboxq->u.mb; uint8_t subsys, opcode; switch (mbox->mbxCommand) { case MBX_WRITE_NV: /* 0x03 */ case MBX_DUMP_MEMORY: /* 0x17 */ case MBX_UPDATE_CFG: /* 0x1B */ case MBX_DOWN_LOAD: /* 0x1C */ case MBX_DEL_LD_ENTRY: /* 0x1D */ case MBX_WRITE_VPARMS: /* 0x32 */ case MBX_LOAD_AREA: /* 0x81 */ case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ case MBX_ACCESS_VDATA: /* 0xA5 */ return LPFC_MBOX_TMO_FLASH_CMD; case MBX_SLI4_CONFIG: /* 0x9b */ subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq); if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) { switch (opcode) { case LPFC_MBOX_OPCODE_READ_OBJECT: case LPFC_MBOX_OPCODE_WRITE_OBJECT: case LPFC_MBOX_OPCODE_READ_OBJECT_LIST: case LPFC_MBOX_OPCODE_DELETE_OBJECT: case LPFC_MBOX_OPCODE_GET_PROFILE_LIST: case LPFC_MBOX_OPCODE_SET_ACT_PROFILE: case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG: case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG: case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG: case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES: case LPFC_MBOX_OPCODE_SEND_ACTIVATION: case LPFC_MBOX_OPCODE_RESET_LICENSES: case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG: case LPFC_MBOX_OPCODE_GET_VPD_DATA: case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG: return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; } } if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) { switch (opcode) { case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS: return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; } } return LPFC_MBOX_SLI4_CONFIG_TMO; } return LPFC_MBOX_TMO; } /** * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command * @mbox: pointer to lpfc mbox command. * @sgentry: sge entry index. * @phyaddr: physical address for the sge * @length: Length of the sge. * * This routine sets up an entry in the non-embedded mailbox command at the sge * index location. **/ void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry, dma_addr_t phyaddr, uint32_t length) { struct lpfc_mbx_nembed_cmd *nembed_sge; nembed_sge = (struct lpfc_mbx_nembed_cmd *) &mbox->u.mqe.un.nembed_cmd; nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr); nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr); nembed_sge->sge[sgentry].length = length; } /** * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command * @mbox: pointer to lpfc mbox command. * @sgentry: sge entry index. * @sge: pointer to lpfc mailbox sge to load into. * * This routine gets an entry from the non-embedded mailbox command at the sge * index location. 
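 *
 * For example (illustrative only, mirroring what lpfc_sli4_mbox_cmd_free()
 * below does), a teardown path reads back each SGE that was stored with
 * lpfc_sli4_mbx_sge_set() and frees the page it describes:
 *
 *	lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
 *	phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
 *	dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
 *			  mbox->sge_array->addr[sgentry], phyaddr);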
**/ void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry, struct lpfc_mbx_sge *sge) { struct lpfc_mbx_nembed_cmd *nembed_sge; nembed_sge = (struct lpfc_mbx_nembed_cmd *) &mbox->u.mqe.un.nembed_cmd; sge->pa_lo = nembed_sge->sge[sgentry].pa_lo; sge->pa_hi = nembed_sge->sge[sgentry].pa_hi; sge->length = nembed_sge->sge[sgentry].length; } /** * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command * @phba: pointer to lpfc hba data structure. * @mbox: pointer to lpfc mbox command. * * This routine cleans up and releases an SLI4 mailbox command that was * configured using lpfc_sli4_config. It accounts for the embedded and * non-embedded config types. **/ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) { struct lpfc_mbx_sli4_config *sli4_cfg; struct lpfc_mbx_sge sge; dma_addr_t phyaddr; uint32_t sgecount, sgentry; sli4_cfg = &mbox->u.mqe.un.sli4_config; /* For embedded mbox command, just free the mbox command */ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { mempool_free(mbox, phba->mbox_mem_pool); return; } /* For non-embedded mbox command, we need to free the pages first */ sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr); /* There is nothing we can do if there is no sge address array */ if (unlikely(!mbox->sge_array)) { mempool_free(mbox, phba->mbox_mem_pool); return; } /* Each non-embedded DMA memory was allocated in the length of a page */ for (sgentry = 0; sgentry < sgecount; sgentry++) { lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, mbox->sge_array->addr[sgentry], phyaddr); } /* Free the sge address array memory */ kfree(mbox->sge_array); /* Finally, free the mailbox command itself */ mempool_free(mbox, phba->mbox_mem_pool); } /** * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command * @phba: pointer to lpfc hba data structure. * @mbox: pointer to lpfc mbox command. * @subsystem: The sli4 config sub mailbox subsystem. * @opcode: The sli4 config sub mailbox command opcode. * @length: Length of the sli4 config mailbox command (including sub-header). * @emb: True if embedded mbox command should be setup. * * This routine sets up the header fields of SLI4 specific mailbox command * for sending IOCTL command. * * Return: the actual length of the mbox command allocated (mostly useful * for none embedded mailbox command). 
**/ int lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb) { struct lpfc_mbx_sli4_config *sli4_config; union lpfc_sli4_cfg_shdr *cfg_shdr = NULL; uint32_t alloc_len; uint32_t resid_len; uint32_t pagen, pcount; void *viraddr; dma_addr_t phyaddr; /* Set up SLI4 mailbox command header fields */ memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG); /* Set up SLI4 ioctl command header fields */ sli4_config = &mbox->u.mqe.un.sli4_config; /* Setup for the embedded mbox command */ if (emb) { /* Set up main header fields */ bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); sli4_config->header.cfg_mhdr.payload_length = length; /* Set up sub-header fields following main header */ bf_set(lpfc_mbox_hdr_opcode, &sli4_config->header.cfg_shdr.request, opcode); bf_set(lpfc_mbox_hdr_subsystem, &sli4_config->header.cfg_shdr.request, subsystem); sli4_config->header.cfg_shdr.request.request_length = length - LPFC_MBX_CMD_HDR_LENGTH; return length; } /* Setup for the non-embedded mbox command */ pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE; pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; /* Allocate record for keeping SGE virtual addresses */ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), GFP_KERNEL); if (!mbox->sge_array) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2527 Failed to allocate non-embedded SGE " "array.\n"); return 0; } for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { /* The DMA memory is always allocated in the length of a * page even though the last SGE might not fill up to a * page, this is used as a priori size of SLI4_PAGE_SIZE for * the later DMA memory free. */ viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, &phyaddr, GFP_KERNEL); /* In case of malloc fails, proceed with whatever we have */ if (!viraddr) break; mbox->sge_array->addr[pagen] = viraddr; /* Keep the first page for later sub-header construction */ if (pagen == 0) cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; resid_len = length - alloc_len; if (resid_len > SLI4_PAGE_SIZE) { lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, SLI4_PAGE_SIZE); alloc_len += SLI4_PAGE_SIZE; } else { lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, resid_len); alloc_len = length; } } /* Set up main header fields in mailbox command */ sli4_config->header.cfg_mhdr.payload_length = alloc_len; bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen); /* Set up sub-header fields into the first page */ if (pagen > 0) { bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode); bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem); cfg_shdr->request.request_length = alloc_len - sizeof(union lpfc_sli4_cfg_shdr); } /* The sub-header is in DMA memory, which needs endian converstion */ if (cfg_shdr) lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, sizeof(union lpfc_sli4_cfg_shdr)); return alloc_len; } /** * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent. * @phba: pointer to lpfc hba data structure. * @mbox: pointer to an allocated lpfc mbox resource. * @exts_count: the number of extents, if required, to allocate. * @rsrc_type: the resource extent type. * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED. * * This routine completes the subcommand header for SLI4 resource extent * mailbox commands. It is called after lpfc_sli4_config. 
The caller must * pass an allocated mailbox and the attributes required to initialize the * mailbox correctly. * * Return: the actual length of the mbox command allocated. **/ int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t exts_count, uint16_t rsrc_type, bool emb) { uint8_t opcode = 0; struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL; void *virtaddr = NULL; /* Set up SLI4 ioctl command header fields */ if (emb == LPFC_SLI4_MBX_NEMBED) { /* Get the first SGE entry from the non-embedded DMA memory */ virtaddr = mbox->sge_array->addr[0]; if (virtaddr == NULL) return 1; n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; } /* * The resource type is common to all extent Opcodes and resides in the * same position. */ if (emb == LPFC_SLI4_MBX_EMBED) bf_set(lpfc_mbx_alloc_rsrc_extents_type, &mbox->u.mqe.un.alloc_rsrc_extents.u.req, rsrc_type); else { /* This is DMA data. Byteswap is required. */ bf_set(lpfc_mbx_alloc_rsrc_extents_type, n_rsrc_extnt, rsrc_type); lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4, &n_rsrc_extnt->word4, sizeof(uint32_t)); } /* Complete the initialization for the particular Opcode. */ opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox); switch (opcode) { case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT: if (emb == LPFC_SLI4_MBX_EMBED) bf_set(lpfc_mbx_alloc_rsrc_extents_cnt, &mbox->u.mqe.un.alloc_rsrc_extents.u.req, exts_count); else bf_set(lpfc_mbx_alloc_rsrc_extents_cnt, n_rsrc_extnt, exts_count); break; case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT: case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO: case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT: /* Initialization is complete.*/ break; default: lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2929 Resource Extent Opcode x%x is " "unsupported\n", opcode); return 1; } return 0; } /** * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd * @phba: pointer to lpfc hba data structure. * @mbox: pointer to lpfc mbox command queue entry. * * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall * be returned. **/ uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_sli4_config *sli4_cfg; union lpfc_sli4_cfg_shdr *cfg_shdr; if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) return LPFC_MBOX_SUBSYSTEM_NA; sli4_cfg = &mbox->u.mqe.un.sli4_config; /* For embedded mbox command, get opcode from embedded sub-header*/ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); } /* For non-embedded mbox command, get opcode from first dma page */ if (unlikely(!mbox->sge_array)) return LPFC_MBOX_SUBSYSTEM_NA; cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); } /** * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd * @phba: pointer to lpfc hba data structure. * @mbox: pointer to lpfc mbox command queue entry. * * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be * returned. 
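 *
 * Together with lpfc_sli_config_mbox_subsys_get(), this is how SLI_CONFIG
 * commands are classified, for example in lpfc_mbox_tmo_val() above
 * (condensed, illustrative rendering of that pattern):
 *
 *	subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
 *	opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
 *	if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON &&
 *	    opcode == LPFC_MBOX_OPCODE_WRITE_OBJECT)
 *		return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;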
**/ uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_sli4_config *sli4_cfg; union lpfc_sli4_cfg_shdr *cfg_shdr; if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) return LPFC_MBOX_OPCODE_NA; sli4_cfg = &mbox->u.mqe.un.sli4_config; /* For embedded mbox command, get opcode from embedded sub-header*/ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); } /* For non-embedded mbox command, get opcode from first dma page */ if (unlikely(!mbox->sge_array)) return LPFC_MBOX_OPCODE_NA; cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); } /** * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to lpfc mbox command. * @fcf_index: index to fcf table. * * This routine allocates and constructs a non-embedded mailbox command * for reading the FCF table entry referred to by @fcf_index. * * Return: 0 if the mailbox command is constructed successfully, otherwise * -ENOMEM. **/ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, struct lpfcMboxq *mboxq, uint16_t fcf_index) { void *virt_addr; uint8_t *bytep; struct lpfc_mbx_sge sge; uint32_t alloc_len, req_len; struct lpfc_mbx_read_fcf_tbl *read_fcf; if (!mboxq) return -ENOMEM; req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, LPFC_SLI4_MBX_NEMBED); if (alloc_len < req_len) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0291 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); return -ENOMEM; } /* Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); virt_addr = mboxq->sge_array->addr[0]; read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; /* Set up command fields */ bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); /* Perform necessary endian conversion */ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); return 0; } /** * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to lpfc mbox command. * * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES * mailbox command. **/ void lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) { /* Set up SLI4 mailbox command header fields */ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS); /* Set up host requested features. */ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable NPIV only if configured to do so.
*/ if (phba->max_vpi && phba->cfg_enable_npiv) bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); if (phba->nvmet_support) { bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); /* iaab/iaar NOT set for now */ bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0); bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0); } /* Enable Application Services Header for appheader VMID */ if (phba->cfg_vmid_app_header) { bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1); bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1); } return; } /** * lpfc_init_vfi - Initialize the INIT_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @vport: Vport associated with the VF. * * This routine initializes @mbox to all zeros and then fills in the mailbox * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI * in the context of an FCF. The driver issues this command to setup a VFI * before issuing a FLOGI to login to the VSAN. The driver should also issue a * REG_VFI after a successful VSAN login. **/ void lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) { struct lpfc_mbx_init_vfi *init_vfi; memset(mbox, 0, sizeof(*mbox)); mbox->vport = vport; init_vfi = &mbox->u.mqe.un.init_vfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); bf_set(lpfc_init_vfi_vr, init_vfi, 1); bf_set(lpfc_init_vfi_vt, init_vfi, 1); bf_set(lpfc_init_vfi_vp, init_vfi, 1); bf_set(lpfc_init_vfi_vfi, init_vfi, vport->phba->sli4_hba.vfi_ids[vport->vfi]); bf_set(lpfc_init_vfi_vpi, init_vfi, vport->phba->vpi_ids[vport->vpi]); bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); } /** * lpfc_reg_vfi - Initialize the REG_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @vport: vport associated with the VF. * @phys: BDE DMA bus address used to send the service parameters to the HBA. * * This routine initializes @mbox to all zeros and then fills in the mailbox * fields from @vport, and uses @buf as a DMAable buffer to send the vport's * fc service parameters to the HBA for this VFI. REG_VFI configures virtual * fabrics identified by VFI in the context of an FCF. 
**/ void lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) { struct lpfc_mbx_reg_vfi *reg_vfi; struct lpfc_hba *phba = vport->phba; uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0; memset(mbox, 0, sizeof(*mbox)); reg_vfi = &mbox->u.mqe.un.reg_vfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); bf_set(lpfc_reg_vfi_vfi, reg_vfi, phba->sli4_hba.vfi_ids[vport->vfi]); bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi); bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]); memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); reg_vfi->e_d_tov = phba->fc_edtov; reg_vfi->r_a_tov = phba->fc_ratov; if (phys) { reg_vfi->bde.addrHigh = putPaddrHigh(phys); reg_vfi->bde.addrLow = putPaddrLow(phys); reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; } bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); /* Only FC supports upd bit */ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && (vport->fc_flag & FC_VFI_REGISTERED) && (!phba->fc_topology_changed)) bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0); bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0); bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF; if (phba->bbcredit_support && phba->cfg_enable_bbcr && bbscn_fabric != 0) { bbscn_max = bf_get(lpfc_bbscn_max, &phba->sli4_hba.bbscn_params); if (bbscn_fabric <= bbscn_max) { bbscn_def = bf_get(lpfc_bbscn_def, &phba->sli4_hba.bbscn_params); if (bbscn_fabric > bbscn_def) bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_fabric); else bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def); bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1); } } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" " port_state:x%x topology chg:%d bbscn_fabric :%d\n", vport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[vport->vfi], phba->vpi_ids[vport->vpi], reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, vport->port_state, phba->fc_topology_changed, bbscn_fabric); } /** * lpfc_init_vpi - Initialize the INIT_VPI mailbox command * @phba: pointer to the hba structure to init the VPI for. * @mbox: pointer to lpfc mbox command to initialize. * @vpi: VPI to be initialized. * * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the * command to activate a virtual N_Port. The HBA assigns a MAC address to use * with the virtual N Port. The SLI Host issues this command before issuing a * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a * successful virtual NPort login. **/ void lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, phba->vpi_ids[vpi]); bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, phba->sli4_hba.vfi_ids[phba->pport->vfi]); } /** * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @vport: vport associated with the VF. * * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric * (logical NPort) into the inactive state. The SLI Host must have logged out * and unregistered all remote N_Ports to abort any activity on the virtual * fabric. 
The SLI Port posts the mailbox response after marking the virtual * fabric inactive. **/ void lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vport->phba->sli4_hba.vfi_ids[vport->vfi]); } /** * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23 * @phba: pointer to the hba structure. * @mbox: pointer to lpfc mbox command to initialize. * * This function creates a SLI4 dump mailbox command to dump config * region 23. **/ int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox) { struct lpfc_dmabuf *mp = NULL; MAILBOX_t *mb; int rc; memset(mbox, 0, sizeof(*mbox)); mb = &mbox->u.mb; rc = lpfc_mbox_rsrc_prep(phba, mbox); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "2569 %s: memory allocation failed\n", __func__); return 1; } mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.region_id = DMP_REGION_23; mb->un.varDmp.sli4_length = DMP_RGN23_SIZE; mp = mbox->ctx_buf; mb->un.varWords[3] = putPaddrLow(mp->phys); mb->un.varWords[4] = putPaddrHigh(mp->phys); return 0; } static void lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { MAILBOX_t *mb; int rc = FAILURE; struct lpfc_rdp_context *rdp_context = (struct lpfc_rdp_context *)(mboxq->ctx_ndlp); mb = &mboxq->u.mb; if (mb->mbxStatus) goto mbx_failed; memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR)); rc = SUCCESS; mbx_failed: lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); rdp_context->cmpl(phba, rdp_context, rc); } static void lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf; struct lpfc_rdp_context *rdp_context = (struct lpfc_rdp_context *)(mbox->ctx_ndlp); if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) goto error_mbox_free; lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, DMP_SFF_PAGE_A2_SIZE); lpfc_read_lnk_stat(phba, mbox); mbox->vport = rdp_context->ndlp->vport; /* Save the dma buffer for cleanup in the final completion.
*/ mbox->ctx_buf = mp; mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat; mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) goto error_mbox_free; return; error_mbox_free: lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); rdp_context->cmpl(phba, rdp_context, FAILURE); } void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { int rc; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); struct lpfc_rdp_context *rdp_context = (struct lpfc_rdp_context *)(mbox->ctx_ndlp); if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) goto error; lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, DMP_SFF_PAGE_A0_SIZE); memset(mbox, 0, sizeof(*mbox)); memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); INIT_LIST_HEAD(&mp->list); /* save address for completion */ mbox->ctx_buf = mp; mbox->vport = rdp_context->ndlp->vport; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); bf_set(lpfc_mbx_memory_dump_type3_type, &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); bf_set(lpfc_mbx_memory_dump_type3_link, &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_memory_dump_type3_page_no, &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); bf_set(lpfc_mbx_memory_dump_type3_length, &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2; mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto error; return; error: lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); rdp_context->cmpl(phba, rdp_context, FAILURE); } /* * lpfc_sli4_dump_page_a0 - Dump sli4 read SFP Diagnostic. * @phba: pointer to the hba structure containing. * @mbox: pointer to lpfc mbox command to initialize. * * This function create a SLI4 dump mailbox command to dump configure * type 3 page 0xA0. */ int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox) { int rc; struct lpfc_dmabuf *mp = NULL; memset(mbox, 0, sizeof(*mbox)); rc = lpfc_mbox_rsrc_prep(phba, mbox); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "3569 dump type 3 page 0xA0 allocation failed\n"); return 1; } bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); bf_set(lpfc_mbx_memory_dump_type3_type, &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); bf_set(lpfc_mbx_memory_dump_type3_link, &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_memory_dump_type3_page_no, &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0); bf_set(lpfc_mbx_memory_dump_type3_length, &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); mp = mbox->ctx_buf; mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); return 0; } /** * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command * @phba: pointer to the hba structure containing the FCF index and RQ ID. * @mbox: pointer to lpfc mbox command to initialize. * * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The * SLI Host uses the command to activate an FCF after it has acquired FCF * information via a READ_FCF mailbox command. This mailbox command also is used * to indicate where received unsolicited frames from this FCF will be sent. By * default this routine will set up the FCF to forward all unsolicited frames * to the RQ ID passed in the @phba. 
This can be overridden by the caller for * more complicated setups. **/ void lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) { struct lpfc_mbx_reg_fcfi *reg_fcfi; memset(mbox, 0, sizeof(*mbox)); reg_fcfi = &mbox->u.mqe.un.reg_fcfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); if (phba->nvmet_support == 0) { bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); /* Match everything - rq_id0 */ bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); /* addr mode is bit wise inverted value of fcf addr_mode */ bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); } else { /* This is ONLY for NVMET MRQ == 1 */ if (phba->cfg_nvmet_mrq != 1) return; bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); /* Match type FCP - rq_id0 */ bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP); bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff); bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD); bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); /* Match everything else - rq_id1 */ bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0); } bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.current_rec.fcf_indx); if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.current_rec.vlan_id); } } /** * lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command * @phba: pointer to the hba structure containing the FCF index and RQ ID. * @mbox: pointer to lpfc mbox command to initialize. * @mode: 0 to register FCFI, 1 to register MRQs * * The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs). * The SLI Host uses the command to activate an FCF after it has acquired FCF * information via a READ_FCF mailbox command. This mailbox command also is used * to indicate where received unsolicited frames from this FCF will be sent. By * default this routine will set up the FCF to forward all unsolicited frames * to the RQ ID passed in the @phba. This can be overridden by the caller for * more complicated setups. 
**/ void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode) { struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi; /* This is ONLY for MRQ */ if (phba->cfg_nvmet_mrq <= 1) return; memset(mbox, 0, sizeof(*mbox)); reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ); if (mode == 0) { bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi, phba->fcf.current_rec.fcf_indx); if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi, phba->fcf.current_rec.vlan_id); } return; } bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi, phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id); /* Match NVME frames of type FCP (protocol NVME) - rq_id0 */ bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP); bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff); bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD); bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff); bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */ bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */ bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq); bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); /* Match everything - rq_id1 */ bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0); bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); } /** * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @fcfi: FCFI to be unregistered. * * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). * The SLI Host uses the command to inactivate an FCFI. **/ void lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); } /** * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @ndlp: The nodelist structure that describes the RPI to resume. * * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a * link event. **/ void lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = ndlp->phba; struct lpfc_mbx_resume_rpi *resume_rpi; memset(mbox, 0, sizeof(*mbox)); resume_rpi = &mbox->u.mqe.un.resume_rpi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); bf_set(lpfc_resume_rpi_index, resume_rpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); resume_rpi->event_tag = ndlp->phba->fc_eventTag; }
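/*
 * Illustrative sketch only -- not part of the lpfc driver.  It shows how the
 * helpers above are typically combined: allocate a mailbox from the driver's
 * mailbox mempool, build a non-embedded SLI4_CONFIG request with
 * lpfc_sli4_config(), derive a timeout with lpfc_mbox_tmo_val(), and release
 * everything with lpfc_sli4_mbox_cmd_free().  The subsystem, opcode and
 * req_len values are placeholders supplied by the caller, and error handling
 * is reduced to the minimum.
 */
static int __maybe_unused
lpfc_example_sli4_cfg_sketch(struct lpfc_hba *phba, uint8_t subsystem,
			     uint8_t opcode, uint32_t req_len)
{
	LPFC_MBOXQ_t *mbox;
	uint32_t alloc_len;
	int timeout;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Non-embedded: the payload is carried in page-sized DMA buffers */
	alloc_len = lpfc_sli4_config(phba, mbox, subsystem, opcode,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* The timeout is chosen per command/subsystem/opcode */
	timeout = lpfc_mbox_tmo_val(phba, mbox);
	(void)timeout;

	/*
	 * A real caller would now fill in the request page(s), issue the
	 * command (e.g. with lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT))
	 * and free it from the completion handler; here it is simply torn
	 * down again.
	 */
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return 0;
}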
linux-master
drivers/scsi/lpfc/lpfc_mbox.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> #include <linux/t10-pi.h> #include <linux/crc-t10dif.h> #include <linux/blk-cgroup.h> #include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_version.h" #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 static char *dif_op_str[] = { "PROT_NORMAL", "PROT_READ_INSERT", "PROT_WRITE_STRIP", "PROT_READ_STRIP", "PROT_WRITE_INSERT", "PROT_READ_PASS", "PROT_WRITE_PASS", }; struct scsi_dif_tuple { __be16 guard_tag; /* Checksum */ __be16 app_tag; /* Opaque storage */ __be32 ref_tag; /* Target LBA or indirect LBA */ }; static struct lpfc_rport_data * lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; if (vport->phba->cfg_fof) return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; else return (struct lpfc_rport_data *)sdev->hostdata; } static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static void lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); /** * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. * @phba: Pointer to HBA object. * @lpfc_cmd: lpfc scsi command object pointer. * * This function is called from the lpfc_prep_task_mgmt_cmd function to * set the last bit in the response sge entry. 
**/ static void lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; if (sgl) { sgl += 1; sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); } } /** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. * * This routine is called when there is resource error in driver or firmware. * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine * posts at most 1 event each second. This routine wakes up worker thread of * @phba to process WORKER_RAM_DOWN_EVENT event. * * This routine should be called with no lock held. **/ void lpfc_rampdown_queue_depth(struct lpfc_hba *phba) { unsigned long flags; uint32_t evt_posted; unsigned long expires; spin_lock_irqsave(&phba->hbalock, flags); atomic_inc(&phba->num_rsrc_err); phba->last_rsrc_error_time = jiffies; expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; if (time_after(expires, jiffies)) { spin_unlock_irqrestore(&phba->hbalock, flags); return; } phba->last_ramp_down_time = jiffies; spin_unlock_irqrestore(&phba->hbalock, flags); spin_lock_irqsave(&phba->pport->work_port_lock, flags); evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; if (!evt_posted) phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); if (!evt_posted) lpfc_worker_wake_up(phba); return; } /** * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler * @phba: The Hba for which this call is being executed. * * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker * thread.This routine reduces queue depth for all scsi device on each vport * associated with @phba. **/ void lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct Scsi_Host *shost; struct scsi_device *sdev; unsigned long new_queue_depth; unsigned long num_rsrc_err, num_cmd_success; int i; num_rsrc_err = atomic_read(&phba->num_rsrc_err); num_cmd_success = atomic_read(&phba->num_cmd_success); /* * The error and success command counters are global per * driver instance. If another handler has already * operated on this error event, just exit. */ if (num_rsrc_err == 0) return; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { new_queue_depth = sdev->queue_depth * num_rsrc_err / (num_rsrc_err + num_cmd_success); if (!new_queue_depth) new_queue_depth = sdev->queue_depth - 1; else new_queue_depth = sdev->queue_depth - new_queue_depth; scsi_change_queue_depth(sdev, new_queue_depth); } } lpfc_destroy_vport_work_array(phba, vports); atomic_set(&phba->num_rsrc_err, 0); atomic_set(&phba->num_cmd_success, 0); } /** * lpfc_scsi_dev_block - set all scsi hosts to block state * @phba: Pointer to HBA context object. * * This function walks vport list and set each SCSI host to block state * by invoking fc_remote_port_delete() routine. This function is invoked * with EEH when device's PCI slot has been permanently disabled. 
**/ void lpfc_scsi_dev_block(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct Scsi_Host *shost; struct scsi_device *sdev; struct fc_rport *rport; int i; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { rport = starget_to_rport(scsi_target(sdev)); fc_remote_port_delete(rport); } } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec * @vport: The virtual port for which this call being executed. * @num_to_alloc: The requested number of buffers to allocate. * * This routine allocates a scsi buffer for device with SLI-3 interface spec, * the scsi buffer contains all the necessary information needed to initiate * a SCSI I/O. The non-DMAable buffer region contains information to build * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, * and the initial BPL. In addition to allocating memory, the FCP CMND and * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. * * Return codes: * int - number of scsi buffers that were allocated. * 0 = failure, less than num_to_alloc is a partial failure. **/ static int lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) { struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *psb; struct ulp_bde64 *bpl; IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_sgl; uint16_t iotag; int bcnt, bpl_size; bpl_size = phba->cfg_sg_dma_buf_size - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", num_to_alloc, phba->cfg_sg_dma_buf_size, (int)sizeof(struct fcp_cmnd), (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); if (!psb) break; /* * Get memory from the pci pool to map the virt space to pci * bus space for an I/O. The DMA buffer includes space for the * struct fcp_cmnd, struct fcp_rsp and the number of bde's * necessary to support the sg_tablesize. */ psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); break; } /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; } psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP; psb->fcp_cmnd = psb->data; psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); /* Initialize local short-hand pointers. */ bpl = (struct ulp_bde64 *)psb->dma_sgl; pdma_phys_fcp_cmd = psb->dma_handle; pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); /* * The first two bdes are the FCP_CMD and FCP_RSP. The balance * are sg list bdes. Initialize the first two and leave the * rest for queuecommand. 
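	 *
	 * Resulting BPL layout (descriptive note):
	 *	bpl[0]  - FCP_CMND BDE, sizeof(struct fcp_cmnd)
	 *	bpl[1]  - FCP_RSP BDE,  sizeof(struct fcp_rsp)
	 *	bpl[2+] - data BDEs, filled in later when the command is
	 *		  prepped for I/O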
*/ bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); /* Setup the physical region for the FCP RSP */ bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); /* * Since the IOCB for the FCP I/O is built into this * lpfc_scsi_buf, initialize it with all known data now. */ iocb = &psb->cur_iocbq.iocb; iocb->un.fcpi64.bdl.ulpIoTag32 = 0; if ((phba->sli_rev == 3) && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { /* fill in immediate fcp command BDE */ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, unsli3.fcp_ext.icd); iocb->un.fcpi64.bdl.addrHigh = 0; iocb->ulpBdeCount = 0; iocb->ulpLe = 0; /* fill in response BDE */ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = sizeof(struct fcp_rsp); iocb->unsli3.fcp_ext.rbde.addrLow = putPaddrLow(pdma_phys_fcp_rsp); iocb->unsli3.fcp_ext.rbde.addrHigh = putPaddrHigh(pdma_phys_fcp_rsp); } else { iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_sgl); iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_sgl); iocb->ulpBdeCount = 1; iocb->ulpLe = 1; } iocb->ulpClass = CLASS3; psb->status = IOSTAT_SUCCESS; /* Put it back into the SCSI buffer list */ psb->cur_iocbq.io_buf = psb; spin_lock_init(&psb->buf_lock); lpfc_release_scsi_buf_s3(phba, psb); } return bcnt; } /** * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport * @vport: pointer to lpfc vport data structure. * * This routine is invoked by the vport cleanup for deletions and the cleanup * for an ndlp on removal. **/ void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *psb, *next_psb; struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; int idx; if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; spin_lock_irqsave(&phba->hbalock, iflag); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; spin_lock(&qp->abts_io_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &qp->lpfc_abts_io_buf_list, list) { if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) continue; if (psb->rdata && psb->rdata->pnode && psb->rdata->pnode->vport == vport) psb->rdata = NULL; } spin_unlock(&qp->abts_io_buf_list_lock); } spin_unlock_irqrestore(&phba->hbalock, iflag); } /** * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the fcp xri abort wcqe structure. * @idx: index into hdwq * * This routine is invoked by the worker thread to process a SLI4 fast-path * FCP or NVME aborted xri. 
**/ void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri, int idx) { u16 xri = 0; u16 rxid = 0; struct lpfc_io_buf *psb, *next_psb; struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; struct lpfc_iocbq *iocbq; int i; struct lpfc_nodelist *ndlp; int rrq_empty = 0; struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; struct scsi_cmnd *cmd; int offline = 0; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; offline = pci_channel_offline(phba->pcidev); if (!offline) { xri = bf_get(lpfc_wcqe_xa_xri, axri); rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); } qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&qp->abts_io_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &qp->lpfc_abts_io_buf_list, list) { if (offline) xri = psb->cur_iocbq.sli4_xritag; if (psb->cur_iocbq.sli4_xritag == xri) { list_del_init(&psb->list); psb->flags &= ~LPFC_SBUF_XBUSY; psb->status = IOSTAT_SUCCESS; if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) { qp->abts_nvme_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); if (!offline) { lpfc_sli4_nvme_xri_aborted(phba, axri, psb); return; } lpfc_sli4_nvme_pci_offline_aborted(phba, psb); spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&qp->abts_io_buf_list_lock); continue; } qp->abts_scsi_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); if (psb->rdata && psb->rdata->pnode) ndlp = psb->rdata->pnode; else ndlp = NULL; rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); if (ndlp && !offline) { lpfc_set_rrq_active(phba, ndlp, psb->cur_iocbq.sli4_lxritag, rxid, 1); lpfc_sli4_abts_err_handler(phba, ndlp, axri); } if (phba->cfg_fcp_wait_abts_rsp || offline) { spin_lock_irqsave(&psb->buf_lock, iflag); cmd = psb->pCmd; psb->pCmd = NULL; spin_unlock_irqrestore(&psb->buf_lock, iflag); /* The sdev is not guaranteed to be valid post * scsi_done upcall. */ if (cmd) scsi_done(cmd); /* * We expect there is an abort thread waiting * for command completion wake up the thread. */ spin_lock_irqsave(&psb->buf_lock, iflag); psb->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; if (psb->waitq) wake_up(psb->waitq); spin_unlock_irqrestore(&psb->buf_lock, iflag); } lpfc_release_scsi_buf_s4(phba, psb); if (rrq_empty) lpfc_worker_wake_up(phba); if (!offline) return; spin_lock_irqsave(&phba->hbalock, iflag); spin_lock(&qp->abts_io_buf_list_lock); continue; } } spin_unlock(&qp->abts_io_buf_list_lock); if (!offline) { for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; if (!(iocbq->cmd_flag & LPFC_IO_FCP) || (iocbq->cmd_flag & LPFC_IO_LIBDFC)) continue; if (iocbq->sli4_xritag != xri) continue; psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); psb->flags &= ~LPFC_SBUF_XBUSY; spin_unlock_irqrestore(&phba->hbalock, iflag); if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; } } spin_unlock_irqrestore(&phba->hbalock, iflag); } /** * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA * @phba: The HBA for which this call is being executed. * @ndlp: pointer to a node-list data structure. * @cmnd: Pointer to scsi_cmnd data structure. * * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list * and returns to caller. 
* * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success **/ static struct lpfc_io_buf * lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct scsi_cmnd *cmnd) { struct lpfc_io_buf *lpfc_cmd = NULL; struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; unsigned long iflag = 0; spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, list); if (!lpfc_cmd) { spin_lock(&phba->scsi_buf_list_put_lock); list_splice(&phba->lpfc_scsi_buf_list_put, &phba->lpfc_scsi_buf_list_get); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, list); spin_unlock(&phba->scsi_buf_list_put_lock); } spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { atomic_inc(&ndlp->cmd_pending); lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; } return lpfc_cmd; } /** * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA * @phba: The HBA for which this call is being executed. * @ndlp: pointer to a node-list data structure. * @cmnd: Pointer to scsi_cmnd data structure. * * This routine removes a scsi buffer from head of @hdwq io_buf_list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success **/ static struct lpfc_io_buf * lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct scsi_cmnd *cmnd) { struct lpfc_io_buf *lpfc_cmd; struct lpfc_sli4_hdw_queue *qp; struct sli4_sge *sgl; dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_fcp_cmd; uint32_t cpu, idx; int tag; struct fcp_cmd_rsp_buf *tmp = NULL; cpu = raw_smp_processor_id(); if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); idx = blk_mq_unique_tag_to_hwq(tag); } else { idx = phba->sli4_hba.cpu_map[cpu].hdwq; } lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, !phba->cfg_xri_rebalancing); if (!lpfc_cmd) { qp = &phba->sli4_hba.hdwq[idx]; qp->empty_io_bufs++; return NULL; } /* Setup key fields in buffer that may have been changed * if other protocols used this buffer. */ lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP; lpfc_cmd->prot_seg_cnt = 0; lpfc_cmd->seg_cnt = 0; lpfc_cmd->timeout = 0; lpfc_cmd->flags = 0; lpfc_cmd->start_time = jiffies; lpfc_cmd->waitq = NULL; lpfc_cmd->cpu = cpu; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS lpfc_cmd->prot_data_type = 0; #endif tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); if (!tmp) { lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); return NULL; } lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; lpfc_cmd->fcp_rsp = tmp->fcp_rsp; /* * The first two SGEs are the FCP_CMD and FCP_RSP. * The balance are sg list bdes. Initialize the * first two and leave the rest for queuecommand. 
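	 *
	 * Resulting SGL layout (descriptive note):
	 *	sgl[0]  - FCP_CMD SGE, sizeof(struct fcp_cmnd)
	 *	sgl[1]  - FCP_RSP SGE, flagged as the last SGE for now
	 *	sgl[2+] - data SGEs, filled in later when the command is
	 *		  prepped for I/O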
*/ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); sgl++; /* Setup the physical region for the FCP RSP */ pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); if (lpfc_ndlp_check_qdepth(phba, ndlp)) { atomic_inc(&ndlp->cmd_pending); lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; } return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA * @phba: The HBA for which this call is being executed. * @ndlp: pointer to a node-list data structure. * @cmnd: Pointer to scsi_cmnd data structure. * * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success **/ static struct lpfc_io_buf* lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct scsi_cmnd *cmnd) { return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); } /** * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * * This routine releases @psb scsi buffer by adding it to tail of @phba * lpfc_scsi_buf_list list. **/ static void lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { unsigned long iflag = 0; psb->seg_cnt = 0; psb->prot_seg_cnt = 0; spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; psb->cur_iocbq.cmd_flag = LPFC_IO_FCP; list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * * This routine releases @psb scsi buffer by adding it to tail of @hdwq * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted. **/ static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; psb->seg_cnt = 0; psb->prot_seg_cnt = 0; qp = psb->hdwq; if (psb->flags & LPFC_SBUF_XBUSY) { spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); if (!phba->cfg_fcp_wait_abts_rsp) psb->pCmd = NULL; list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); qp->abts_scsi_io_bufs++; spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); } else { lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); } } /** * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * * This routine releases @psb scsi buffer by adding it to tail of @phba * lpfc_scsi_buf_list list. 
**/ static void lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) atomic_dec(&psb->ndlp->cmd_pending); psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; phba->lpfc_release_scsi_buf(phba, psb); } /** * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB * @data: A pointer to the immediate command data portion of the IOCB. * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. * * The routine copies the entire FCP command from @fcp_cmnd to @data while * byte swapping the data to big endian format for transmission on the wire. **/ static void lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd) { int i, j; for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); i += sizeof(uint32_t), j++) { ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); } } /** * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine does the pci dma mapping for scatter-gather list of scsi cmnd * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans * through sg elements and format the bde. This routine also initializes all * IOCB fields which are dependent on scsi command request buffer. * * Return codes: * 1 - Error * 0 - Success **/ static int lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; dma_addr_t physaddr; uint32_t num_bde = 0; int nseg, datadir = scsi_cmnd->sc_data_direction; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. */ bpl += 2; if (scsi_sg_count(scsi_cmnd)) { /* * The driver stores the segment count returned from dma_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), scsi_sg_count(scsi_cmnd), datadir); if (unlikely(!nseg)) return 1; lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9064 BLKGRD: %s: Too many sg segments" " from dma_map_sg. Config %d, seg_cnt" " %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 2; } /* * The driver established a maximum scatter-gather segment count * during probe that limits the number of sg elements in any * single scsi command. Just run through the seg_cnt and format * the bde's. * When using SLI-3 the driver will try to fit all the BDEs into * the IOCB. If it can't then the BDEs get added to a BPL as it * does for SLI-2 mode. 
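 * As a rough example: on SLI-3 without BlockGuard or DSS, an I/O that
 * maps to LPFC_EXT_DATA_BDE_COUNT (3) segments or fewer has its data
 * BDEs placed directly in the extended IOCB; anything larger, or any
 * other configuration, lands in the BPL being built here.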
*/ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { physaddr = sg_dma_address(sgel); if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && !(iocbq->cmd_flag & DSS_SECURITY_OP) && nseg <= LPFC_EXT_DATA_BDE_COUNT) { data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; data_bde->tus.f.bdeSize = sg_dma_len(sgel); data_bde->addrLow = putPaddrLow(physaddr); data_bde->addrHigh = putPaddrHigh(physaddr); data_bde++; } else { bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl->tus.f.bdeSize = sg_dma_len(sgel); bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); bpl++; } } } /* * Finish initializing those IOCB fields that are dependent on the * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is * explicitly reinitialized and for SLI-3 the extended bde count is * explicitly reinitialized since all iocb memory resources are reused. */ if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && !(iocbq->cmd_flag & DSS_SECURITY_OP)) { if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { /* * The extended IOCB format can only fit 3 BDE or a BPL. * This I/O has more than 3 BDE so the 1st data bde will * be a BPL that is filled in here. */ physaddr = lpfc_cmd->dma_handle; data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; data_bde->tus.f.bdeSize = (num_bde * sizeof(struct ulp_bde64)); physaddr += (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + (2 * sizeof(struct ulp_bde64))); data_bde->addrHigh = putPaddrHigh(physaddr); data_bde->addrLow = putPaddrLow(physaddr); /* ebde count includes the response bde and data bpl */ iocb_cmd->unsli3.fcp_ext.ebde_count = 2; } else { /* ebde count includes the response bde and data bdes */ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); } } else { iocb_cmd->un.fcpi64.bdl.bdeSize = ((num_bde + 2) * sizeof(struct ulp_bde64)); iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); } fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); /* * Due to difference in data length between DIF/non-DIF paths, * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); return 0; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* Return BG_ERR_INIT if error injection is detected by Initiator */ #define BG_ERR_INIT 0x1 /* Return BG_ERR_TGT if error injection is detected by Target */ #define BG_ERR_TGT 0x2 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ #define BG_ERR_SWAP 0x10 /* * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for * error injection */ #define BG_ERR_CHECK 0x20 /** * lpfc_bg_err_inject - Determine if we should inject an error * @phba: The Hba for which this call is being executed. 
* @sc: The SCSI command to examine * @reftag: (out) BlockGuard reference tag for transmitted data * @apptag: (out) BlockGuard application tag for transmitted data * @new_guard: (in) Value to replace CRC with if needed * * Returns BG_ERR_* bit mask or 0 if request ignored **/ static int lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) { struct scatterlist *sgpe; /* s/g prot entry */ struct lpfc_io_buf *lpfc_cmd = NULL; struct scsi_dif_tuple *src = NULL; struct lpfc_nodelist *ndlp; struct lpfc_rport_data *rdata; uint32_t op = scsi_get_prot_op(sc); uint32_t blksize; uint32_t numblks; u32 lba; int rc = 0; int blockoff = 0; if (op == SCSI_PROT_NORMAL) return 0; sgpe = scsi_prot_sglist(sc); lba = scsi_prot_ref_tag(sc); /* First check if we need to match the LBA */ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { blksize = scsi_prot_interval(sc); numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; /* Make sure we have the right LBA if one is specified */ if (phba->lpfc_injerr_lba < (u64)lba || (phba->lpfc_injerr_lba >= (u64)(lba + numblks))) return 0; if (sgpe) { blockoff = phba->lpfc_injerr_lba - (u64)lba; numblks = sg_dma_len(sgpe) / sizeof(struct scsi_dif_tuple); if (numblks < blockoff) blockoff = numblks; } } /* Next check if we need to match the remote NPortID or WWPN */ rdata = lpfc_rport_data_from_scsi_device(sc->device); if (rdata && rdata->pnode) { ndlp = rdata->pnode; /* Make sure we have the right NPortID if one is specified */ if (phba->lpfc_injerr_nportid && (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) return 0; /* * Make sure we have the right WWPN if one is specified. * wwn[0] should be a non-zero NAA in a good WWPN. */ if (phba->lpfc_injerr_wwpn.u.wwn[0] && (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)) != 0)) return 0; } /* Setup a ptr to the protection data if the SCSI host provides it */ if (sgpe) { src = (struct scsi_dif_tuple *)sg_virt(sgpe); src += blockoff; lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; } /* Should we change the Reference Tag */ if (reftag) { if (phba->lpfc_injerr_wref_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: if (src) { /* * For WRITE_PASS, force the error * to be sent on the wire. It should * be detected by the Target. * If blockoff != 0 error will be * inserted in middle of the IO. */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9076 BLKGRD: Injecting reftag error: " "write lba x%lx + x%x oldrefTag x%x\n", (unsigned long)lba, blockoff, be32_to_cpu(src->ref_tag)); /* * Save the old ref_tag so we can * restore it on completion. */ if (lpfc_cmd) { lpfc_cmd->prot_data_type = LPFC_INJERR_REFTAG; lpfc_cmd->prot_data_segment = src; lpfc_cmd->prot_data = src->ref_tag; } src->ref_tag = cpu_to_be32(0xDEADBEEF); phba->lpfc_injerr_wref_cnt--; if (phba->lpfc_injerr_wref_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_TGT | BG_ERR_CHECK; break; } fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the error * to be sent on the wire. It should be * detected by the Target. 
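 * (The value written through the @reftag out-parameter is what the
 * BPL/SGL setup routines later program into the PDE5/DISEED descriptor,
 * so the frames carry a reference tag that no longer matches the LBA.)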
*/ /* DEADBEEF will be the reftag on the wire */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_wref_cnt--; if (phba->lpfc_injerr_wref_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_TGT | BG_ERR_CHECK; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9078 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); break; case SCSI_PROT_WRITE_STRIP: /* * For WRITE_STRIP and WRITE_PASS, * force the error on data * being copied from SLI-Host to SLI-Port. */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_wref_cnt--; if (phba->lpfc_injerr_wref_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9077 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); break; } } if (phba->lpfc_injerr_rref_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* * For READ_STRIP and READ_PASS, force the * error on data being read off the wire. It * should force an IO error to the driver. */ *reftag = 0xDEADBEEF; phba->lpfc_injerr_rref_cnt--; if (phba->lpfc_injerr_rref_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9079 BLKGRD: Injecting reftag error: " "read lba x%lx\n", (unsigned long)lba); break; } } } /* Should we change the Application Tag */ if (apptag) { if (phba->lpfc_injerr_wapp_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: if (src) { /* * For WRITE_PASS, force the error * to be sent on the wire. It should * be detected by the Target. * If blockoff != 0 error will be * inserted in middle of the IO. */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9080 BLKGRD: Injecting apptag error: " "write lba x%lx + x%x oldappTag x%x\n", (unsigned long)lba, blockoff, be16_to_cpu(src->app_tag)); /* * Save the old app_tag so we can * restore it on completion. */ if (lpfc_cmd) { lpfc_cmd->prot_data_type = LPFC_INJERR_APPTAG; lpfc_cmd->prot_data_segment = src; lpfc_cmd->prot_data = src->app_tag; } src->app_tag = cpu_to_be16(0xDEAD); phba->lpfc_injerr_wapp_cnt--; if (phba->lpfc_injerr_wapp_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_TGT | BG_ERR_CHECK; break; } fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the * error to be sent on the wire. It should be * detected by the Target. */ /* DEAD will be the apptag on the wire */ *apptag = 0xDEAD; phba->lpfc_injerr_wapp_cnt--; if (phba->lpfc_injerr_wapp_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_TGT | BG_ERR_CHECK; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0813 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); break; case SCSI_PROT_WRITE_STRIP: /* * For WRITE_STRIP and WRITE_PASS, * force the error on data * being copied from SLI-Host to SLI-Port. 
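 * (BG_ERR_INIT, per the flag definitions above, means the corruption is
 * expected to be caught on the initiator side while the HBA pulls the
 * data from host memory, rather than by the remote target.)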
*/ *apptag = 0xDEAD; phba->lpfc_injerr_wapp_cnt--; if (phba->lpfc_injerr_wapp_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0812 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); break; } } if (phba->lpfc_injerr_rapp_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* * For READ_STRIP and READ_PASS, force the * error on data being read off the wire. It * should force an IO error to the driver. */ *apptag = 0xDEAD; phba->lpfc_injerr_rapp_cnt--; if (phba->lpfc_injerr_rapp_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_INIT; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0814 BLKGRD: Injecting apptag error: " "read lba x%lx\n", (unsigned long)lba); break; } } } /* Should we change the Guard Tag */ if (new_guard) { if (phba->lpfc_injerr_wgrd_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: rc = BG_ERR_CHECK; fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the * error to be sent on the wire. It should be * detected by the Target. */ phba->lpfc_injerr_wgrd_cnt--; if (phba->lpfc_injerr_wgrd_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc |= BG_ERR_TGT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0817 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsigned long)lba); break; case SCSI_PROT_WRITE_STRIP: /* * For WRITE_STRIP and WRITE_PASS, * force the error on data * being copied from SLI-Host to SLI-Port. */ phba->lpfc_injerr_wgrd_cnt--; if (phba->lpfc_injerr_wgrd_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0816 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsigned long)lba); break; } } if (phba->lpfc_injerr_rgrd_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* * For READ_STRIP and READ_PASS, force the * error on data being read off the wire. It * should force an IO error to the driver. */ phba->lpfc_injerr_rgrd_cnt--; if (phba->lpfc_injerr_rgrd_cnt == 0) { phba->lpfc_injerr_nportid = 0; phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; memset(&phba->lpfc_injerr_wwpn, 0, sizeof(struct lpfc_name)); } rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0818 BLKGRD: Injecting guard error: " "read lba x%lx\n", (unsigned long)lba); } } } return rc; } #endif /** * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with * the specified SCSI command. * @phba: The Hba for which this call is being executed. 
* @sc: The SCSI command to examine * @txop: (out) BlockGuard operation for transmitted data * @rxop: (out) BlockGuard operation for received data * * Returns: zero on success; non-zero if tx and/or rx op cannot be determined * **/ static int lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t *txop, uint8_t *rxop) { uint8_t ret = 0; if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: *rxop = BG_OP_IN_NODIF_OUT_CSUM; *txop = BG_OP_IN_CSUM_OUT_NODIF; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: *rxop = BG_OP_IN_CRC_OUT_NODIF; *txop = BG_OP_IN_NODIF_OUT_CRC; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: *rxop = BG_OP_IN_CRC_OUT_CSUM; *txop = BG_OP_IN_CSUM_OUT_CRC; break; case SCSI_PROT_NORMAL: default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9063 BLKGRD: Bad op/guard:%d/IP combination\n", scsi_get_prot_op(sc)); ret = 1; break; } } else { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: *rxop = BG_OP_IN_CRC_OUT_NODIF; *txop = BG_OP_IN_NODIF_OUT_CRC; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: *rxop = BG_OP_IN_CRC_OUT_CRC; *txop = BG_OP_IN_CRC_OUT_CRC; break; case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: *rxop = BG_OP_IN_NODIF_OUT_CRC; *txop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_NORMAL: default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", scsi_get_prot_op(sc)); ret = 1; break; } } return ret; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /** * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with * the specified SCSI command in order to force a guard tag error. * @phba: The Hba for which this call is being executed. * @sc: The SCSI command to examine * @txop: (out) BlockGuard operation for transmitted data * @rxop: (out) BlockGuard operation for received data * * Returns: zero on success; non-zero if tx and/or rx op cannot be determined * **/ static int lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t *txop, uint8_t *rxop) { if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: *rxop = BG_OP_IN_NODIF_OUT_CRC; *txop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: *rxop = BG_OP_IN_CSUM_OUT_NODIF; *txop = BG_OP_IN_NODIF_OUT_CSUM; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: *rxop = BG_OP_IN_CSUM_OUT_CRC; *txop = BG_OP_IN_CRC_OUT_CSUM; break; case SCSI_PROT_NORMAL: default: break; } } else { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: *rxop = BG_OP_IN_CSUM_OUT_NODIF; *txop = BG_OP_IN_NODIF_OUT_CSUM; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: *rxop = BG_OP_IN_CSUM_OUT_CSUM; *txop = BG_OP_IN_CSUM_OUT_CSUM; break; case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: *rxop = BG_OP_IN_NODIF_OUT_CSUM; *txop = BG_OP_IN_CSUM_OUT_NODIF; break; case SCSI_PROT_NORMAL: default: break; } } return 0; } #endif /** * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data * @phba: The Hba for which this call is being executed. 
* @sc: pointer to scsi command we're working on * @bpl: pointer to buffer list for protection groups * @datasegcnt: number of segments of data that have been dma mapped * * This function sets up BPL buffer list for protection groups of * type LPFC_PG_TYPE_NO_DIF * * This is usually used when the HBA is instructed to generate * DIFs and insert them into data stream (or strip DIF from * incoming data stream) * * The buffer list consists of just one protection group described * below: * +-------------------------+ * start of prot group --> | PDE_5 | * +-------------------------+ * | PDE_6 | * +-------------------------+ * | Data BDE | * +-------------------------+ * |more Data BDE's ... (opt)| * +-------------------------+ * * * Note: Data s/g buffers have been dma mapped * * Returns the number of BDEs added to the BPL. **/ static int lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datasegcnt) { struct scatterlist *sgde = NULL; /* s/g data entry */ struct lpfc_pde5 *pde5 = NULL; struct lpfc_pde6 *pde6 = NULL; dma_addr_t physaddr; int i = 0, num_bde = 0, status; int datadir = sc->sc_data_direction; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; #endif uint32_t checking = 1; uint32_t reftag; uint8_t txop, rxop; status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out; /* extract some info from the scsi command for pde*/ reftag = scsi_prot_ref_tag(sc); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK) checking = 0; } #endif /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); /* Endianness conversion if necessary for PDE5 */ pde5->word0 = cpu_to_le32(pde5->word0); pde5->reftag = cpu_to_le32(reftag); /* advance bpl and increment bde count */ num_bde++; bpl++; pde6 = (struct lpfc_pde6 *) bpl; /* setup PDE6 with the rest of the info */ memset(pde6, 0, sizeof(struct lpfc_pde6)); bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); /* * We only need to check the data on READs, for WRITEs * protection data is automatically generated, not checked. */ if (datadir == DMA_FROM_DEVICE) { if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) bf_set(pde6_ce, pde6, checking); else bf_set(pde6_ce, pde6, 0); if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(pde6_re, pde6, checking); else bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); /* Endianness conversion if necessary for PDE6 */ pde6->word0 = cpu_to_le32(pde6->word0); pde6->word1 = cpu_to_le32(pde6->word1); pde6->word2 = cpu_to_le32(pde6->word2); /* advance bpl and increment bde count */ num_bde++; bpl++; /* assumption: caller has already run dma_map_sg on command data */ scsi_for_each_sg(sc, sgde, datasegcnt, i) { physaddr = sg_dma_address(sgde); bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); bpl->tus.f.bdeSize = sg_dma_len(sgde); if (datadir == DMA_TO_DEVICE) bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; else bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; num_bde++; } out: return num_bde; } /** * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data * @phba: The Hba for which this call is being executed. 
* @sc: pointer to scsi command we're working on * @bpl: pointer to buffer list for protection groups * @datacnt: number of segments of data that have been dma mapped * @protcnt: number of segment of protection data that have been dma mapped * * This function sets up BPL buffer list for protection groups of * type LPFC_PG_TYPE_DIF * * This is usually used when DIFs are in their own buffers, * separate from the data. The HBA can then by instructed * to place the DIFs in the outgoing stream. For read operations, * The HBA could extract the DIFs and place it in DIF buffers. * * The buffer list for this type consists of one or more of the * protection groups described below: * +-------------------------+ * start of first prot group --> | PDE_5 | * +-------------------------+ * | PDE_6 | * +-------------------------+ * | PDE_7 (Prot BDE) | * +-------------------------+ * | Data BDE | * +-------------------------+ * |more Data BDE's ... (opt)| * +-------------------------+ * start of new prot group --> | PDE_5 | * +-------------------------+ * | ... | * +-------------------------+ * * Note: It is assumed that both data and protection s/g buffers have been * mapped for DMA * * Returns the number of BDEs added to the BPL. **/ static int lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datacnt, int protcnt) { struct scatterlist *sgde = NULL; /* s/g data entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct lpfc_pde5 *pde5 = NULL; struct lpfc_pde6 *pde6 = NULL; struct lpfc_pde7 *pde7 = NULL; dma_addr_t dataphysaddr, protphysaddr; unsigned short curr_prot = 0; unsigned int split_offset; unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgrp_blks, protgrp_bytes; unsigned int remainder, subtotal; int status; int datadir = sc->sc_data_direction; unsigned char pgdone = 0, alldone = 0; unsigned blksize; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; #endif uint32_t checking = 1; uint32_t reftag; uint8_t txop, rxop; int num_bde = 0; sgpe = scsi_prot_sglist(sc); sgde = scsi_sglist(sc); if (!sgpe || !sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9020 Invalid s/g entry: data=x%px prot=x%px\n", sgpe, sgde); return 0; } status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out; /* extract some info from the scsi command */ blksize = scsi_prot_interval(sc); reftag = scsi_prot_ref_tag(sc); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK) checking = 0; } #endif split_offset = 0; do { /* Check to see if we ran out of space */ if (num_bde >= (phba->cfg_total_seg_cnt - 2)) return num_bde + 3; /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); /* Endianness conversion if necessary for PDE5 */ pde5->word0 = cpu_to_le32(pde5->word0); pde5->reftag = cpu_to_le32(reftag); /* advance bpl and increment bde count */ num_bde++; bpl++; pde6 = (struct lpfc_pde6 *) bpl; /* setup PDE6 with the rest of the info */ memset(pde6, 0, sizeof(struct lpfc_pde6)); bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) bf_set(pde6_ce, pde6, checking); else bf_set(pde6_ce, pde6, 0); if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(pde6_re, pde6, checking); else bf_set(pde6_re, 
pde6, 0); bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); /* Endianness conversion if necessary for PDE6 */ pde6->word0 = cpu_to_le32(pde6->word0); pde6->word1 = cpu_to_le32(pde6->word1); pde6->word2 = cpu_to_le32(pde6->word2); /* advance bpl and increment bde count */ num_bde++; bpl++; /* setup the first BDE that points to protection buffer */ protphysaddr = sg_dma_address(sgpe) + protgroup_offset; protgroup_len = sg_dma_len(sgpe) - protgroup_offset; /* must be integer multiple of the DIF block length */ BUG_ON(protgroup_len % 8); pde7 = (struct lpfc_pde7 *) bpl; memset(pde7, 0, sizeof(struct lpfc_pde7)); bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); protgrp_blks = protgroup_len / 8; protgrp_bytes = protgrp_blks * blksize; /* check if this pde is crossing the 4K boundary; if so split */ if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); protgroup_offset += protgroup_remainder; protgrp_blks = protgroup_remainder / 8; protgrp_bytes = protgrp_blks * blksize; } else { protgroup_offset = 0; curr_prot++; } num_bde++; /* setup BDE's for data blocks associated with DIF data */ pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { /* Check to see if we ran out of space */ if (num_bde >= phba->cfg_total_seg_cnt) return num_bde + 1; if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9065 BLKGRD:%s Invalid data segment\n", __func__); return 0; } bpl++; dataphysaddr = sg_dma_address(sgde) + split_offset; bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); remainder = sg_dma_len(sgde) - split_offset; if ((subtotal + remainder) <= protgrp_bytes) { /* we can use this whole buffer */ bpl->tus.f.bdeSize = remainder; split_offset = 0; if ((subtotal + remainder) == protgrp_bytes) pgdone = 1; } else { /* must split this buffer with next prot grp */ bpl->tus.f.bdeSize = protgrp_bytes - subtotal; split_offset += bpl->tus.f.bdeSize; } subtotal += bpl->tus.f.bdeSize; if (datadir == DMA_TO_DEVICE) bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; else bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; bpl->tus.w = le32_to_cpu(bpl->tus.w); num_bde++; if (split_offset) break; /* Move to the next s/g segment if possible */ sgde = sg_next(sgde); } if (protgroup_offset) { /* update the reference tag */ reftag += protgrp_blks; bpl++; continue; } /* are we done ? */ if (curr_prot == protcnt) { alldone = 1; } else if (curr_prot < protcnt) { /* advance to next prot buffer */ sgpe = sg_next(sgpe); bpl++; /* update the reference tag */ reftag += protgrp_blks; } else { /* if we're here, we have a bug */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9054 BLKGRD: bug in %s\n", __func__); } } while (!alldone); out: return num_bde; } /** * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data * @phba: The Hba for which this call is being executed. * @sc: pointer to scsi command we're working on * @sgl: pointer to buffer list for protection groups * @datasegcnt: number of segments of data that have been dma mapped * @lpfc_cmd: lpfc scsi command object pointer. 
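 *   (also the handle used to pull extra SGL chunks via
 *   lpfc_get_sgl_per_hdwq() when an LSP expansion entry is needed)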
* * This function sets up SGL buffer list for protection groups of * type LPFC_PG_TYPE_NO_DIF * * This is usually used when the HBA is instructed to generate * DIFs and insert them into data stream (or strip DIF from * incoming data stream) * * The buffer list consists of just one protection group described * below: * +-------------------------+ * start of prot group --> | DI_SEED | * +-------------------------+ * | Data SGE | * +-------------------------+ * |more Data SGE's ... (opt)| * +-------------------------+ * * * Note: Data s/g buffers have been dma mapped * * Returns the number of SGEs added to the SGL. **/ static int lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datasegcnt, struct lpfc_io_buf *lpfc_cmd) { struct scatterlist *sgde = NULL; /* s/g data entry */ struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; int i = 0, num_sge = 0, status; uint32_t reftag; uint8_t txop, rxop; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; #endif uint32_t checking = 1; uint32_t dma_len; uint32_t dma_offset = 0; struct sli4_hybrid_sgl *sgl_xtra = NULL; int j; bool lsp_just_set = false; status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out; /* extract some info from the scsi command for pde*/ reftag = scsi_prot_ref_tag(sc); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK) checking = 0; } #endif /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); /* Endianness conversion if necessary */ diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; /* * We only need to check the data on READs, for WRITEs * protection data is automatically generated, not checked. 
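 * (A WRITE_INSERT, for example, has no inbound protection data to verify;
 * the HBA generates the DIF itself, so the check-enable bits are left
 * clear unless the transfer direction is DMA_FROM_DEVICE.)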
*/ if (sc->sc_data_direction == DMA_FROM_DEVICE) { if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); else bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(lpfc_sli4_sge_dif_re, diseed, checking); else bf_set(lpfc_sli4_sge_dif_re, diseed, 0); } /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); /* Endianness conversion if necessary for DISEED */ diseed->word2 = cpu_to_le32(diseed->word2); diseed->word3 = cpu_to_le32(diseed->word3); /* advance bpl and increment sge count */ num_sge++; sgl++; /* assumption: caller has already run dma_map_sg on command data */ sgde = scsi_sglist(sc); j = 3; for (i = 0; i < datasegcnt; i++) { /* clear it */ sgl->word2 = 0; /* do we need to expand the segment */ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && ((datasegcnt - 1) != i)) { /* set LSP type */ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); if (unlikely(!sgl_xtra)) { lpfc_cmd->seg_cnt = 0; return 0; } sgl->addr_lo = cpu_to_le32(putPaddrLow( sgl_xtra->dma_phys_sgl)); sgl->addr_hi = cpu_to_le32(putPaddrHigh( sgl_xtra->dma_phys_sgl)); } else { bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); } if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { if ((datasegcnt - 1) == i) bf_set(lpfc_sli4_sge_last, sgl, 1); physaddr = sg_dma_address(sgde); dma_len = sg_dma_len(sgde); sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(dma_len); dma_offset += dma_len; sgde = sg_next(sgde); sgl++; num_sge++; lsp_just_set = false; } else { sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; i = i - 1; lsp_just_set = true; } j++; } out: return num_sge; } /** * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data * @phba: The Hba for which this call is being executed. * @sc: pointer to scsi command we're working on * @sgl: pointer to buffer list for protection groups * @datacnt: number of segments of data that have been dma mapped * @protcnt: number of segment of protection data that have been dma mapped * @lpfc_cmd: lpfc scsi command object pointer. * * This function sets up SGL buffer list for protection groups of * type LPFC_PG_TYPE_DIF * * This is usually used when DIFs are in their own buffers, * separate from the data. The HBA can then by instructed * to place the DIFs in the outgoing stream. For read operations, * The HBA could extract the DIFs and place it in DIF buffers. * * The buffer list for this type consists of one or more of the * protection groups described below: * +-------------------------+ * start of first prot group --> | DISEED | * +-------------------------+ * | DIF (Prot SGE) | * +-------------------------+ * | Data SGE | * +-------------------------+ * |more Data SGE's ... (opt)| * +-------------------------+ * start of new prot group --> | DISEED | * +-------------------------+ * | ... | * +-------------------------+ * * Note: It is assumed that both data and protection s/g buffers have been * mapped for DMA * * Returns the number of SGEs added to the SGL. 
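 * As a rough worked example with a 512-byte protection interval: a single
 * 4 KB protection s/g entry holds 4096 / 8 = 512 DIF tuples, so one group
 * covers protgrp_bytes = 512 * 512 = 256 KB of data before the next
 * DISEED/DIF pair has to be emitted.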
**/ static int lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datacnt, int protcnt, struct lpfc_io_buf *lpfc_cmd) { struct scatterlist *sgde = NULL; /* s/g data entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct sli4_sge_diseed *diseed = NULL; dma_addr_t dataphysaddr, protphysaddr; unsigned short curr_prot = 0; unsigned int split_offset; unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgrp_blks, protgrp_bytes; unsigned int remainder, subtotal; int status; unsigned char pgdone = 0, alldone = 0; unsigned blksize; uint32_t reftag; uint8_t txop, rxop; uint32_t dma_len; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; #endif uint32_t checking = 1; uint32_t dma_offset = 0; int num_sge = 0, j = 2; struct sli4_hybrid_sgl *sgl_xtra = NULL; sgpe = scsi_prot_sglist(sc); sgde = scsi_sglist(sc); if (!sgpe || !sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9082 Invalid s/g entry: data=x%px prot=x%px\n", sgpe, sgde); return 0; } status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out; /* extract some info from the scsi command */ blksize = scsi_prot_interval(sc); reftag = scsi_prot_ref_tag(sc); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP) lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK) checking = 0; } #endif split_offset = 0; do { /* Check to see if we ran out of space */ if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && !(phba->cfg_xpsgl)) return num_sge + 3; /* DISEED and DIF have to be together */ if (!((j + 1) % phba->border_sge_num) || !((j + 2) % phba->border_sge_num) || !((j + 3) % phba->border_sge_num)) { sgl->word2 = 0; /* set LSP type */ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); if (unlikely(!sgl_xtra)) { goto out; } else { sgl->addr_lo = cpu_to_le32(putPaddrLow( sgl_xtra->dma_phys_sgl)); sgl->addr_hi = cpu_to_le32(putPaddrHigh( sgl_xtra->dma_phys_sgl)); } sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; j = 0; } /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); /* Endianness conversion if necessary */ diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); } else { bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); /* * When in this mode, the hardware will replace * the guard tag from the host with a * newly generated good CRC for the wire. * Switch to raw mode here to avoid this * behavior. What the host sends gets put on the wire. 
*/ if (txop == BG_OP_IN_CRC_OUT_CRC) { txop = BG_OP_RAW_MODE; rxop = BG_OP_RAW_MODE; } } if (sc->prot_flags & SCSI_PROT_REF_CHECK) bf_set(lpfc_sli4_sge_dif_re, diseed, checking); else bf_set(lpfc_sli4_sge_dif_re, diseed, 0); /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); /* Endianness conversion if necessary for DISEED */ diseed->word2 = cpu_to_le32(diseed->word2); diseed->word3 = cpu_to_le32(diseed->word3); /* advance sgl and increment bde count */ num_sge++; sgl++; j++; /* setup the first BDE that points to protection buffer */ protphysaddr = sg_dma_address(sgpe) + protgroup_offset; protgroup_len = sg_dma_len(sgpe) - protgroup_offset; /* must be integer multiple of the DIF block length */ BUG_ON(protgroup_len % 8); /* Now setup DIF SGE */ sgl->word2 = 0; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = 0; protgrp_blks = protgroup_len / 8; protgrp_bytes = protgrp_blks * blksize; /* check if DIF SGE is crossing the 4K boundary; if so split */ if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); protgroup_offset += protgroup_remainder; protgrp_blks = protgroup_remainder / 8; protgrp_bytes = protgrp_blks * blksize; } else { protgroup_offset = 0; curr_prot++; } num_sge++; /* setup SGE's for data blocks associated with DIF data */ pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ sgl++; j++; while (!pgdone) { /* Check to see if we ran out of space */ if ((num_sge >= phba->cfg_total_seg_cnt) && !phba->cfg_xpsgl) return num_sge + 1; if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9086 BLKGRD:%s Invalid data segment\n", __func__); return 0; } if (!((j + 1) % phba->border_sge_num)) { sgl->word2 = 0; /* set LSP type */ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); if (unlikely(!sgl_xtra)) { goto out; } else { sgl->addr_lo = cpu_to_le32( putPaddrLow(sgl_xtra->dma_phys_sgl)); sgl->addr_hi = cpu_to_le32( putPaddrHigh(sgl_xtra->dma_phys_sgl)); } sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32( phba->cfg_sg_dma_buf_size); sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; } else { dataphysaddr = sg_dma_address(sgde) + split_offset; remainder = sg_dma_len(sgde) - split_offset; if ((subtotal + remainder) <= protgrp_bytes) { /* we can use this whole buffer */ dma_len = remainder; split_offset = 0; if ((subtotal + remainder) == protgrp_bytes) pgdone = 1; } else { /* must split this buffer with next * prot grp */ dma_len = protgrp_bytes - subtotal; split_offset += dma_len; } subtotal += dma_len; sgl->word2 = 0; sgl->addr_lo = cpu_to_le32(putPaddrLow( dataphysaddr)); sgl->addr_hi = cpu_to_le32(putPaddrHigh( dataphysaddr)); bf_set(lpfc_sli4_sge_last, sgl, 0); bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); sgl->sge_len = cpu_to_le32(dma_len); dma_offset += dma_len; num_sge++; if (split_offset) { sgl++; j++; break; } /* Move to the next s/g segment if possible */ sgde = sg_next(sgde); sgl++; } j++; } if (protgroup_offset) { /* update the reference tag */ reftag += protgrp_blks; continue; } /* are we done ? 
*/ if (curr_prot == protcnt) { /* mark the last SGL */ sgl--; bf_set(lpfc_sli4_sge_last, sgl, 1); alldone = 1; } else if (curr_prot < protcnt) { /* advance to next prot buffer */ sgpe = sg_next(sgpe); /* update the reference tag */ reftag += protgrp_blks; } else { /* if we're here, we have a bug */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9085 BLKGRD: bug in %s\n", __func__); } } while (!alldone); out: return num_sge; } /** * lpfc_prot_group_type - Get prtotection group type of SCSI command * @phba: The Hba for which this call is being executed. * @sc: pointer to scsi command we're working on * * Given a SCSI command that supports DIF, determine composition of protection * groups involved in setting up buffer lists * * Returns: Protection group type (with or without DIF) * **/ static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) { int ret = LPFC_PG_TYPE_INVALID; unsigned char op = scsi_get_prot_op(sc); switch (op) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: ret = LPFC_PG_TYPE_NO_DIF; break; case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: ret = LPFC_PG_TYPE_DIF_BUF; break; default: if (phba) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9021 Unsupported protection op:%d\n", op); break; } return ret; } /** * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be adjusted. * * Adjust the data length to account for how much data * is actually on the wire. * * returns the adjusted data length **/ static int lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *sc = lpfc_cmd->pCmd; int fcpdl; fcpdl = scsi_bufflen(sc); /* Check if there is protection data on the wire */ if (sc->sc_data_direction == DMA_FROM_DEVICE) { /* Read check for protection data */ if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) return fcpdl; } else { /* Write check for protection data */ if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) return fcpdl; } /* * If we are in DIF Type 1 mode every data block has a 8 byte * DIF (trailer) attached to it. Must ajust FCP data length * to account for the protection data. */ fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8; return fcpdl; } /** * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. * * This is the protection/DIF aware version of * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the * two functions eventually, but for now, it's here. * RETURNS 0 - SUCCESS, * 1 - Failed DMA map, retry. * 2 - Invalid scsi cmd or prot-type. Do not rety. 
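 * (A sketch of the expected caller behaviour, not spelled out here: the
 * queuecommand path typically requeues the command on a return of 1 and
 * fails it back to the midlayer on a return of 2.)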
**/ static int lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; int ret = 1; struct lpfc_vport *vport = phba->pport; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd * fcp_rsp regions to the first data bde entry */ bpl += 2; if (scsi_sg_count(scsi_cmnd)) { /* * The driver stores the segment count returned from dma_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ datasegcnt = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), scsi_sg_count(scsi_cmnd), datadir); if (unlikely(!datasegcnt)) return 1; lpfc_cmd->seg_cnt = datasegcnt; /* First check if data segment count from SCSI Layer is good */ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); ret = 2; goto err; } prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: /* Here we need to add a PDE5 and PDE6 to the count */ if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { ret = 2; goto err; } num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) { ret = 2; goto err; } break; case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared * for DMA */ protsegcnt = dma_map_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), datadir); if (unlikely(!protsegcnt)) { scsi_dma_unmap(scsi_cmnd); return 1; } lpfc_cmd->prot_seg_cnt = protsegcnt; /* * There is a minimun of 4 BPLs used for every * protection data segment. */ if ((lpfc_cmd->prot_seg_cnt * 4) > (phba->cfg_total_seg_cnt - 2)) { ret = 2; goto err; } num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ if ((num_bde < 3) || (num_bde > phba->cfg_total_seg_cnt)) { ret = 2; goto err; } break; case LPFC_PG_TYPE_INVALID: default: scsi_dma_unmap(scsi_cmnd); lpfc_cmd->seg_cnt = 0; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9022 Unexpected protection group %i\n", prot_group_type); return 2; } } /* * Finish initializing those IOCB fields that are dependent on the * scsi_cmnd request_buffer. Note that the bdeSize is explicitly * reinitialized since all iocb memory resources are used many times * for transmit, receive, and continuation bpl's. 
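 * As a rough example, assuming the 12-byte struct ulp_bde64: a request
 * whose protection setup produced num_bde == 6 ends up with a BPL
 * bdeSize of (2 + 6) * 12 = 96 bytes, the extra 2 covering the FCP_CMND
 * and FCP_RSP entries skipped over at the top of this routine.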
*/ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* * Due to difference in data length between DIF/non-DIF paths, * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = fcpdl; /* * For First burst, we may need to adjust the initial transfer * length for DIF */ if (iocb_cmd->un.fcpi.fcpi_XRdy && (fcpdl < vport->cfg_first_burst_size)) iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; return 0; err: if (lpfc_cmd->seg_cnt) scsi_dma_unmap(scsi_cmnd); if (lpfc_cmd->prot_seg_cnt) dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), scsi_cmnd->sc_data_direction); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9023 Cannot setup S/G List for HBA" "IO segs %d/%d BPL %d SCSI %d: %d %d\n", lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); lpfc_cmd->seg_cnt = 0; lpfc_cmd->prot_seg_cnt = 0; return ret; } /* * This function calcuates the T10 DIF guard tag * on the specified data using a CRC algorithmn * using crc_t10dif. */ static uint16_t lpfc_bg_crc(uint8_t *data, int count) { uint16_t crc = 0; uint16_t x; crc = crc_t10dif(data, count); x = cpu_to_be16(crc); return x; } /* * This function calcuates the T10 DIF guard tag * on the specified data using a CSUM algorithmn * using ip_compute_csum. */ static uint16_t lpfc_bg_csum(uint8_t *data, int count) { uint16_t ret; ret = ip_compute_csum(data, count); return ret; } /* * This function examines the protection data to try to determine * what type of T10-DIF error occurred. */ static void lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scatterlist *sgpe; /* s/g prot entry */ struct scatterlist *sgde; /* s/g data entry */ struct scsi_cmnd *cmd = lpfc_cmd->pCmd; struct scsi_dif_tuple *src = NULL; uint8_t *data_src = NULL; uint16_t guard_tag; uint16_t start_app_tag, app_tag; uint32_t start_ref_tag, ref_tag; int prot, protsegcnt; int err_type, len, data_len; int chk_ref, chk_app, chk_guard; uint16_t sum; unsigned blksize; err_type = BGS_GUARD_ERR_MASK; sum = 0; guard_tag = 0; /* First check to see if there is protection data to examine */ prot = scsi_get_prot_op(cmd); if ((prot == SCSI_PROT_READ_STRIP) || (prot == SCSI_PROT_WRITE_INSERT) || (prot == SCSI_PROT_NORMAL)) goto out; /* Currently the driver just supports ref_tag and guard_tag checking */ chk_ref = 1; chk_app = 0; chk_guard = 0; /* Setup a ptr to the protection data provided by the SCSI host */ sgpe = scsi_prot_sglist(cmd); protsegcnt = lpfc_cmd->prot_seg_cnt; if (sgpe && protsegcnt) { /* * We will only try to verify guard tag if the segment * data length is a multiple of the blksize. 
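 * For example, with a 512-byte interval a 4096-byte data segment covers
 * exactly eight blocks and can be walked tuple by tuple; a segment that
 * ends mid-block would leave no complete block to recompute a guard over.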
*/ sgde = scsi_sglist(cmd); blksize = scsi_prot_interval(cmd); data_src = (uint8_t *)sg_virt(sgde); data_len = sgde->length; if ((data_len & (blksize - 1)) == 0) chk_guard = 1; src = (struct scsi_dif_tuple *)sg_virt(sgpe); start_ref_tag = scsi_prot_ref_tag(cmd); start_app_tag = src->app_tag; len = sgpe->length; while (src && protsegcnt) { while (len) { /* * First check to see if a protection data * check is valid */ if ((src->ref_tag == T10_PI_REF_ESCAPE) || (src->app_tag == T10_PI_APP_ESCAPE)) { start_ref_tag++; goto skipit; } /* First Guard Tag checking */ if (chk_guard) { guard_tag = src->guard_tag; if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM) sum = lpfc_bg_csum(data_src, blksize); else sum = lpfc_bg_crc(data_src, blksize); if ((guard_tag != sum)) { err_type = BGS_GUARD_ERR_MASK; goto out; } } /* Reference Tag checking */ ref_tag = be32_to_cpu(src->ref_tag); if (chk_ref && (ref_tag != start_ref_tag)) { err_type = BGS_REFTAG_ERR_MASK; goto out; } start_ref_tag++; /* App Tag checking */ app_tag = src->app_tag; if (chk_app && (app_tag != start_app_tag)) { err_type = BGS_APPTAG_ERR_MASK; goto out; } skipit: len -= sizeof(struct scsi_dif_tuple); if (len < 0) len = 0; src++; data_src += blksize; data_len -= blksize; /* * Are we at the end of the Data segment? * The data segment is only used for Guard * tag checking. */ if (chk_guard && (data_len == 0)) { chk_guard = 0; sgde = sg_next(sgde); if (!sgde) goto out; data_src = (uint8_t *)sg_virt(sgde); data_len = sgde->length; if ((data_len & (blksize - 1)) == 0) chk_guard = 1; } } /* Goto the next Protection data segment */ sgpe = sg_next(sgpe); if (sgpe) { src = (struct scsi_dif_tuple *)sg_virt(sgpe); len = sgpe->length; } else { src = NULL; } protsegcnt--; } } out: if (err_type == BGS_GUARD_ERR_MASK) { scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); set_host_byte(cmd, DID_ABORT); phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", scsi_prot_ref_tag(cmd), sum, guard_tag); } else if (err_type == BGS_REFTAG_ERR_MASK) { scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); set_host_byte(cmd, DID_ABORT); phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", scsi_prot_ref_tag(cmd), ref_tag, start_ref_tag); } else if (err_type == BGS_APPTAG_ERR_MASK) { scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); set_host_byte(cmd, DID_ABORT); phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9041 BLKGRD: reftag %x app_tag err %x != %x\n", scsi_prot_ref_tag(cmd), app_tag, start_app_tag); } } /* * This function checks for BlockGuard errors detected by * the HBA. In case of errors, the ASC/ASCQ fields in the * sense buffer will be set accordingly, paired with * ILLEGAL_REQUEST to signal to the kernel that the HBA * detected corruption. 
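 * (ASC 0x10 with ASCQ 0x1, 0x2 and 0x3 corresponds to guard, application
 * and reference tag errors respectively, matching the scsi_build_sense()
 * calls below.)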
* * Returns: * 0 - No error found * 1 - BlockGuard error found * -1 - Internal error (bad profile, ...etc) */ static int lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *pIocbOut) { struct scsi_cmnd *cmd = lpfc_cmd->pCmd; struct sli3_bg_fields *bgf; int ret = 0; struct lpfc_wcqe_complete *wcqe; u32 status; u32 bghm = 0; u32 bgstat = 0; u64 failing_sector = 0; if (phba->sli_rev == LPFC_SLI_REV4) { wcqe = &pIocbOut->wcqe_cmpl; status = bf_get(lpfc_wcqe_c_status, wcqe); if (status == CQE_STATUS_DI_ERROR) { /* Guard Check failed */ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) bgstat |= BGS_GUARD_ERR_MASK; /* AppTag Check failed */ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) bgstat |= BGS_APPTAG_ERR_MASK; /* RefTag Check failed */ if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) bgstat |= BGS_REFTAG_ERR_MASK; /* Check to see if there was any good data before the * error */ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; bghm = wcqe->total_data_placed; } /* * Set ALL the error bits to indicate we don't know what * type of error it is. */ if (!bgstat) bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | BGS_GUARD_ERR_MASK); } } else { bgf = &pIocbOut->iocb.unsli3.sli3_bg; bghm = bgf->bghm; bgstat = bgf->bgstat; } if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = DID_ERROR << 16; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9072 BLKGRD: Invalid BG Profile in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], scsi_prot_ref_tag(cmd), scsi_logical_block_count(cmd), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = DID_ERROR << 16; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9073 BLKGRD: Invalid BG PDIF Block in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], scsi_prot_ref_tag(cmd), scsi_logical_block_count(cmd), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_guard_err(bgstat)) { ret = 1; scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); set_host_byte(cmd, DID_ABORT); phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9055 BLKGRD: Guard Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], scsi_prot_ref_tag(cmd), scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { ret = 1; scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); set_host_byte(cmd, DID_ABORT); phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9056 BLKGRD: Ref Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], scsi_prot_ref_tag(cmd), scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { ret = 1; scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); set_host_byte(cmd, DID_ABORT); phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9061 BLKGRD: App Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], scsi_prot_ref_tag(cmd), scsi_logical_block_count(cmd), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { /* * setup sense data descriptor 0 per SPC-4 as an information * field, and put the failing LBA in it. * This code assumes there was also a guard/app/ref tag error * indication. 
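 * As a rough example for a 512-byte-sector device: when protection data
 * travels on the wire (READ_PASS/WRITE_PASS and the strip/insert-on-wire
 * cases below), each wire block also carries the 8-byte DIF tuple, so
 * bghm is divided by 520 rather than 512 before being added to the
 * starting LBA.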
*/ cmd->sense_buffer[7] = 0xc; /* Additional sense length */ cmd->sense_buffer[8] = 0; /* Information descriptor type */ cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ cmd->sense_buffer[10] = 0x80; /* Validity bit */ /* bghm is a "on the wire" FC frame based count */ switch (scsi_get_prot_op(cmd)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: bghm /= cmd->device->sector_size; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: bghm /= (cmd->device->sector_size + sizeof(struct scsi_dif_tuple)); break; } failing_sector = scsi_get_lba(cmd); failing_sector += bghm; /* Descriptor Information */ put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); } if (!ret) { /* No error was reported - problem in FW? */ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9057 BLKGRD: Unknown error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], scsi_prot_ref_tag(cmd), scsi_logical_block_count(cmd), bgstat, bghm); /* Calculate what type of error it was */ lpfc_calc_bg_err(phba, lpfc_cmd); } out: return ret; } /** * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine does the pci dma mapping for scatter-gather list of scsi cmnd * field of @lpfc_cmd for device with SLI-4 interface spec. * * Return codes: * 2 - Error - Do not retry * 1 - Error - Retry * 0 - Success **/ static int lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; struct sli4_sge *first_data_sgl; struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; struct lpfc_vport *vport = phba->pport; union lpfc_wqe128 *wqe = &pwqeq->wqe; dma_addr_t physaddr; uint32_t dma_len; uint32_t dma_offset = 0; int nseg, i, j; struct ulp_bde64 *bde; bool lsp_just_set = false; struct sli4_hybrid_sgl *sgl_xtra = NULL; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. */ if (scsi_sg_count(scsi_cmnd)) { /* * The driver stores the segment count returned from dma_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ nseg = scsi_dma_map(scsi_cmnd); if (unlikely(nseg <= 0)) return 1; sgl += 1; /* clear the last flag in the fcp_rsp map entry */ sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); sgl += 1; first_data_sgl = sgl; lpfc_cmd->seg_cnt = nseg; if (!phba->cfg_xpsgl && lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9074 BLKGRD:" " %s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 2; } /* * The driver established a maximum scatter-gather segment count * during probe that limits the number of sg elements in any * single scsi command. Just run through the seg_cnt and format * the sge's. 
* When using SLI-3 the driver will try to fit all the BDEs into * the IOCB. If it can't then the BDEs get added to a BPL as it * does for SLI-2 mode. */ /* for tracking segment boundaries */ sgel = scsi_sglist(scsi_cmnd); j = 2; for (i = 0; i < nseg; i++) { sgl->word2 = 0; if (nseg == 1) { bf_set(lpfc_sli4_sge_last, sgl, 1); bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); } else { bf_set(lpfc_sli4_sge_last, sgl, 0); /* do we need to expand the segment */ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && ((nseg - 1) != i)) { /* set LSP type */ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); sgl_xtra = lpfc_get_sgl_per_hdwq( phba, lpfc_cmd); if (unlikely(!sgl_xtra)) { lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } sgl->addr_lo = cpu_to_le32(putPaddrLow( sgl_xtra->dma_phys_sgl)); sgl->addr_hi = cpu_to_le32(putPaddrHigh( sgl_xtra->dma_phys_sgl)); } else { bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); } } if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { if ((nseg - 1) == i) bf_set(lpfc_sli4_sge_last, sgl, 1); physaddr = sg_dma_address(sgel); dma_len = sg_dma_len(sgel); sgl->addr_lo = cpu_to_le32(putPaddrLow( physaddr)); sgl->addr_hi = cpu_to_le32(putPaddrHigh( physaddr)); bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(dma_len); dma_offset += dma_len; sgel = sg_next(sgel); sgl++; lsp_just_set = false; } else { sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32( phba->cfg_sg_dma_buf_size); sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; i = i - 1; lsp_just_set = true; } j++; } /* PBDE support for first data SGE only. * For FCoE, we key off Performance Hints. * For FC, we key off lpfc_enable_pbde. */ if (nseg == 1 && ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || phba->cfg_enable_pbde)) { /* Words 13-15 */ bde = (struct ulp_bde64 *) &wqe->words[13]; bde->addrLow = first_data_sgl->addr_lo; bde->addrHigh = first_data_sgl->addr_hi; bde->tus.f.bdeSize = le32_to_cpu(first_data_sgl->sge_len); bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bde->tus.w = cpu_to_le32(bde->tus.w); /* Word 11 - set PBDE bit */ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); } else { memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); /* Word 11 - PBDE bit disabled by default template */ } } else { sgl += 1; /* set the last flag in the fcp_rsp map entry */ sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || phba->cfg_enable_pbde) { bde = (struct ulp_bde64 *) &wqe->words[13]; memset(bde, 0, (sizeof(uint32_t) * 3)); } } /* * Finish initializing those IOCB fields that are dependent on the * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is * explicitly reinitialized. * all iocb memory resources are reused. */ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); /* Set first-burst provided it was successfully negotiated */ if (!(phba->hba_flag & HBA_FCOE_MODE) && vport->cfg_first_burst_size && scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { u32 init_len, total_len; total_len = be32_to_cpu(fcp_cmnd->fcpDl); init_len = min(total_len, vport->cfg_first_burst_size); /* Word 4 & 5 */ wqe->fcp_iwrite.initial_xfer_len = init_len; wqe->fcp_iwrite.total_xfer_len = total_len; } else { /* Word 4 */ wqe->fcp_iwrite.total_xfer_len = be32_to_cpu(fcp_cmnd->fcpDl); } /* * If the OAS driver feature is enabled and the lun is enabled for * OAS, set the oas iocb related flags. 
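 *
 * Sketch of the WQE word 10 bits set below (the priority value is an
 * assumption for illustration): a LUN with oas_enabled and a per-LUN
 * priority of 2 gets wqe_oas = 1, wqe_ccpe = 1 and
 * wqe_ccp = 2 << 1 = 0x4; with no per-LUN priority the global
 * cfg_XLanePriority is shifted in instead.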
*/ if ((phba->cfg_fof) && ((struct lpfc_device_data *) scsi_cmnd->device->hostdata)->oas_enabled) { lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) scsi_cmnd->device->hostdata)->priority; /* Word 10 */ bf_set(wqe_oas, &wqe->generic.wqe_com, 1); bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); if (lpfc_cmd->cur_iocbq.priority) bf_set(wqe_ccp, &wqe->generic.wqe_com, (lpfc_cmd->cur_iocbq.priority << 1)); else bf_set(wqe_ccp, &wqe->generic.wqe_com, (phba->cfg_XLanePriority << 1)); } return 0; } /** * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This is the protection/DIF aware version of * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the * two functions eventually, but for now, it's here * Return codes: * 2 - Error - Do not retry * 1 - Error - Retry * 0 - Success **/ static int lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; int ret = 1; struct lpfc_vport *vport = phba->pport; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd * fcp_rsp regions to the first data sge entry */ if (scsi_sg_count(scsi_cmnd)) { /* * The driver stores the segment count returned from dma_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ datasegcnt = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), scsi_sg_count(scsi_cmnd), datadir); if (unlikely(!datasegcnt)) return 1; sgl += 1; /* clear the last flag in the fcp_rsp map entry */ sgl->word2 = le32_to_cpu(sgl->word2); bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); sgl += 1; lpfc_cmd->seg_cnt = datasegcnt; /* First check if data segment count from SCSI Layer is good */ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && !phba->cfg_xpsgl) { WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); ret = 2; goto err; } prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: /* Here we need to add a DISEED to the count */ if (((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) && !phba->cfg_xpsgl) { ret = 2; goto err; } num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt, lpfc_cmd); /* we should have 2 or more entries in buffer list */ if (num_sge < 2) { ret = 2; goto err; } break; case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared * for DMA */ protsegcnt = dma_map_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), datadir); if (unlikely(!protsegcnt)) { scsi_dma_unmap(scsi_cmnd); return 1; } lpfc_cmd->prot_seg_cnt = protsegcnt; /* * There is a minimun of 3 SGEs used for every * protection data segment. 
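 *
 * Worked example of the capacity check below (a cfg_total_seg_cnt of
 * 64 is an assumption for illustration): with 2 entries reserved
 * ahead of the data, at most 20 protection segments fit, since
 * 20 * 3 = 60 <= 64 - 2, while 21 * 3 = 63 would not.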
*/ if (((lpfc_cmd->prot_seg_cnt * 3) > (phba->cfg_total_seg_cnt - 2)) && !phba->cfg_xpsgl) { ret = 2; goto err; } num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, datasegcnt, protsegcnt, lpfc_cmd); /* we should have 3 or more entries in buffer list */ if (num_sge < 3 || (num_sge > phba->cfg_total_seg_cnt && !phba->cfg_xpsgl)) { ret = 2; goto err; } break; case LPFC_PG_TYPE_INVALID: default: scsi_dma_unmap(scsi_cmnd); lpfc_cmd->seg_cnt = 0; lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9083 Unexpected protection group %i\n", prot_group_type); return 2; } } switch (scsi_get_prot_op(scsi_cmnd)) { case SCSI_PROT_WRITE_STRIP: case SCSI_PROT_READ_STRIP: lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP; break; case SCSI_PROT_WRITE_INSERT: case SCSI_PROT_READ_INSERT: lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT; break; case SCSI_PROT_WRITE_PASS: case SCSI_PROT_READ_PASS: lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS; break; } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* Set first-burst provided it was successfully negotiated */ if (!(phba->hba_flag & HBA_FCOE_MODE) && vport->cfg_first_burst_size && scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { u32 init_len, total_len; total_len = be32_to_cpu(fcp_cmnd->fcpDl); init_len = min(total_len, vport->cfg_first_burst_size); /* Word 4 & 5 */ wqe->fcp_iwrite.initial_xfer_len = init_len; wqe->fcp_iwrite.total_xfer_len = total_len; } else { /* Word 4 */ wqe->fcp_iwrite.total_xfer_len = be32_to_cpu(fcp_cmnd->fcpDl); } /* * If the OAS driver feature is enabled and the lun is enabled for * OAS, set the oas iocb related flags. */ if ((phba->cfg_fof) && ((struct lpfc_device_data *) scsi_cmnd->device->hostdata)->oas_enabled) { lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); /* Word 10 */ bf_set(wqe_oas, &wqe->generic.wqe_com, 1); bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); bf_set(wqe_ccp, &wqe->generic.wqe_com, (phba->cfg_XLanePriority << 1)); } /* Word 7. DIF Flags */ if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS) bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP) bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT) bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); return 0; err: if (lpfc_cmd->seg_cnt) scsi_dma_unmap(scsi_cmnd); if (lpfc_cmd->prot_seg_cnt) dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), scsi_cmnd->sc_data_direction); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9084 Cannot setup S/G List for HBA " "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n", lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_sge, ret); lpfc_cmd->seg_cnt = 0; lpfc_cmd->prot_seg_cnt = 0; return ret; } /** * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine wraps the actual DMA mapping function pointer from the * lpfc_hba struct. 
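 *
 * Minimal caller sketch (hypothetical, modeled on the handling in
 * lpfc_queuecommand(); note the SLI-4 backend can additionally return
 * 2 for a non-retryable failure):
 *
 *	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *	if (err == 2)
 *		cmnd->result = DID_ERROR << 16;	<- fail, do not retry
 *	else if (err)
 *		return SCSI_MLQUEUE_HOST_BUSY;	<- retry later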
* * Return codes: * 1 - Error * 0 - Success **/ static inline int lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } /** * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer * using BlockGuard. * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine wraps the actual DMA mapping function pointer from the * lpfc_hba struct. * * Return codes: * 1 - Error * 0 - Success **/ static inline int lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } /** * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi * buffer * @vport: Pointer to vport object. * @lpfc_cmd: The scsi buffer which is going to be mapped. * @tmo: Timeout value for IO * * This routine initializes IOCB/WQE data structure from scsi command * * Return codes: * 1 - Error * 0 - Success **/ static inline int lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, uint8_t tmo) { return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); } /** * lpfc_send_scsi_error_event - Posts an event when there is SCSI error * @phba: Pointer to hba context object. * @vport: Pointer to vport object. * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. * @fcpi_parm: FCP Initiator parameter. * * This function posts an event when there is a SCSI command reporting * error from the scsi device. **/ static void lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; uint32_t resp_info = fcprsp->rspStatus2; uint32_t scsi_status = fcprsp->rspStatus3; struct lpfc_fast_path_event *fast_path_evt = NULL; struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; unsigned long flags; if (!pnode) return; /* If there is queuefull or busy condition send a scsi event */ if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || (cmnd->result == SAM_STAT_BUSY)) { fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) return; fast_path_evt->un.scsi_evt.event_type = FC_REG_SCSI_EVENT; fast_path_evt->un.scsi_evt.subcategory = (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 
LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; memcpy(&fast_path_evt->un.scsi_evt.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fast_path_evt->un.scsi_evt.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) return; fast_path_evt->un.check_cond_evt.scsi_event.event_type = FC_REG_SCSI_EVENT; fast_path_evt->un.check_cond_evt.scsi_event.subcategory = LPFC_EVENT_CHECK_COND; fast_path_evt->un.check_cond_evt.scsi_event.lun = cmnd->device->lun; memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); fast_path_evt->un.check_cond_evt.sense_key = cmnd->sense_buffer[2] & 0xf; fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && fcpi_parm && ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || ((scsi_status == SAM_STAT_GOOD) && !(resp_info & (RESID_UNDER | RESID_OVER))))) { /* * If status is good or resid does not match with fcp_param and * there is valid fcpi_parm, then there is a read_check error */ fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) return; fast_path_evt->un.read_check_error.header.event_type = FC_REG_FABRIC_EVENT; fast_path_evt->un.read_check_error.header.subcategory = LPFC_EVENT_FCPRDCHKERR; memcpy(&fast_path_evt->un.read_check_error.header.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fast_path_evt->un.read_check_error.header.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); fast_path_evt->un.read_check_error.lun = cmnd->device->lun; fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; fast_path_evt->un.read_check_error.fcpiparam = fcpi_parm; } else return; fast_path_evt->vport = vport; spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_worker_wake_up(phba); return; } /** * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev * @phba: The HBA for which this call is being executed. * @psb: The scsi buffer which is going to be un-mapped. * * This routine does DMA un-mapping of scatter gather list of scsi command * field of @lpfc_cmd for device with SLI-3 interface spec. **/ static void lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { /* * There are only two special cases to consider. (1) the scsi command * requested scatter-gather usage or (2) the scsi command allocated * a request buffer, but did not request use_sg. There is a third * case, but it does not require resource deallocation. */ if (psb->seg_cnt > 0) scsi_dma_unmap(psb->pCmd); if (psb->prot_seg_cnt > 0) dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), scsi_prot_sg_count(psb->pCmd), psb->pCmd->sc_data_direction); } /** * lpfc_unblock_requests - allow further commands to be queued. * @phba: pointer to phba object * * For single vport, just call scsi_unblock_requests on physical port. * For multiple vports, send scsi_unblock_requests for all the vports. 
*/ void lpfc_unblock_requests(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct Scsi_Host *shost; int i; if (phba->sli_rev == LPFC_SLI_REV4 && !phba->sli4_hba.max_cfg_param.vpi_used) { shost = lpfc_shost_from_vport(phba->pport); scsi_unblock_requests(shost); return; } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); scsi_unblock_requests(shost); } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_block_requests - prevent further commands from being queued. * @phba: pointer to phba object * * For single vport, just call scsi_block_requests on physical port. * For multiple vports, send scsi_block_requests for all the vports. */ void lpfc_block_requests(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct Scsi_Host *shost; int i; if (atomic_read(&phba->cmf_stop_io)) return; if (phba->sli_rev == LPFC_SLI_REV4 && !phba->sli4_hba.max_cfg_param.vpi_used) { shost = lpfc_shost_from_vport(phba->pport); scsi_block_requests(shost); return; } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); scsi_block_requests(shost); } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion * @phba: The HBA for which this call is being executed. * @time: The latency of the IO that completed (in ns) * @size: The size of the IO that completed * @shost: SCSI host the IO completed on (NULL for a NVME IO) * * The routine adjusts the various Burst and Bandwidth counters used in * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, * that means the IO was never issued to the HBA, so this routine is * just being called to cleanup the counter from a previous * lpfc_update_cmf_cmd call. */ int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t time, uint32_t size, struct Scsi_Host *shost) { struct lpfc_cgn_stat *cgs; if (time != LPFC_CGN_NOT_SENT) { /* lat is ns coming in, save latency in us */ if (time < 1000) time = 1; else time = div_u64(time + 500, 1000); /* round it */ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); atomic64_add(size, &cgs->rcv_bytes); atomic64_add(time, &cgs->rx_latency); atomic_inc(&cgs->rx_io_cnt); } return 0; } /** * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission * @phba: The HBA for which this call is being executed. * @size: The size of the IO that will be issued * * The routine adjusts the various Burst and Bandwidth counters used in * Congestion management and E2E. 
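 * In LPFC_CFG_MANAGED mode the routine enforces the per-interval byte
 * budget: as a sketch (byte values are assumptions for illustration),
 * with cmf_max_bytes_per_interval at 10 MB and the per-CPU totals
 * already summing to 10 MB, the next submission gets -EBUSY, the SCSI
 * hosts are blocked via lpfc_block_requests() and cmf_busy is bumped;
 * otherwise the I/O size is added to the submitting CPU's total_bytes.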
*/ int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) { uint64_t total; struct lpfc_cgn_stat *cgs; int cpu; /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ if (phba->cmf_active_mode == LPFC_CFG_MANAGED && phba->cmf_max_bytes_per_interval) { total = 0; for_each_present_cpu(cpu) { cgs = per_cpu_ptr(phba->cmf_stat, cpu); total += atomic64_read(&cgs->total_bytes); } if (total >= phba->cmf_max_bytes_per_interval) { if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { lpfc_block_requests(phba); phba->cmf_last_ts = lpfc_calc_cmf_latency(phba); } atomic_inc(&phba->cmf_busy); return -EBUSY; } if (size > atomic_read(&phba->rx_max_read_cnt)) atomic_set(&phba->rx_max_read_cnt, size); } cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); atomic64_add(size, &cgs->total_bytes); return 0; } /** * lpfc_handle_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_io_buf data structure. * @fcpi_parm: FCP Initiator parameter. * * This routine is called to process response IOCB with status field * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command * based upon SCSI and FCP error. **/ static void lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; uint32_t resp_info = fcprsp->rspStatus2; uint32_t scsi_status = fcprsp->rspStatus3; uint32_t *lp; uint32_t host_status = DID_OK; uint32_t rsplen = 0; uint32_t fcpDl; uint32_t logit = LOG_FCP | LOG_FCP_ERROR; /* * If this is a task management command, there is no * scsi packet associated with this lpfc_cmd. The driver * consumes it. */ if (fcpcmd->fcpCntl2) { scsi_status = 0; goto out; } if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); if (rsplen != 0 && rsplen != 4 && rsplen != 8) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2719 Invalid response length: " "tgt x%x lun x%llx cmnd x%x rsplen " "x%x\n", cmnd->device->id, cmnd->device->lun, cmnd->cmnd[0], rsplen); host_status = DID_ERROR; goto out; } if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2757 Protocol failure detected during " "processing of FCP I/O op: " "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", cmnd->device->id, cmnd->device->lun, cmnd->cmnd[0], fcprsp->rspInfo3); host_status = DID_ERROR; goto out; } } if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); if (snslen > SCSI_SENSE_BUFFERSIZE) snslen = SCSI_SENSE_BUFFERSIZE; if (resp_info & RSP_LEN_VALID) rsplen = be32_to_cpu(fcprsp->rspRspLen); memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); } lp = (uint32_t *)cmnd->sense_buffer; /* special handling for under run conditions */ if (!scsi_status && (resp_info & RESID_UNDER)) { /* don't log under runs if fcp set... 
*/ if (vport->cfg_log_verbose & LOG_FCP) logit = LOG_FCP_ERROR; /* unless operator says so */ if (vport->cfg_log_verbose & LOG_FCP_UNDER) logit = LOG_FCP_UNDER; } lpfc_printf_vlog(vport, KERN_WARNING, logit, "9024 FCP command x%x failed: x%x SNS x%x x%x " "Data: x%x x%x x%x x%x x%x\n", cmnd->cmnd[0], scsi_status, be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, be32_to_cpu(fcprsp->rspResId), be32_to_cpu(fcprsp->rspSnsLen), be32_to_cpu(fcprsp->rspRspLen), fcprsp->rspInfo3); scsi_set_resid(cmnd, 0); fcpDl = be32_to_cpu(fcpcmd->fcpDl); if (resp_info & RESID_UNDER) { scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, "9025 FCP Underrun, expected %d, " "residual %d Data: x%x x%x x%x\n", fcpDl, scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], cmnd->underflow); /* * If there is an under run, check if under run reported by * storage array is same as the under run reported by HBA. * If this is not same, there is a dropped frame. */ if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, "9026 FCP Read Check Error " "and Underrun Data: x%x x%x x%x x%x\n", fcpDl, scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0]); scsi_set_resid(cmnd, scsi_bufflen(cmnd)); host_status = DID_ERROR; } /* * The cmnd->underflow is the minimum number of bytes that must * be transferred for this command. Provided a sense condition * is not present, make sure the actual amount transferred is at * least the underflow value or fail. */ if (!(resp_info & SNS_LEN_VALID) && (scsi_status == SAM_STAT_GOOD) && (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9027 FCP command x%x residual " "underrun converted to error " "Data: x%x x%x x%x\n", cmnd->cmnd[0], scsi_bufflen(cmnd), scsi_get_resid(cmnd), cmnd->underflow); host_status = DID_ERROR; } } else if (resp_info & RESID_OVER) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "9028 FCP command x%x residual overrun error. " "Data: x%x x%x\n", cmnd->cmnd[0], scsi_bufflen(cmnd), scsi_get_resid(cmnd)); host_status = DID_ERROR; /* * Check SLI validation that all the transfer was actually done * (fcpi_parm should be zero). Apply check only to reads. */ } else if (fcpi_parm) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, "9029 FCP %s Check Error Data: " "x%x x%x x%x x%x x%x\n", ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? "Read" : "Write"), fcpDl, be32_to_cpu(fcprsp->rspResId), fcpi_parm, cmnd->cmnd[0], scsi_status); /* There is some issue with the LPe12000 that causes it * to miscalculate the fcpi_parm and falsely trip this * recovery logic. Detect this case and don't error when true. */ if (fcpi_parm > fcpDl) goto out; switch (scsi_status) { case SAM_STAT_GOOD: case SAM_STAT_CHECK_CONDITION: /* Fabric dropped a data frame. Fail any successful * command in which we detected dropped frames. * A status of good or some check conditions could * be considered a successful command. */ host_status = DID_ERROR; break; } scsi_set_resid(cmnd, scsi_bufflen(cmnd)); } out: cmnd->result = host_status << 16 | scsi_status; lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); } /** * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO * @phba: The hba for which this call is being executed. * @pwqeIn: The command WQE for the scsi cmnd. * @pwqeOut: Pointer to driver response WQE object. * * This routine assigns scsi command result by looking into response WQE * status field appropriately. 
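 * (For example, a CQE_STATUS_DI_ERROR completion on a protected
 * command is handed to lpfc_parse_bg_err(), while the same status on
 * an unprotected command is failed with DID_ERROR.)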
This routine handles QUEUE FULL condition as * well by ramping down device queue depth. **/ static void lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, struct lpfc_iocbq *pwqeOut) { struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf; struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; struct lpfc_vport *vport = pwqeIn->vport; struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; struct scsi_cmnd *cmd; unsigned long flags; struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost; u32 logit = LOG_FCP; u32 idx; u32 lat; u8 wait_xb_clr = 0; /* Sanity check on return of outstanding command */ if (!lpfc_cmd) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "9032 Null lpfc_cmd pointer. No " "release, skip completion\n"); return; } rdata = lpfc_cmd->rdata; ndlp = rdata->pnode; /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; if (!cmd) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "9042 I/O completion: Not an active IO\n"); lpfc_release_scsi_buf(phba, lpfc_cmd); return; } /* Guard against abort handler being called at same time */ spin_lock(&lpfc_cmd->buf_lock); idx = lpfc_cmd->cur_iocbq.hba_wqidx; if (phba->sli4_hba.hdwq) phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); #endif shost = cmd->device->host; lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; if (bf_get(lpfc_wcqe_c_xb, wcqe)) { lpfc_cmd->flags |= LPFC_SBUF_XBUSY; if (phba->cfg_fcp_wait_abts_rsp) wait_xb_clr = 1; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->prot_data_type) { struct scsi_dif_tuple *src = NULL; src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; /* * Used to restore any changes to protection * data for error injection. */ switch (lpfc_cmd->prot_data_type) { case LPFC_INJERR_REFTAG: src->ref_tag = lpfc_cmd->prot_data; break; case LPFC_INJERR_APPTAG: src->app_tag = (uint16_t)lpfc_cmd->prot_data; break; case LPFC_INJERR_GUARD: src->guard_tag = (uint16_t)lpfc_cmd->prot_data; break; default: break; } lpfc_cmd->prot_data = 0; lpfc_cmd->prot_data_type = 0; lpfc_cmd->prot_data_segment = NULL; } #endif if (unlikely(lpfc_cmd->status)) { if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && !lpfc_cmd->fcp_rsp->rspStatus3 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && !(vport->cfg_log_verbose & LOG_FCP_UNDER)) logit = 0; else logit = LOG_FCP | LOG_FCP_UNDER; lpfc_printf_vlog(vport, KERN_WARNING, logit, "9034 FCP cmd x%x failed <%d/%lld> " "status: x%x result: x%x " "sid: x%x did: x%x oxid: x%x " "Data: x%x x%x x%x\n", cmd->cmnd[0], cmd->device ? cmd->device->id : 0xffff, cmd->device ? cmd->device->lun : 0xffff, lpfc_cmd->status, lpfc_cmd->result, vport->fc_myDID, (ndlp) ? 
ndlp->nlp_DID : 0, lpfc_cmd->cur_iocbq.sli4_xritag, wcqe->parameter, wcqe->total_data_placed, lpfc_cmd->cur_iocbq.iotag); } switch (lpfc_cmd->status) { case CQE_STATUS_SUCCESS: cmd->result = DID_OK << 16; break; case CQE_STATUS_FCP_RSP_FAILURE: lpfc_handle_fcp_err(vport, lpfc_cmd, pwqeIn->wqe.fcp_iread.total_xfer_len - wcqe->total_data_placed); break; case CQE_STATUS_NPORT_BSY: case CQE_STATUS_FABRIC_BSY: cmd->result = DID_TRANSPORT_DISRUPTED << 16; fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) break; fast_path_evt->un.fabric_evt.event_type = FC_REG_FABRIC_EVENT; fast_path_evt->un.fabric_evt.subcategory = (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; if (ndlp) { memcpy(&fast_path_evt->un.fabric_evt.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fast_path_evt->un.fabric_evt.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); } fast_path_evt->vport = vport; fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_worker_wake_up(phba); lpfc_printf_vlog(vport, KERN_WARNING, logit, "9035 Fabric/Node busy FCP cmd x%x failed" " <%d/%lld> " "status: x%x result: x%x " "sid: x%x did: x%x oxid: x%x " "Data: x%x x%x x%x\n", cmd->cmnd[0], cmd->device ? cmd->device->id : 0xffff, cmd->device ? cmd->device->lun : 0xffff, lpfc_cmd->status, lpfc_cmd->result, vport->fc_myDID, (ndlp) ? ndlp->nlp_DID : 0, lpfc_cmd->cur_iocbq.sli4_xritag, wcqe->parameter, wcqe->total_data_placed, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); break; case CQE_STATUS_DI_ERROR: if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) lpfc_cmd->result = IOERR_RX_DMA_FAILED; else lpfc_cmd->result = IOERR_TX_DMA_FAILED; lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, "9048 DI Error xri x%x status x%x DI ext " "status x%x data placed x%x\n", lpfc_cmd->cur_iocbq.sli4_xritag, lpfc_cmd->status, wcqe->parameter, wcqe->total_data_placed); if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { /* BG enabled cmd. Parse BG error */ lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); break; } cmd->result = DID_ERROR << 16; lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9040 DI Error on unprotected cmd\n"); break; case CQE_STATUS_REMOTE_STOP: if (ndlp) { /* This I/O was aborted by the target, we don't * know the rxid and because we did not send the * ABTS we cannot generate and RRQ. */ lpfc_set_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag, 0, 0); } fallthrough; case CQE_STATUS_LOCAL_REJECT: if (lpfc_cmd->result & IOERR_DRVR_MASK) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { cmd->result = DID_NO_CONNECT << 16; break; } if (lpfc_cmd->result == IOERR_INVALID_RPI || lpfc_cmd->result == IOERR_LINK_DOWN || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || lpfc_cmd->result == IOERR_RPI_SUSPENDED || lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { cmd->result = DID_TRANSPORT_DISRUPTED << 16; break; } lpfc_printf_vlog(vport, KERN_WARNING, logit, "9036 Local Reject FCP cmd x%x failed" " <%d/%lld> " "status: x%x result: x%x " "sid: x%x did: x%x oxid: x%x " "Data: x%x x%x x%x\n", cmd->cmnd[0], cmd->device ? cmd->device->id : 0xffff, cmd->device ? 
cmd->device->lun : 0xffff, lpfc_cmd->status, lpfc_cmd->result, vport->fc_myDID, (ndlp) ? ndlp->nlp_DID : 0, lpfc_cmd->cur_iocbq.sli4_xritag, wcqe->parameter, wcqe->total_data_placed, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); fallthrough; default: cmd->result = DID_ERROR << 16; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9037 FCP Completion Error: xri %x " "status x%x result x%x [x%x] " "placed x%x\n", lpfc_cmd->cur_iocbq.sli4_xritag, lpfc_cmd->status, lpfc_cmd->result, wcqe->parameter, wcqe->total_data_placed); } if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { u32 *lp = (u32 *)cmd->sense_buffer; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9039 Iodone <%d/%llu> cmd x%px, error " "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", cmd->device->id, cmd->device->lun, cmd, cmd->result, *lp, *(lp + 3), (cmd->device->sector_size) ? (u64)scsi_get_lba(cmd) : 0, cmd->retries, scsi_get_resid(cmd)); } if (vport->cfg_max_scsicmpl_time && time_after(jiffies, lpfc_cmd->start_time + msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { spin_lock_irqsave(shost->host_lock, flags); if (ndlp) { if (ndlp->cmd_qdepth > atomic_read(&ndlp->cmd_pending) && (atomic_read(&ndlp->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == WRITE_10)) ndlp->cmd_qdepth = atomic_read(&ndlp->cmd_pending); ndlp->last_change_time = jiffies; } spin_unlock_irqrestore(shost->host_lock, flags); } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->ts_cmd_start) { lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; lpfc_cmd->ts_data_io = ktime_get_ns(); phba->ktime_last_cmd = lpfc_cmd->ts_data_io; lpfc_io_ktime(phba, lpfc_cmd); } #endif if (likely(!wait_xb_clr)) lpfc_cmd->pCmd = NULL; spin_unlock(&lpfc_cmd->buf_lock); /* Check if IO qualified for CMF */ if (phba->cmf_active_mode != LPFC_CFG_OFF && cmd->sc_data_direction == DMA_FROM_DEVICE && (scsi_sg_count(cmd))) { /* Used when calculating average latency */ lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); } if (wait_xb_clr) goto out; /* The sdev is not guaranteed to be valid post scsi_done upcall. */ scsi_done(cmd); /* * If there is an abort thread waiting for command completion * wake up the thread. */ spin_lock(&lpfc_cmd->buf_lock); lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); spin_unlock(&lpfc_cmd->buf_lock); out: lpfc_release_scsi_buf(phba, lpfc_cmd); } /** * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine * @phba: The Hba for which this call is being executed. * @pIocbIn: The command IOCBQ for the scsi cmnd. * @pIocbOut: The response IOCBQ for the scsi cmnd. * * This routine assigns scsi command result by looking into response IOCB * status field appropriately. This routine handles QUEUE FULL condition as * well by ramping down device queue depth. 
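 *
 * Ramp-down example (sketch, values for illustration only): if a
 * READ_10 completes more than cfg_max_scsicmpl_time ms after it was
 * started while 40 commands are still pending against a target queue
 * depth of 64, the node's cmd_qdepth is lowered to 40, provided that
 * is still above LPFC_MIN_TGT_QDEPTH.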
**/ static void lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_iocbq *pIocbOut) { struct lpfc_io_buf *lpfc_cmd = (struct lpfc_io_buf *) pIocbIn->io_buf; struct lpfc_vport *vport = pIocbIn->vport; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *pnode = rdata->pnode; struct scsi_cmnd *cmd; unsigned long flags; struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost; int idx; uint32_t logit = LOG_FCP; /* Guard against abort handler being called at same time */ spin_lock(&lpfc_cmd->buf_lock); /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; if (!cmd || !phba) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2621 IO completion: Not an active IO\n"); spin_unlock(&lpfc_cmd->buf_lock); return; } idx = lpfc_cmd->cur_iocbq.hba_wqidx; if (phba->sli4_hba.hdwq) phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); #endif shost = cmd->device->host; lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); lpfc_cmd->status = pIocbOut->iocb.ulpStatus; /* pick up SLI4 exchange busy status from HBA */ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->prot_data_type) { struct scsi_dif_tuple *src = NULL; src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; /* * Used to restore any changes to protection * data for error injection. */ switch (lpfc_cmd->prot_data_type) { case LPFC_INJERR_REFTAG: src->ref_tag = lpfc_cmd->prot_data; break; case LPFC_INJERR_APPTAG: src->app_tag = (uint16_t)lpfc_cmd->prot_data; break; case LPFC_INJERR_GUARD: src->guard_tag = (uint16_t)lpfc_cmd->prot_data; break; default: break; } lpfc_cmd->prot_data = 0; lpfc_cmd->prot_data_type = 0; lpfc_cmd->prot_data_segment = NULL; } #endif if (unlikely(lpfc_cmd->status)) { if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && (lpfc_cmd->result & IOERR_DRVR_MASK)) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; else if (lpfc_cmd->status >= IOSTAT_CNT) lpfc_cmd->status = IOSTAT_DEFAULT; if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && !lpfc_cmd->fcp_rsp->rspStatus3 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && !(vport->cfg_log_verbose & LOG_FCP_UNDER)) logit = 0; else logit = LOG_FCP | LOG_FCP_UNDER; lpfc_printf_vlog(vport, KERN_WARNING, logit, "9030 FCP cmd x%x failed <%d/%lld> " "status: x%x result: x%x " "sid: x%x did: x%x oxid: x%x " "Data: x%x x%x\n", cmd->cmnd[0], cmd->device ? cmd->device->id : 0xffff, cmd->device ? cmd->device->lun : 0xffff, lpfc_cmd->status, lpfc_cmd->result, vport->fc_myDID, (pnode) ? pnode->nlp_DID : 0, phba->sli_rev == LPFC_SLI_REV4 ? lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, pIocbOut->iocb.ulpContext, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); switch (lpfc_cmd->status) { case IOSTAT_FCP_RSP_ERROR: /* Call FCP RSP handler to determine result */ lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut->iocb.un.fcpi.fcpi_parm); break; case IOSTAT_NPORT_BSY: case IOSTAT_FABRIC_BSY: cmd->result = DID_TRANSPORT_DISRUPTED << 16; fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) break; fast_path_evt->un.fabric_evt.event_type = FC_REG_FABRIC_EVENT; fast_path_evt->un.fabric_evt.subcategory = (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; if (pnode) { memcpy(&fast_path_evt->un.fabric_evt.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fast_path_evt->un.fabric_evt.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); } fast_path_evt->vport = vport; fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_worker_wake_up(phba); break; case IOSTAT_LOCAL_REJECT: case IOSTAT_REMOTE_STOP: if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { cmd->result = DID_NO_CONNECT << 16; break; } if (lpfc_cmd->result == IOERR_INVALID_RPI || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { cmd->result = DID_TRANSPORT_DISRUPTED << 16; break; } if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || lpfc_cmd->result == IOERR_TX_DMA_FAILED) && pIocbOut->iocb.unsli3.sli3_bg.bgstat) { if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { /* * This is a response for a BG enabled * cmd. Parse BG error */ lpfc_parse_bg_err(phba, lpfc_cmd, pIocbOut); break; } else { lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, "9031 non-zero BGSTAT " "on unprotected cmd\n"); } } if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) && (phba->sli_rev == LPFC_SLI_REV4) && pnode) { /* This IO was aborted by the target, we don't * know the rxid and because we did not send the * ABTS we cannot generate and RRQ. */ lpfc_set_rrq_active(phba, pnode, lpfc_cmd->cur_iocbq.sli4_lxritag, 0, 0); } fallthrough; default: cmd->result = DID_ERROR << 16; break; } if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) cmd->result = DID_TRANSPORT_DISRUPTED << 16 | SAM_STAT_BUSY; } else cmd->result = DID_OK << 16; if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { uint32_t *lp = (uint32_t *)cmd->sense_buffer; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0710 Iodone <%d/%llu> cmd x%px, error " "x%x SNS x%x x%x Data: x%x x%x\n", cmd->device->id, cmd->device->lun, cmd, cmd->result, *lp, *(lp + 3), cmd->retries, scsi_get_resid(cmd)); } if (vport->cfg_max_scsicmpl_time && time_after(jiffies, lpfc_cmd->start_time + msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { spin_lock_irqsave(shost->host_lock, flags); if (pnode) { if (pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))) pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending); pnode->last_change_time = jiffies; } spin_unlock_irqrestore(shost->host_lock, flags); } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_cmd->pCmd = NULL; spin_unlock(&lpfc_cmd->buf_lock); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->ts_cmd_start) { lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; lpfc_cmd->ts_data_io = ktime_get_ns(); phba->ktime_last_cmd = lpfc_cmd->ts_data_io; lpfc_io_ktime(phba, lpfc_cmd); } #endif /* The sdev is not guaranteed to be valid post scsi_done upcall. */ scsi_done(cmd); /* * If there is an abort thread waiting for command completion * wake up the thread. 
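 * (Sketch of the handshake below: under buf_lock the completion path
 * clears LPFC_DRIVER_ABORTED and, if an error-handler thread has
 * parked itself on lpfc_cmd->waitq, wakes it so the abort/reset path
 * can see the command finish.)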
*/ spin_lock(&lpfc_cmd->buf_lock); lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); spin_unlock(&lpfc_cmd->buf_lock); lpfc_release_scsi_buf(phba, lpfc_cmd); } /** * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO * @vport: Pointer to vport object. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. * @tmo: timeout value for the IO * * Based on the data-direction of the command, initialize IOCB * in the I/O buffer. Fill in the IOCB fields which are independent * of the scsi buffer * * RETURNS 0 - SUCCESS, **/ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, uint8_t tmo) { IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; int datadir = scsi_cmnd->sc_data_direction; u32 fcpdl; piocbq->iocb.un.fcpi.fcpi_XRdy = 0; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. */ if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; if (vport->cfg_first_burst_size && (pnode->nlp_flag & NLP_FIRSTBURST)) { u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); xrdy_len = min(fcpdl, vport->cfg_first_burst_size); piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; } fcp_cmnd->fcpCntl3 = WRITE_DATA; } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = READ_DATA; } } else { iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = 0; } /* * Finish initializing those IOCB fields that are independent * of the scsi_cmnd request_buffer */ piocbq->iocb.ulpContext = pnode->nlp_rpi; if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) piocbq->iocb.ulpFCP2Rcvy = 1; else piocbq->iocb.ulpFCP2Rcvy = 0; piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); piocbq->io_buf = lpfc_cmd; if (!piocbq->cmd_cmpl) piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; piocbq->iocb.ulpTimeout = tmo; piocbq->vport = vport; return 0; } /** * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO * @vport: Pointer to vport object. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. * @tmo: timeout value for the IO * * Based on the data-direction of the command copy WQE template * to I/O buffer WQE. Fill in the WQE fields which are independent * of the scsi buffer * * RETURNS 0 - SUCCESS, **/ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, uint8_t tmo) { struct lpfc_hba *phba = vport->phba; struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct lpfc_sli4_hdw_queue *hdwq = NULL; struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; union lpfc_wqe128 *wqe = &pwqeq->wqe; u16 idx = lpfc_cmd->hdwq_no; int datadir = scsi_cmnd->sc_data_direction; hdwq = &phba->sli4_hba.hdwq[idx]; /* Initialize 64 bytes only */ memset(wqe, 0, sizeof(union lpfc_wqe128)); /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. 
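 * (Sketch of the selection below: a write copies WQE words 7-11 from
 * the iwrite template, a read copies words 7-11 from the iread
 * template and clears wqe_iod when the port is CMF managed, and a
 * non-data command copies words 4-11 from the icmnd template.)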
*/ if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { /* From the iwrite template, initialize words 7 - 11 */ memcpy(&wqe->words[7], &lpfc_iwrite_cmd_template.words[7], sizeof(uint32_t) * 5); fcp_cmnd->fcpCntl3 = WRITE_DATA; if (hdwq) hdwq->scsi_cstat.output_requests++; } else { /* From the iread template, initialize words 7 - 11 */ memcpy(&wqe->words[7], &lpfc_iread_cmd_template.words[7], sizeof(uint32_t) * 5); /* Word 7 */ bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); fcp_cmnd->fcpCntl3 = READ_DATA; if (hdwq) hdwq->scsi_cstat.input_requests++; /* For a CMF Managed port, iod must be zero'ed */ if (phba->cmf_active_mode == LPFC_CFG_MANAGED) bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_NONE); } } else { /* From the icmnd template, initialize words 4 - 11 */ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], sizeof(uint32_t) * 8); /* Word 7 */ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); fcp_cmnd->fcpCntl3 = 0; if (hdwq) hdwq->scsi_cstat.control_requests++; } /* * Finish initializing those WQE fields that are independent * of the request_buffer */ /* Word 3 */ bf_set(payload_offset_len, &wqe->fcp_icmd, sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); /* Word 7*/ if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) bf_set(wqe_erp, &wqe->generic.wqe_com, 1); bf_set(wqe_class, &wqe->generic.wqe_com, (pnode->nlp_fcp_info & 0x0f)); /* Word 8 */ wqe->generic.wqe_com.abort_tag = pwqeq->iotag; /* Word 9 */ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); pwqeq->vport = vport; pwqeq->io_buf = lpfc_cmd; pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; return 0; } /** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: The scsi command which needs to send. * @pnode: Pointer to lpfc_nodelist. * * This routine initializes fcp_cmnd and iocb data structure from scsi command * to transfer for device with SLI3 interface spec. **/ static int lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; u8 *ptr; if (!pnode) return 0; lpfc_cmd->fcp_rsp->rspSnsLen = 0; /* clear task management bits */ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; int_to_scsilun(lpfc_cmd->pCmd->device->lun, &lpfc_cmd->fcp_cmnd->fcp_lun); ptr = &fcp_cmnd->fcpCdb[0]; memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { ptr += scsi_cmnd->cmd_len; memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); } fcp_cmnd->fcpCntl1 = SIMPLE_Q; lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); return 0; } /** * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_io_buf data structure. * @lun: Logical unit number. * @task_mgmt_cmd: SCSI task management command. * * This routine creates FCP information unit corresponding to @task_mgmt_cmd * for device with SLI-3 interface spec. 
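 *
 * Minimal caller sketch (hypothetical; FCP_LUN_RESET and the error
 * handling are illustrative assumptions, not taken from this routine):
 *
 *	if (!phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
 *						FCP_LUN_RESET)) {
 *		lpfc_release_scsi_buf(phba, lpfc_cmd);
 *		return FAILED;	<- node not mapped, nothing was built
 *	}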
* * Return codes: * 0 - Error * 1 - Success **/ static int lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, u64 lun, u8 task_mgmt_cmd) { struct lpfc_iocbq *piocbq; IOCB_t *piocb; struct fcp_cmnd *fcp_cmnd; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *ndlp = rdata->pnode; if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) return 0; piocbq = &(lpfc_cmd->cur_iocbq); piocbq->vport = vport; piocb = &piocbq->iocb; fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Clear out any old data in the FCP command area */ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); int_to_scsilun(lun, &fcp_cmnd->fcp_lun); fcp_cmnd->fcpCntl2 = task_mgmt_cmd; if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); piocb->ulpCommand = CMD_FCP_ICMND64_CR; piocb->ulpContext = ndlp->nlp_rpi; piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); piocb->ulpPU = 0; piocb->un.fcpi.fcpi_parm = 0; /* ulpTimeout is only one byte */ if (lpfc_cmd->timeout > 0xff) { /* * Do not timeout the command at the firmware level. * The driver will provide the timeout mechanism. */ piocb->ulpTimeout = 0; } else piocb->ulpTimeout = lpfc_cmd->timeout; return 1; } /** * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_io_buf data structure. * @lun: Logical unit number. * @task_mgmt_cmd: SCSI task management command. * * This routine creates FCP information unit corresponding to @task_mgmt_cmd * for device with SLI-4 interface spec. * * Return codes: * 0 - Error * 1 - Success **/ static int lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, u64 lun, u8 task_mgmt_cmd) { struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; struct fcp_cmnd *fcp_cmnd; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *ndlp = rdata->pnode; if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) return 0; pwqeq->vport = vport; /* Initialize 64 bytes only */ memset(wqe, 0, sizeof(union lpfc_wqe128)); /* From the icmnd template, initialize words 4 - 11 */ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], sizeof(uint32_t) * 8); fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Clear out any old data in the FCP command area */ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); int_to_scsilun(lun, &fcp_cmnd->fcp_lun); fcp_cmnd->fcpCntl3 = 0; fcp_cmnd->fcpCntl2 = task_mgmt_cmd; bf_set(payload_offset_len, &wqe->fcp_icmd, sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); bf_set(cmd_buff_len, &wqe->fcp_icmd, 0); bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, (ndlp->nlp_fcp_info & 0x0f)); /* ulpTimeout is only one byte */ if (lpfc_cmd->timeout > 0xff) { /* * Do not timeout the command at the firmware level. * The driver will provide the timeout mechanism. 
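 * For example, a 300 second task management timeout does not fit in
 * the one-byte WQE timeout field (maximum 0xff = 255 seconds), so
 * wqe_tmo is left at 0 and only the driver-level timer applies,
 * whereas a 60 second timeout is programmed into the WQE directly.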
*/ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0); } else { bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout); } lpfc_prep_embed_io(vport->phba, lpfc_cmd); bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); wqe->generic.wqe_com.abort_tag = pwqeq->iotag; bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); return 1; } /** * lpfc_scsi_api_table_setup - Set up scsi api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * * This routine sets up the SCSI interface API function jump table in @phba * struct. * Returns: 0 - success, -ENODEV - failure. **/ int lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; phba->lpfc_scsi_prep_task_mgmt_cmd = lpfc_scsi_prep_task_mgmt_cmd_s3; break; case LPFC_PCI_DEV_OC: phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; phba->lpfc_scsi_prep_task_mgmt_cmd = lpfc_scsi_prep_task_mgmt_cmd_s4; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1418 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; } phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; return 0; } /** * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command * @phba: The Hba for which this call is being executed. * @cmdiocbq: Pointer to lpfc_iocbq data structure. * @rspiocbq: Pointer to lpfc_iocbq data structure. * * This routine is IOCB completion routine for device reset and target reset * routine. This routine release scsi buffer associated with lpfc_cmd. **/ static void lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_iocbq *rspiocbq) { struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf; if (lpfc_cmd) lpfc_release_scsi_buf(phba, lpfc_cmd); return; } /** * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check * if issuing a pci_bus_reset is possibly unsafe * @phba: lpfc_hba pointer. * * Description: * Walks the bus_list to ensure only PCI devices with Emulex * vendor id, device ids that support hot reset, and only one occurrence * of function 0. 
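 *
 * For example (sketch): a single dual-port SLI-4 FC adapter appears
 * as functions 0 and 1 of one Emulex device and passes, while a
 * non-Emulex function, an FCoE-mode or pre-SLI-4 HBA, or a second
 * function-0 device on the same secondary bus results in -EBADSLT.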
* * Returns: * -EBADSLT, detected invalid device * 0, successful */ int lpfc_check_pci_resettable(struct lpfc_hba *phba) { const struct pci_dev *pdev = phba->pcidev; struct pci_dev *ptr = NULL; u8 counter = 0; /* Walk the list of devices on the pci_dev's bus */ list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { /* Check for Emulex Vendor ID */ if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8346 Non-Emulex vendor found: " "0x%04x\n", ptr->vendor); return -EBADSLT; } /* Check for valid Emulex Device ID */ if (phba->sli_rev != LPFC_SLI_REV4 || phba->hba_flag & HBA_FCOE_MODE) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8347 Incapable PCI reset device: " "0x%04x\n", ptr->device); return -EBADSLT; } /* Check for only one function 0 ID to ensure only one HBA on * secondary bus */ if (ptr->devfn == 0) { if (++counter > 1) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8348 More than one device on " "secondary bus found\n"); return -EBADSLT; } } } return 0; } /** * lpfc_info - Info entry point of scsi_host_template data structure * @host: The scsi host for which this call is being executed. * * This routine provides module information about hba. * * Reutrn code: * Pointer to char - Success. **/ const char * lpfc_info(struct Scsi_Host *host) { struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; struct lpfc_hba *phba = vport->phba; int link_speed = 0; static char lpfcinfobuf[384]; char tmp[384] = {0}; memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); if (phba && phba->pcidev){ /* Model Description */ scnprintf(tmp, sizeof(tmp), phba->ModelDesc); if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= sizeof(lpfcinfobuf)) goto buffer_done; /* PCI Info */ scnprintf(tmp, sizeof(tmp), " on PCI bus %02x device %02x irq %d", phba->pcidev->bus->number, phba->pcidev->devfn, phba->pcidev->irq); if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= sizeof(lpfcinfobuf)) goto buffer_done; /* Port Number */ if (phba->Port[0]) { scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= sizeof(lpfcinfobuf)) goto buffer_done; } /* Link Speed */ link_speed = lpfc_sli_port_speed_get(phba); if (link_speed != 0) { scnprintf(tmp, sizeof(tmp), " Logical Link Speed: %d Mbps", link_speed); if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= sizeof(lpfcinfobuf)) goto buffer_done; } /* PCI resettable */ if (!lpfc_check_pci_resettable(phba)) { scnprintf(tmp, sizeof(tmp), " PCI resettable"); strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); } } buffer_done: return lpfcinfobuf; } /** * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba * @phba: The Hba for which this call is being executed. * * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. * The default value of cfg_poll_tmo is 10 milliseconds. **/ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) { unsigned long poll_tmo_expires = (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) mod_timer(&phba->fcp_poll_timer, poll_tmo_expires); } /** * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA * @phba: The Hba for which this call is being executed. * * This routine starts the fcp_poll_timer of @phba. **/ void lpfc_poll_start_timer(struct lpfc_hba * phba) { lpfc_poll_rearm_timer(phba); } /** * lpfc_poll_timeout - Restart polling timer * @t: Timer construct where lpfc_hba data structure pointer is obtained. 
* * This routine restarts fcp_poll timer, when FCP ring polling is enable * and FCP Ring interrupt is disable. **/ void lpfc_poll_timeout(struct timer_list *t) { struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } } /* * lpfc_is_command_vm_io - get the UUID from blk cgroup * @cmd: Pointer to scsi_cmnd data structure * Returns UUID if present, otherwise NULL */ static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) { struct bio *bio = scsi_cmd_to_rq(cmd)->bio; if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio) return NULL; return blkcg_get_fc_appid(bio); } /** * lpfc_queuecommand - scsi_host_template queuecommand entry point * @shost: kernel scsi host pointer. * @cmnd: Pointer to scsi_cmnd data structure. * * Driver registers this routine to scsi midlayer to submit a @cmd to process. * This routine prepares an IOCB from scsi command and provides to firmware. * The @done callback is invoked after driver finished processing the command. * * Return value : * 0 - Success * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. **/ static int lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cur_iocbq = NULL; struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; struct lpfc_io_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err, idx; u8 *uuid = NULL; uint64_t start; start = ktime_get_ns(); rdata = lpfc_rport_data_from_scsi_device(cmnd->device); /* sanity check on references */ if (unlikely(!rdata) || unlikely(!rport)) goto out_fail_command; err = fc_remote_port_chkready(rport); if (err) { cmnd->result = err; goto out_fail_command; } ndlp = rdata->pnode; if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" " op:%02x str=%s without registering for" " BlockGuard - Rejecting command\n", cmnd->cmnd[0], scsi_get_prot_op(cmnd), dif_op_str[scsi_get_prot_op(cmnd)]); goto out_fail_command; } /* * Catch race where our node has transitioned, but the * transport is still transitioning. 
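 * A NULL pnode at this point means the login has not completed, so the
 * command is failed back with SCSI_MLQUEUE_TARGET_BUSY and the midlayer
 * retries it later.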
*/ if (!ndlp) goto out_tgt_busy1; /* Check if IO qualifies for CMF */ if (phba->cmf_active_mode != LPFC_CFG_OFF && cmnd->sc_data_direction == DMA_FROM_DEVICE && (scsi_sg_count(cmnd))) { /* Latency start time saved in rx_cmd_start later in routine */ err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); if (err) goto out_tgt_busy1; } if (lpfc_ndlp_check_qdepth(phba, ndlp)) { if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, "3377 Target Queue Full, scsi Id:%d " "Qdepth:%d Pending command:%d" " WWNN:%02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x, " " WWPN:%02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x", ndlp->nlp_sid, ndlp->cmd_qdepth, atomic_read(&ndlp->cmd_pending), ndlp->nlp_nodename.u.wwn[0], ndlp->nlp_nodename.u.wwn[1], ndlp->nlp_nodename.u.wwn[2], ndlp->nlp_nodename.u.wwn[3], ndlp->nlp_nodename.u.wwn[4], ndlp->nlp_nodename.u.wwn[5], ndlp->nlp_nodename.u.wwn[6], ndlp->nlp_nodename.u.wwn[7], ndlp->nlp_portname.u.wwn[0], ndlp->nlp_portname.u.wwn[1], ndlp->nlp_portname.u.wwn[2], ndlp->nlp_portname.u.wwn[3], ndlp->nlp_portname.u.wwn[4], ndlp->nlp_portname.u.wwn[5], ndlp->nlp_portname.u.wwn[6], ndlp->nlp_portname.u.wwn[7]); goto out_tgt_busy2; } } lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); if (lpfc_cmd == NULL) { lpfc_rampdown_queue_depth(phba); lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, "0707 driver's buffer pool is empty, " "IO busied\n"); goto out_host_busy; } lpfc_cmd->rx_cmd_start = start; cur_iocbq = &lpfc_cmd->cur_iocbq; /* * Store the midlayer's command structure for the completion phase * and complete the command initialization. */ lpfc_cmd->pCmd = cmnd; lpfc_cmd->rdata = rdata; lpfc_cmd->ndlp = ndlp; cur_iocbq->cmd_cmpl = NULL; cmnd->host_scribble = (unsigned char *)lpfc_cmd; err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); if (err) goto out_host_busy_release_buf; if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " "reftag x%x cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], cmnd->cmnd[0], scsi_prot_ref_tag(cmnd), scsi_logical_block_count(cmnd), (cmnd->cmnd[1]>>5)); } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x reftag x%x cnt %u pt %x\n", cmnd->cmnd[0], scsi_prot_ref_tag(cmnd), scsi_logical_block_count(cmnd), (cmnd->cmnd[1]>>5)); } err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } if (unlikely(err)) { if (err == 2) { cmnd->result = DID_ERROR << 16; goto out_fail_command_release_buf; } goto out_host_busy_free_buf; } /* check the necessary and sufficient condition to support VMID */ if (lpfc_is_vmid_enabled(phba) && (ndlp->vmid_support || phba->pport->vmid_priority_tagging == LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { /* is the I/O generated by a VM, get the associated virtual */ /* entity id */ uuid = lpfc_is_command_vm_io(cmnd); if (uuid) { err = lpfc_vmid_get_appid(vport, uuid, cmnd->sc_data_direction, (union lpfc_vmid_io_tag *) &cur_iocbq->vmid_tag); if (!err) cur_iocbq->cmd_flag |= LPFC_IO_VMID; } } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); #endif /* Issue I/O to adapter */ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq, SLI_IOCB_RET_IOCB); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (start) { lpfc_cmd->ts_cmd_start = start; lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 
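		/* Stamp the time the WQE was handed to the adapter for
		 * debugfs latency statistics.
		 */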
lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); } else { lpfc_cmd->ts_cmd_start = 0; } #endif if (err) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "3376 FCP could not issue iocb err %x " "FCP cmd x%x <%d/%llu> " "sid: x%x did: x%x oxid: x%x " "Data: x%x x%x x%x x%x\n", err, cmnd->cmnd[0], cmnd->device ? cmnd->device->id : 0xffff, cmnd->device ? cmnd->device->lun : (u64)-1, vport->fc_myDID, ndlp->nlp_DID, phba->sli_rev == LPFC_SLI_REV4 ? cur_iocbq->sli4_xritag : 0xffff, phba->sli_rev == LPFC_SLI_REV4 ? phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : cur_iocbq->iocb.ulpContext, cur_iocbq->iotag, phba->sli_rev == LPFC_SLI_REV4 ? bf_get(wqe_tmo, &cur_iocbq->wqe.generic.wqe_com) : cur_iocbq->iocb.ulpTimeout, (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); goto out_host_busy_free_buf; } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } if (phba->cfg_xri_rebalancing) lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); return 0; out_host_busy_free_buf: idx = lpfc_cmd->hdwq_no; lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); if (phba->sli4_hba.hdwq) { switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { case WRITE_DATA: phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; break; case READ_DATA: phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; break; default: phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; } } out_host_busy_release_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), shost); return SCSI_MLQUEUE_HOST_BUSY; out_tgt_busy2: lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), shost); out_tgt_busy1: return SCSI_MLQUEUE_TARGET_BUSY; out_fail_command_release_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), shost); out_fail_command: scsi_done(cmnd); return 0; } /* * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport * @vport: The virtual port for which this call is being executed. */ void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) { u32 bucket; struct lpfc_vmid *cur; if (vport->port_type == LPFC_PHYSICAL_PORT) del_timer_sync(&vport->phba->inactive_vmid_poll); kfree(vport->qfpa_res); kfree(vport->vmid_priority.vmid_range); kfree(vport->vmid); if (!hash_empty(vport->hash_table)) hash_for_each(vport->hash_table, bucket, cur, hnode) hash_del(&cur->hnode); vport->qfpa_res = NULL; vport->vmid_priority.vmid_range = NULL; vport->vmid = NULL; vport->cur_vmid_cnt = 0; } /** * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point * @cmnd: Pointer to scsi_cmnd data structure. * * This routine aborts @cmnd pending in base driver. 
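 * It issues an abort for the outstanding iotag/XRI and then waits, bounded
 * by twice the devloss timeout, for the firmware to complete the aborted I/O.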
* * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_abort_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocb; struct lpfc_io_buf *lpfc_cmd; int ret = SUCCESS, status = 0; struct lpfc_sli_ring *pring_s4 = NULL; struct lpfc_sli_ring *pring = NULL; int ret_val; unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); status = fc_block_rport(rport); if (status != 0 && status != SUCCESS) return status; lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; if (!lpfc_cmd) return ret; /* Guard against IO completion being called at same time */ spin_lock_irqsave(&lpfc_cmd->buf_lock, flags); spin_lock(&phba->hbalock); /* driver queued commands are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3168 SCSI Layer abort requested I/O has been " "flushed by LLD.\n"); ret = FAILED; goto out_unlock_hba; } if (!lpfc_cmd->pCmd) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "2873 SCSI Layer I/O Abort Request IO CMPL Status " "x%x ID %d LUN %llu\n", SUCCESS, cmnd->device->id, cmnd->device->lun); goto out_unlock_hba; } iocb = &lpfc_cmd->cur_iocbq; if (phba->sli_rev == LPFC_SLI_REV4) { pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; if (!pring_s4) { ret = FAILED; goto out_unlock_hba; } spin_lock(&pring_s4->ring_lock); } /* the command is in process of being cancelled */ if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3169 SCSI Layer abort requested I/O has been " "cancelled by LLD.\n"); ret = FAILED; goto out_unlock_ring; } /* * If pCmd field of the corresponding lpfc_io_buf structure * points to a different SCSI command, then the driver has * already completed this command, but the midlayer did not * see the completion before the eh fired. Just return SUCCESS. */ if (lpfc_cmd->pCmd != cmnd) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3170 SCSI Layer abort requested I/O has been " "completed by LLD.\n"); goto out_unlock_ring; } WARN_ON(iocb->io_buf != lpfc_cmd); /* abort issued in recovery is still in progress */ if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3389 SCSI Layer I/O Abort Request is pending\n"); if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring_s4->ring_lock); spin_unlock(&phba->hbalock); spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); goto wait_for_cmpl; } lpfc_cmd->waitq = &waitq; if (phba->sli_rev == LPFC_SLI_REV4) { spin_unlock(&pring_s4->ring_lock); ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, lpfc_sli_abort_fcp_cmpl); } else { pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, lpfc_sli_abort_fcp_cmpl); } /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); if (ret_val != IOCB_SUCCESS) { /* Indicate the IO is not being aborted by the driver. */ lpfc_cmd->waitq = NULL; ret = FAILED; goto out_unlock_hba; } /* no longer need the lock after this point */ spin_unlock(&phba->hbalock); spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); wait_for_cmpl: /* * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait * for abort to complete. 
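 * If the I/O is still outstanding when the wait expires, the handler logs
 * the timeout and returns FAILED so the midlayer can escalate.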
*/ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); spin_lock(&lpfc_cmd->buf_lock); if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0748 abort handler timed out waiting " "for aborting I/O (xri:x%x) to complete: " "ret %#x, ID %d, LUN %llu\n", iocb->sli4_xritag, ret, cmnd->device->id, cmnd->device->lun); } lpfc_cmd->waitq = NULL; spin_unlock(&lpfc_cmd->buf_lock); goto out; out_unlock_ring: if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring_s4->ring_lock); out_unlock_hba: spin_unlock(&phba->hbalock); spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "0749 SCSI Layer I/O Abort Request Status x%x ID %d " "LUN %llu\n", ret, cmnd->device->id, cmnd->device->lun); return ret; } static char * lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) { switch (task_mgmt_cmd) { case FCP_ABORT_TASK_SET: return "ABORT_TASK_SET"; case FCP_CLEAR_TASK_SET: return "FCP_CLEAR_TASK_SET"; case FCP_BUS_RESET: return "FCP_BUS_RESET"; case FCP_LUN_RESET: return "FCP_LUN_RESET"; case FCP_TARGET_RESET: return "FCP_TARGET_RESET"; case FCP_CLEAR_ACA: return "FCP_CLEAR_ACA"; case FCP_TERMINATE_TASK: return "FCP_TERMINATE_TASK"; default: return "unknown"; } } /** * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_io_buf data structure. * * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded * * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) { struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; uint32_t rsp_info; uint32_t rsp_len; uint8_t rsp_info_code; int ret = FAILED; if (fcprsp == NULL) lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0703 fcp_rsp is missing\n"); else { rsp_info = fcprsp->rspStatus2; rsp_len = be32_to_cpu(fcprsp->rspRspLen); rsp_info_code = fcprsp->rspInfo3; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0706 fcp_rsp valid 0x%x," " rsp len=%d code 0x%x\n", rsp_info, rsp_len, rsp_info_code); /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN * field specifies the number of valid bytes of FCP_RSP_INFO. * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 */ if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && ((rsp_len == 8) || (rsp_len == 4))) { switch (rsp_info_code) { case RSP_NO_FAILURE: lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0715 Task Mgmt No Failure\n"); ret = SUCCESS; break; case RSP_TM_NOT_SUPPORTED: /* TM rejected */ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0716 Task Mgmt Target " "reject\n"); break; case RSP_TM_NOT_COMPLETED: /* TM failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0717 Task Mgmt Target " "failed TM\n"); break; case RSP_TM_INVALID_LU: /* TM to invalid LU! */ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0718 Task Mgmt to invalid " "LUN\n"); break; } } } return ret; } /** * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler * @vport: The virtual port for which this call is being executed. * @rport: Pointer to remote port * @tgt_id: Target ID of remote device. * @lun_id: Lun number for the TMF * @task_mgmt_cmd: type of TMF to send * * This routine builds and sends a TMF (SCSI Task Mgmt Function) to * a remote port. * * Return Code: * 0x2003 - Error * 0x2002 - Success. 
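 * TIMEOUT_ERROR - the TMF timed out or was aborted
 *
 * Typical use from the reset handlers (sketch):
 *   status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, FCP_LUN_RESET);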
**/ static int lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, unsigned int tgt_id, uint64_t lun_id, uint8_t task_mgmt_cmd) { struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *lpfc_cmd; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; int ret; int status; rdata = rport->dd_data; if (!rdata || !rdata->pnode) return FAILED; pnode = rdata->pnode; lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL); if (lpfc_cmd == NULL) return FAILED; lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; lpfc_cmd->rdata = rdata; lpfc_cmd->pCmd = NULL; lpfc_cmd->ndlp = pnode; status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, task_mgmt_cmd); if (!status) { lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } iocbq = &lpfc_cmd->cur_iocbq; iocbqrsp = lpfc_sli_get_iocbq(phba); if (iocbqrsp == NULL) { lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl; iocbq->vport = vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %llu " "rpi x%x nlp_flag x%x Data: x%x x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, iocbq->cmd_flag); status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); if ((status != IOCB_SUCCESS) || (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) { if (status != IOCB_SUCCESS || get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0727 TMF %s to TGT %d LUN %llu " "failed (%d, %d) cmd_flag x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, get_job_ulpstatus(phba, iocbqrsp), get_job_word4(phba, iocbqrsp), iocbq->cmd_flag); /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ if (status == IOCB_SUCCESS) { if (get_job_ulpstatus(phba, iocbqrsp) == IOSTAT_FCP_RSP_ERROR) /* Something in the FCP_RSP was invalid. * Check conditions */ ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); else ret = FAILED; } else if ((status == IOCB_TIMEDOUT) || (status == IOCB_ABORTED)) { ret = TIMEOUT_ERROR; } else { ret = FAILED; } } else ret = SUCCESS; lpfc_sli_release_iocbq(phba, iocbqrsp); if (status != IOCB_TIMEDOUT) lpfc_release_scsi_buf(phba, lpfc_cmd); return ret; } /** * lpfc_chk_tgt_mapped - * @vport: The virtual port to check on * @rport: Pointer to fc_rport data structure. * * This routine delays until the scsi target (aka rport) for the * command exists (is present and logged in) or we declare it non-existent. * * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode = NULL; unsigned long later; rdata = rport->dd_data; if (!rdata) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0797 Tgt Map rport failure: rdata x%px\n", rdata); return FAILED; } pnode = rdata->pnode; /* * If target is not in a MAPPED state, delay until * target is rediscovered or devloss timeout expires. 
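 * The node state is polled every 500 ms for at most twice the devloss
 * timeout before giving up.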
*/ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies)) { if (!pnode) return FAILED; if (pnode->nlp_state == NLP_STE_MAPPED_NODE) return SUCCESS; schedule_timeout_uninterruptible(msecs_to_jiffies(500)); rdata = rport->dd_data; if (!rdata) return FAILED; pnode = rdata->pnode; } if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) return FAILED; return SUCCESS; } /** * lpfc_reset_flush_io_context - * @vport: The virtual port (scsi_host) for the flush context * @tgt_id: If aborting by Target contect - specifies the target id * @lun_id: If aborting by Lun context - specifies the lun id * @context: specifies the context level to flush at. * * After a reset condition via TMF, we need to flush orphaned i/o * contexts from the adapter. This routine aborts any contexts * outstanding, then waits for their completions. The wait is * bounded by devloss_tmo though. * * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd context) { struct lpfc_hba *phba = vport->phba; unsigned long later; int cnt; cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); if (cnt) lpfc_sli_abort_taskmgmt(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], tgt_id, lun_id, context); later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies) && cnt) { schedule_timeout_uninterruptible(msecs_to_jiffies(20)); cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); } if (cnt) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0724 I/O flush failure for context %s : cnt x%x\n", ((context == LPFC_CTX_LUN) ? "LUN" : ((context == LPFC_CTX_TGT) ? "TGT" : ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))), cnt); return FAILED; } return SUCCESS; } /** * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point * @cmnd: Pointer to scsi_cmnd data structure. * * This routine does a device reset by sending a LUN_RESET task management * command. 
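 * On success, any I/O still outstanding against the LUN is aborted and
 * flushed before the result is returned to the midlayer.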
* * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_device_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; uint64_t lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; int status; u32 logit = LOG_FCP; if (!rport) return FAILED; rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0798 Device Reset rdata failure: rdata x%px\n", rdata); return FAILED; } pnode = rdata->pnode; status = fc_block_rport(rport); if (status != 0 && status != SUCCESS) return status; status = lpfc_chk_tgt_mapped(vport, rport); if (status == FAILED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0721 Device Reset rport failure: rdata x%px\n", rdata); return FAILED; } scsi_event.event_type = FC_REG_SCSI_EVENT; scsi_event.subcategory = LPFC_EVENT_LUNRESET; scsi_event.lun = lun_id; memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, FCP_LUN_RESET); if (status != SUCCESS) logit = LOG_TRACE_EVENT; lpfc_printf_vlog(vport, KERN_ERR, logit, "0713 SCSI layer issued Device Reset (%d, %llu) " "return x%x\n", tgt_id, lun_id, status); /* * We have to clean up i/o as : they may be orphaned by the TMF; * or if the TMF failed, they may be in an indeterminate state. * So, continue on. * We will report success if all the i/o aborts successfully. */ if (status == SUCCESS) status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_LUN); return status; } /** * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point * @cmnd: Pointer to scsi_cmnd data structure. * * This routine does a target reset by sending a TARGET_RESET task management * command. 
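 * If the rport is no longer mapped, outstanding I/O for the target is
 * flushed and FAST_IO_FAIL is returned; if the TMF itself fails, a LOGO is
 * issued to force the target to re-login.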
* * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_target_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; uint64_t lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; int status; u32 logit = LOG_FCP; u32 dev_loss_tmo = vport->cfg_devloss_tmo; unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); if (!rport) return FAILED; rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0799 Target Reset rdata failure: rdata x%px\n", rdata); return FAILED; } pnode = rdata->pnode; status = fc_block_rport(rport); if (status != 0 && status != SUCCESS) return status; status = lpfc_chk_tgt_mapped(vport, rport); if (status == FAILED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { spin_lock_irqsave(&pnode->lock, flags); pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; spin_unlock_irqrestore(&pnode->lock, flags); } lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT); return FAST_IO_FAIL; } scsi_event.event_type = FC_REG_SCSI_EVENT; scsi_event.subcategory = LPFC_EVENT_TGTRESET; scsi_event.lun = 0; memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, FCP_TARGET_RESET); if (status != SUCCESS) { logit = LOG_TRACE_EVENT; /* Issue LOGO, if no LOGO is outstanding */ spin_lock_irqsave(&pnode->lock, flags); if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && !pnode->logo_waitq) { pnode->logo_waitq = &waitq; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; pnode->nlp_flag |= NLP_ISSUE_LOGO; pnode->save_flags |= NLP_WAIT_FOR_LOGO; spin_unlock_irqrestore(&pnode->lock, flags); lpfc_unreg_rpi(vport, pnode); wait_event_timeout(waitq, (!(pnode->save_flags & NLP_WAIT_FOR_LOGO)), msecs_to_jiffies(dev_loss_tmo * 1000)); if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { lpfc_printf_vlog(vport, KERN_ERR, logit, "0725 SCSI layer TGTRST " "failed & LOGO TMO (%d, %llu) " "return x%x\n", tgt_id, lun_id, status); spin_lock_irqsave(&pnode->lock, flags); pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; } else { spin_lock_irqsave(&pnode->lock, flags); } pnode->logo_waitq = NULL; spin_unlock_irqrestore(&pnode->lock, flags); status = SUCCESS; } else { spin_unlock_irqrestore(&pnode->lock, flags); status = FAILED; } } lpfc_printf_vlog(vport, KERN_ERR, logit, "0723 SCSI layer issued Target Reset (%d, %llu) " "return x%x\n", tgt_id, lun_id, status); /* * We have to clean up i/o as : they may be orphaned by the TMF; * or if the TMF failed, they may be in an indeterminate state. * So, continue on. * We will report success if all the i/o aborts successfully. */ if (status == SUCCESS) status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT); return status; } /** * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt * @cmnd: Pointer to scsi_cmnd data structure. * * This routine does host reset to the adaptor port. 
It brings the HBA * offline, performs a board restart, and then brings the board back online. * The lpfc_offline calls lpfc_sli_hba_down which will abort and local * reject all outstanding SCSI commands to the host and error returned * back to SCSI mid-level. As this will be SCSI mid-level's last resort * of error handling, it will only return error if resetting of the adapter * is not successful; in all other cases, will return success. * * Return code : * 0x2003 - Error * 0x2002 - Success **/ static int lpfc_host_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "3172 SCSI layer issued Host Reset Data:\n"); lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) goto error; /* Wait for successful restart of adapter */ if (phba->sli_rev < LPFC_SLI_REV4) { rc = lpfc_sli_chipset_init(phba); if (rc) goto error; } rc = lpfc_online(phba); if (rc) goto error; lpfc_unblock_mgmt_io(phba); return ret; error: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3323 Failed host reset\n"); lpfc_unblock_mgmt_io(phba); return FAILED; } /** * lpfc_slave_alloc - scsi_host_template slave_alloc entry point * @sdev: Pointer to scsi_device. * * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's * globally available list of scsi buffers. This routine also makes sure scsi * buffer is not allocated more than HBA limit conveyed to midlayer. This list * of scsi buffer exists for the lifetime of the driver. * * Return codes: * non-0 - Error * 0 - Success **/ static int lpfc_slave_alloc(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); uint32_t total = 0; uint32_t num_to_alloc = 0; int num_allocated = 0; uint32_t sdev_cnt; struct lpfc_device_data *device_data; unsigned long flags; struct lpfc_name target_wwpn; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; if (phba->cfg_fof) { /* * Check to see if the device data structure for the lun * exists. If not, create one. */ u64_to_wwn(rport->port_name, target_wwpn.u.wwn); spin_lock_irqsave(&phba->devicelock, flags); device_data = __lpfc_get_device_data(phba, &phba->luns, &vport->fc_portname, &target_wwpn, sdev->lun); if (!device_data) { spin_unlock_irqrestore(&phba->devicelock, flags); device_data = lpfc_create_device_data(phba, &vport->fc_portname, &target_wwpn, sdev->lun, phba->cfg_XLanePriority, true); if (!device_data) return -ENOMEM; spin_lock_irqsave(&phba->devicelock, flags); list_add_tail(&device_data->listentry, &phba->luns); } device_data->rport_data = rport->dd_data; device_data->available = true; spin_unlock_irqrestore(&phba->devicelock, flags); sdev->hostdata = device_data; } else { sdev->hostdata = rport->dd_data; } sdev_cnt = atomic_inc_return(&phba->sdev_cnt); /* For SLI4, all IO buffers are pre-allocated */ if (phba->sli_rev == LPFC_SLI_REV4) return 0; /* This code path is now ONLY for SLI3 adapters */ /* * Populate the cmds_per_lun count scsi_bufs into this host's globally * available list of scsi buffers. Don't allocate more than the * HBA limit conveyed to the midlayer via the host structure. The * formula accounts for the lun_queue_depth + error handlers + 1 * extra. 
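 * (cfg_lun_queue_depth + 2 buffers per scsi_device, capped so that
 * LPFC_DISC_IOCB_BUFF_COUNT exchanges always remain free for discovery.)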
This list of scsi bufs exists for the lifetime of the driver. */ total = phba->total_scsi_bufs; num_to_alloc = vport->cfg_lun_queue_depth + 2; /* If allocated buffers are enough do nothing */ if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) return 0; /* Allow some exchanges to be available always to complete discovery */ if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "0704 At limitation of %d preallocated " "command buffers\n", total); return 0; /* Allow some exchanges to be available always to complete discovery */ } else if (total + num_to_alloc > phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "0705 Allocation request of %d " "command buffers will exceed max of %d. " "Reducing allocation request to %d.\n", num_to_alloc, phba->cfg_hba_queue_depth, (phba->cfg_hba_queue_depth - total)); num_to_alloc = phba->cfg_hba_queue_depth - total; } num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); if (num_to_alloc != num_allocated) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0708 Allocation request of %d " "command buffers did not succeed. " "Allocated %d buffers.\n", num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; return 0; } /** * lpfc_slave_configure - scsi_host_template slave_configure entry point * @sdev: Pointer to scsi_device. * * This routine configures following items * - Tag command queuing support for @sdev if supported. * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. * * Return codes: * 0 - Success **/ static int lpfc_slave_configure(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } return 0; } /** * lpfc_slave_destroy - slave_destroy entry point of SHT data structure * @sdev: Pointer to scsi_device. * * This routine sets @sdev hostatdata filed to null. **/ static void lpfc_slave_destroy(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; unsigned long flags; struct lpfc_device_data *device_data = sdev->hostdata; atomic_dec(&phba->sdev_cnt); if ((phba->cfg_fof) && (device_data)) { spin_lock_irqsave(&phba->devicelock, flags); device_data->available = false; if (!device_data->oas_enabled) lpfc_delete_device_data(phba, device_data); spin_unlock_irqrestore(&phba->devicelock, flags); } sdev->hostdata = NULL; return; } /** * lpfc_create_device_data - creates and initializes device data structure for OAS * @phba: Pointer to host bus adapter structure. * @vport_wwpn: Pointer to vport's wwpn information * @target_wwpn: Pointer to target's wwpn information * @lun: Lun on target * @pri: Priority * @atomic_create: Flag to indicate if memory should be allocated using the * GFP_ATOMIC flag or not. * * This routine creates a device data structure which will contain identifying * information for the device (host wwpn, target wwpn, lun), state of OAS, * whether or not the corresponding lun is available by the system, * and pointer to the rport data. 
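 * The structure is allocated from the phba's device_data_mem_pool and is not
 * placed on any list here; callers link it onto phba->luns as needed.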
* * Return codes: * NULL - Error * Pointer to lpfc_device_data - Success **/ struct lpfc_device_data* lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct lpfc_name *target_wwpn, uint64_t lun, uint32_t pri, bool atomic_create) { struct lpfc_device_data *lun_info; int memory_flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || !(phba->cfg_fof)) return NULL; /* Attempt to create the device data to contain lun info */ if (atomic_create) memory_flags = GFP_ATOMIC; else memory_flags = GFP_KERNEL; lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); if (!lun_info) return NULL; INIT_LIST_HEAD(&lun_info->listentry); lun_info->rport_data = NULL; memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, sizeof(struct lpfc_name)); memcpy(&lun_info->device_id.target_wwpn, target_wwpn, sizeof(struct lpfc_name)); lun_info->device_id.lun = lun; lun_info->oas_enabled = false; lun_info->priority = pri; lun_info->available = false; return lun_info; } /** * lpfc_delete_device_data - frees a device data structure for OAS * @phba: Pointer to host bus adapter structure. * @lun_info: Pointer to device data structure to free. * * This routine frees the previously allocated device data structure passed. * **/ void lpfc_delete_device_data(struct lpfc_hba *phba, struct lpfc_device_data *lun_info) { if (unlikely(!phba) || !lun_info || !(phba->cfg_fof)) return; if (!list_empty(&lun_info->listentry)) list_del(&lun_info->listentry); mempool_free(lun_info, phba->device_data_mem_pool); return; } /** * __lpfc_get_device_data - returns the device data for the specified lun * @phba: Pointer to host bus adapter structure. * @list: Point to list to search. * @vport_wwpn: Pointer to vport's wwpn information * @target_wwpn: Pointer to target's wwpn information * @lun: Lun on target * * This routine searches the list passed for the specified lun's device data. * This function does not hold locks, it is the responsibility of the caller * to ensure the proper lock is held before calling the function. * * Return codes: * NULL - Error * Pointer to lpfc_device_data - Success **/ struct lpfc_device_data* __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, struct lpfc_name *vport_wwpn, struct lpfc_name *target_wwpn, uint64_t lun) { struct lpfc_device_data *lun_info; if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || !phba->cfg_fof) return NULL; /* Check to see if the lun is already enabled for OAS. */ list_for_each_entry(lun_info, list, listentry) { if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, sizeof(struct lpfc_name)) == 0) && (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, sizeof(struct lpfc_name)) == 0) && (lun_info->device_id.lun == lun)) return lun_info; } return NULL; } /** * lpfc_find_next_oas_lun - searches for the next oas lun * @phba: Pointer to host bus adapter structure. * @vport_wwpn: Pointer to vport's wwpn information * @target_wwpn: Pointer to target's wwpn information * @starting_lun: Pointer to the lun to start searching for * @found_vport_wwpn: Pointer to the found lun's vport wwpn information * @found_target_wwpn: Pointer to the found lun's target wwpn information * @found_lun: Pointer to the found lun. * @found_lun_status: Pointer to status of the found lun. * @found_lun_pri: Pointer to priority of the found lun. * * This routine searches the luns list for the specified lun * or the first lun for the vport/target. If the vport wwpn contains * a zero value then a specific vport is not specified. 
In this case * any vport which contains the lun will be considered a match. If the * target wwpn contains a zero value then a specific target is not specified. * In this case any target which contains the lun will be considered a * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status * are returned. The function will also return the next lun if available. * If the next lun is not found, starting_lun parameter will be set to * NO_MORE_OAS_LUN. * * Return codes: * non-0 - Error * 0 - Success **/ bool lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct lpfc_name *target_wwpn, uint64_t *starting_lun, struct lpfc_name *found_vport_wwpn, struct lpfc_name *found_target_wwpn, uint64_t *found_lun, uint32_t *found_lun_status, uint32_t *found_lun_pri) { unsigned long flags; struct lpfc_device_data *lun_info; struct lpfc_device_id *device_id; uint64_t lun; bool found = false; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || !starting_lun || !found_vport_wwpn || !found_target_wwpn || !found_lun || !found_lun_status || (*starting_lun == NO_MORE_OAS_LUN) || !phba->cfg_fof) return false; lun = *starting_lun; *found_lun = NO_MORE_OAS_LUN; *starting_lun = NO_MORE_OAS_LUN; /* Search for lun or the lun closet in value */ spin_lock_irqsave(&phba->devicelock, flags); list_for_each_entry(lun_info, &phba->luns, listentry) { if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, sizeof(struct lpfc_name)) == 0)) && ((wwn_to_u64(target_wwpn->u.wwn) == 0) || (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, sizeof(struct lpfc_name)) == 0)) && (lun_info->oas_enabled)) { device_id = &lun_info->device_id; if ((!found) && ((lun == FIND_FIRST_OAS_LUN) || (device_id->lun == lun))) { *found_lun = device_id->lun; memcpy(found_vport_wwpn, &device_id->vport_wwpn, sizeof(struct lpfc_name)); memcpy(found_target_wwpn, &device_id->target_wwpn, sizeof(struct lpfc_name)); if (lun_info->available) *found_lun_status = OAS_LUN_STATUS_EXISTS; else *found_lun_status = 0; *found_lun_pri = lun_info->priority; if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) memset(vport_wwpn, 0x0, sizeof(struct lpfc_name)); if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) memset(target_wwpn, 0x0, sizeof(struct lpfc_name)); found = true; } else if (found) { *starting_lun = device_id->lun; memcpy(vport_wwpn, &device_id->vport_wwpn, sizeof(struct lpfc_name)); memcpy(target_wwpn, &device_id->target_wwpn, sizeof(struct lpfc_name)); break; } } } spin_unlock_irqrestore(&phba->devicelock, flags); return found; } /** * lpfc_enable_oas_lun - enables a lun for OAS operations * @phba: Pointer to host bus adapter structure. * @vport_wwpn: Pointer to vport's wwpn information * @target_wwpn: Pointer to target's wwpn information * @lun: Lun * @pri: Priority * * This routine enables a lun for oas operations. The routines does so by * doing the following : * * 1) Checks to see if the device data for the lun has been created. * 2) If found, sets the OAS enabled flag if not set and returns. * 3) Otherwise, creates a device data structure. * 4) If successfully created, indicates the device data is for an OAS lun, * indicates the lun is not available and add to the list of luns. 
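 * The phba->devicelock is taken internally, so the caller must not hold it.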
* * Return codes: * false - Error * true - Success **/ bool lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) { struct lpfc_device_data *lun_info; unsigned long flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || !phba->cfg_fof) return false; spin_lock_irqsave(&phba->devicelock, flags); /* Check to see if the device data for the lun has been created */ lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, target_wwpn, lun); if (lun_info) { if (!lun_info->oas_enabled) lun_info->oas_enabled = true; lun_info->priority = pri; spin_unlock_irqrestore(&phba->devicelock, flags); return true; } /* Create an lun info structure and add to list of luns */ lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, pri, true); if (lun_info) { lun_info->oas_enabled = true; lun_info->priority = pri; lun_info->available = false; list_add_tail(&lun_info->listentry, &phba->luns); spin_unlock_irqrestore(&phba->devicelock, flags); return true; } spin_unlock_irqrestore(&phba->devicelock, flags); return false; } /** * lpfc_disable_oas_lun - disables a lun for OAS operations * @phba: Pointer to host bus adapter structure. * @vport_wwpn: Pointer to vport's wwpn information * @target_wwpn: Pointer to target's wwpn information * @lun: Lun * @pri: Priority * * This routine disables a lun for oas operations. The routines does so by * doing the following : * * 1) Checks to see if the device data for the lun is created. * 2) If present, clears the flag indicating this lun is for OAS. * 3) If the lun is not available by the system, the device data is * freed. * * Return codes: * false - Error * true - Success **/ bool lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) { struct lpfc_device_data *lun_info; unsigned long flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || !phba->cfg_fof) return false; spin_lock_irqsave(&phba->devicelock, flags); /* Check to see if the lun is available. 
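	 * If device data exists, clear its OAS flag and free the entry when
	 * the lun is no longer exposed to the system.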
*/ lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, target_wwpn, lun); if (lun_info) { lun_info->oas_enabled = false; lun_info->priority = pri; if (!lun_info->available) lpfc_delete_device_data(phba, lun_info); spin_unlock_irqrestore(&phba->devicelock, flags); return true; } spin_unlock_irqrestore(&phba->devicelock, flags); return false; } static int lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) { return SCSI_MLQUEUE_HOST_BUSY; } static int lpfc_no_slave(struct scsi_device *sdev) { return -ENODEV; } struct scsi_host_template lpfc_template_nvme = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_no_command, .slave_alloc = lpfc_no_slave, .slave_configure = lpfc_no_slave, .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = 1, .cmd_per_lun = 1, .shost_groups = lpfc_hba_groups, .max_sectors = 0xFFFFFFFF, .vendor_id = LPFC_NL_VENDOR_ID, .track_queue_depth = 0, }; struct scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_timed_out = fc_eh_timed_out, .eh_should_retry_cmd = fc_eh_should_retry_cmd, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, .eh_host_reset_handler = lpfc_host_reset_handler, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, .slave_destroy = lpfc_slave_destroy, .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .shost_groups = lpfc_hba_groups, .max_sectors = 0xFFFFFFFF, .vendor_id = LPFC_NL_VENDOR_ID, .change_queue_depth = scsi_change_queue_depth, .track_queue_depth = 1, }; struct scsi_host_template lpfc_vport_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_timed_out = fc_eh_timed_out, .eh_should_retry_cmd = fc_eh_should_retry_cmd, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, .eh_bus_reset_handler = NULL, .eh_host_reset_handler = NULL, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, .slave_destroy = lpfc_slave_destroy, .scan_finished = lpfc_scan_finished, .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .shost_groups = lpfc_vport_groups, .max_sectors = 0xFFFFFFFF, .vendor_id = 0, .change_queue_depth = scsi_change_queue_depth, .track_queue_depth = 1, };
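/*
 * Note on the three templates above: lpfc_template is registered for SCSI on
 * physical ports, lpfc_vport_template for NPIV vports (no bus or host reset
 * handlers), and lpfc_template_nvme presumably for ports configured for NVMe
 * only, where queued SCSI commands are bounced with SCSI_MLQUEUE_HOST_BUSY
 * and the slave callouts return -ENODEV.
 */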
linux-master
drivers/scsi/lpfc/lpfc_scsi.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/interrupt.h> #include <linux/mempool.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/bsg-lib.h> #include <linux/vmalloc.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_bsg_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_bsg.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_debugfs.h" #include "lpfc_vport.h" #include "lpfc_version.h" struct lpfc_bsg_event { struct list_head node; struct kref kref; wait_queue_head_t wq; /* Event type and waiter identifiers */ uint32_t type_mask; uint32_t req_id; uint32_t reg_id; /* next two flags are here for the auto-delete logic */ unsigned long wait_time_stamp; int waiting; /* seen and not seen events */ struct list_head events_to_get; struct list_head events_to_see; /* driver data associated with the job */ void *dd_data; }; struct lpfc_bsg_iocb { struct lpfc_iocbq *cmdiocbq; struct lpfc_dmabuf *rmp; struct lpfc_nodelist *ndlp; }; struct lpfc_bsg_mbox { LPFC_MBOXQ_t *pmboxq; MAILBOX_t *mb; struct lpfc_dmabuf *dmabuffers; /* for BIU diags */ uint8_t *ext; /* extended mailbox data */ uint32_t mbOffset; /* from app */ uint32_t inExtWLen; /* from app */ uint32_t outExtWLen; /* from app */ }; #define TYPE_EVT 1 #define TYPE_IOCB 2 #define TYPE_MBOX 3 struct bsg_job_data { uint32_t type; struct bsg_job *set_job; /* job waiting for this iocb to finish */ union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; struct lpfc_bsg_mbox mbox; } context_un; }; struct event_data { struct list_head node; uint32_t type; uint32_t immed_dat; void *data; uint32_t len; }; #define BUF_SZ_4K 4096 #define SLI_CT_ELX_LOOPBACK 0x10 enum ELX_LOOPBACK_CMD { ELX_LOOPBACK_XRI_SETUP, ELX_LOOPBACK_DATA, }; #define ELX_LOOPBACK_HEADER_SZ \ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) struct lpfc_dmabufext { struct lpfc_dmabuf dma; uint32_t size; uint32_t flag; }; static void lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) { struct lpfc_dmabuf *mlast, *next_mlast; if (mlist) { list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) { list_del(&mlast->list); 
lpfc_mbuf_free(phba, mlast->virt, mlast->phys); kfree(mlast); } lpfc_mbuf_free(phba, mlist->virt, mlist->phys); kfree(mlist); } return; } static struct lpfc_dmabuf * lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size, int outbound_buffers, struct ulp_bde64 *bpl, int *bpl_entries) { struct lpfc_dmabuf *mlist = NULL; struct lpfc_dmabuf *mp; unsigned int bytes_left = size; /* Verify we can support the size specified */ if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE))) return NULL; /* Determine the number of dma buffers to allocate */ *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 : size/LPFC_BPL_SIZE); /* Allocate dma buffer and place in BPL passed */ while (bytes_left) { /* Allocate dma buffer */ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { if (mlist) lpfc_free_bsg_buffers(phba, mlist); return NULL; } INIT_LIST_HEAD(&mp->list); mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); if (!mp->virt) { kfree(mp); if (mlist) lpfc_free_bsg_buffers(phba, mlist); return NULL; } /* Queue it to a linked list */ if (!mlist) mlist = mp; else list_add_tail(&mp->list, &mlist->list); /* Add buffer to buffer pointer list */ if (outbound_buffers) bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; else bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); bpl->tus.f.bdeSize = (uint16_t) (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE : bytes_left); bytes_left -= bpl->tus.f.bdeSize; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; } return mlist; } static unsigned int lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, struct bsg_buffer *bsg_buffers, unsigned int bytes_to_transfer, int to_buffers) { struct lpfc_dmabuf *mp; unsigned int transfer_bytes, bytes_copied = 0; unsigned int sg_offset, dma_offset; unsigned char *dma_address, *sg_address; LIST_HEAD(temp_list); struct sg_mapping_iter miter; unsigned long flags; unsigned int sg_flags = SG_MITER_ATOMIC; bool sg_valid; list_splice_init(&dma_buffers->list, &temp_list); list_add(&dma_buffers->list, &temp_list); sg_offset = 0; if (to_buffers) sg_flags |= SG_MITER_FROM_SG; else sg_flags |= SG_MITER_TO_SG; sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, sg_flags); local_irq_save(flags); sg_valid = sg_miter_next(&miter); list_for_each_entry(mp, &temp_list, list) { dma_offset = 0; while (bytes_to_transfer && sg_valid && (dma_offset < LPFC_BPL_SIZE)) { dma_address = mp->virt + dma_offset; if (sg_offset) { /* Continue previous partial transfer of sg */ sg_address = miter.addr + sg_offset; transfer_bytes = miter.length - sg_offset; } else { sg_address = miter.addr; transfer_bytes = miter.length; } if (bytes_to_transfer < transfer_bytes) transfer_bytes = bytes_to_transfer; if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset)) transfer_bytes = LPFC_BPL_SIZE - dma_offset; if (to_buffers) memcpy(dma_address, sg_address, transfer_bytes); else memcpy(sg_address, dma_address, transfer_bytes); dma_offset += transfer_bytes; sg_offset += transfer_bytes; bytes_to_transfer -= transfer_bytes; bytes_copied += transfer_bytes; if (sg_offset >= miter.length) { sg_offset = 0; sg_valid = sg_miter_next(&miter); } } } sg_miter_stop(&miter); local_irq_restore(flags); list_del_init(&dma_buffers->list); list_splice(&temp_list, &dma_buffers->list); return bytes_copied; } /** * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. 
* @rspiocbq: Pointer to response iocb. * * This function is the completion handler for iocbs issued using * lpfc_bsg_send_mgmt_cmd function. This function is called by the * ring event handler function without any lock held. This function * can be called from both worker thread context and interrupt * context. This function also can be called from another thread which * cleans up the SLI layer objects. * This function copies the contents of the response iocb to the * response iocb memory object provided by the caller of * lpfc_sli_issue_iocb_wait and then wakes up the thread which * sleeps for the iocb completion. **/ static void lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_nodelist *ndlp; struct lpfc_bsg_iocb *iocb; unsigned long flags; int rc = 0; u32 ulp_status, ulp_word4, total_data_placed; dd_data = cmdiocbq->context_un.dd_data; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* Close the timeout handler abort window */ spin_lock_irqsave(&phba->hbalock, flags); cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; spin_unlock_irqrestore(&phba->hbalock, flags); iocb = &dd_data->context_un.iocb; ndlp = iocb->cmdiocbq->ndlp; rmp = iocb->rmp; cmp = cmdiocbq->cmd_dmabuf; bmp = cmdiocbq->bpl_dmabuf; ulp_status = get_job_ulpstatus(phba, rspiocbq); ulp_word4 = get_job_word4(phba, rspiocbq); total_data_placed = get_job_data_placed(phba, rspiocbq); /* Copy the completed data or set the error status */ if (job) { if (ulp_status) { if (ulp_status == IOSTAT_LOCAL_REJECT) { switch (ulp_word4 & IOERR_PARAM_MASK) { case IOERR_SEQUENCE_TIMEOUT: rc = -ETIMEDOUT; break; case IOERR_INVALID_RPI: rc = -EFAULT; break; default: rc = -EACCES; break; } } else { rc = -EACCES; } } else { bsg_reply->reply_payload_rcv_len = lpfc_bsg_copy_data(rmp, &job->reply_payload, total_data_placed, 0); } } lpfc_free_bsg_buffers(phba, cmp); lpfc_free_bsg_buffers(phba, rmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); lpfc_nlp_put(ndlp); lpfc_sli_release_iocbq(phba, cmdiocbq); kfree(dd_data); /* Complete the job if the job is still active */ if (job) { bsg_reply->result = rc; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return; } /** * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request * @job: fc_bsg_job to handle **/ static int lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = rdata->pnode; struct fc_bsg_reply *bsg_reply = job->reply; struct ulp_bde64 *bpl = NULL; struct lpfc_iocbq *cmdiocbq = NULL; struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; int request_nseg, reply_nseg; u32 num_entry; struct bsg_job_data *dd_data; unsigned long flags; uint32_t creg_val; int rc = 0; int iocb_stat; u16 ulp_context; /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; if (ndlp->nlp_flag & NLP_ELS_SND_MASK) return -ENODEV; /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { lpfc_printf_log(phba, 
KERN_WARNING, LOG_LIBDFC, "2733 Failed allocation of dd_data\n"); rc = -ENOMEM; goto no_dd_data; } cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { rc = -ENOMEM; goto free_dd; } bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) { rc = -ENOMEM; goto free_cmdiocbq; } bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); if (!bmp->virt) { rc = -ENOMEM; goto free_bmp; } INIT_LIST_HEAD(&bmp->list); bpl = (struct ulp_bde64 *) bmp->virt; request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, 1, bpl, &request_nseg); if (!cmp) { rc = -ENOMEM; goto free_bmp; } lpfc_bsg_copy_data(cmp, &job->request_payload, job->request_payload.payload_len, 1); bpl += request_nseg; reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, bpl, &reply_nseg); if (!rmp) { rc = -ENOMEM; goto free_cmp; } num_entry = request_nseg + reply_nseg; if (phba->sli_rev == LPFC_SLI_REV4) ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; else ulp_context = ndlp->nlp_rpi; lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry, phba->fc_ratov * 2); cmdiocbq->num_bdes = num_entry; cmdiocbq->vport = phba->pport; cmdiocbq->cmd_dmabuf = cmp; cmdiocbq->bpl_dmabuf = bmp; cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; cmdiocbq->context_un.dd_data = dd_data; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; dd_data->context_un.iocb.rmp = rmp; job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { rc = -EIO ; goto free_rmp; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } cmdiocbq->ndlp = lpfc_nlp_get(ndlp); if (!cmdiocbq->ndlp) { rc = -ENODEV; goto free_rmp; } iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); if (iocb_stat == IOCB_SUCCESS) { spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O had not been completed yet */ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { /* open up abort window to timeout handler */ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; } spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ } else if (iocb_stat == IOCB_BUSY) { rc = -EAGAIN; } else { rc = -EIO; } /* iocb failed so cleanup */ lpfc_nlp_put(ndlp); free_rmp: lpfc_free_bsg_buffers(phba, rmp); free_cmp: lpfc_free_bsg_buffers(phba, cmp); free_bmp: if (bmp->virt) lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); free_cmdiocbq: lpfc_sli_release_iocbq(phba, cmdiocbq); free_dd: kfree(dd_data); no_dd_data: /* make error code available to userspace */ bsg_reply->result = rc; job->dd_data = NULL; return rc; } /** * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. * @rspiocbq: Pointer to response iocb. * * This function is the completion handler for iocbs issued using * lpfc_bsg_rport_els_cmp function. This function is called by the * ring event handler function without any lock held. This function * can be called from both worker thread context and interrupt * context. This function also can be called from other thread which * cleans up the SLI layer objects. 
* This function copies the contents of the response iocb to the * response iocb memory object provided by the caller of * lpfc_sli_issue_iocb_wait and then wakes up the thread which * sleeps for the iocb completion. **/ static void lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; struct fc_bsg_ctels_reply *els_reply; uint8_t *rjt_data; unsigned long flags; unsigned int rsp_size; int rc = 0; u32 ulp_status, ulp_word4, total_data_placed; dd_data = cmdiocbq->context_un.dd_data; ndlp = dd_data->context_un.iocb.ndlp; cmdiocbq->ndlp = ndlp; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* Close the timeout handler abort window */ spin_lock_irqsave(&phba->hbalock, flags); cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; spin_unlock_irqrestore(&phba->hbalock, flags); ulp_status = get_job_ulpstatus(phba, rspiocbq); ulp_word4 = get_job_word4(phba, rspiocbq); total_data_placed = get_job_data_placed(phba, rspiocbq); pcmd = cmdiocbq->cmd_dmabuf; prsp = (struct lpfc_dmabuf *)pcmd->list.next; /* Copy the completed job data or determine the job status if job is * still active */ if (job) { if (ulp_status == IOSTAT_SUCCESS) { rsp_size = total_data_placed; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, prsp->virt, rsp_size); } else if (ulp_status == IOSTAT_LS_RJT) { bsg_reply->reply_payload_rcv_len = sizeof(struct fc_bsg_ctels_reply); /* LS_RJT data returned in word 4 */ rjt_data = (uint8_t *)&ulp_word4; els_reply = &bsg_reply->reply_data.ctels_reply; els_reply->status = FC_CTELS_STATUS_REJECT; els_reply->rjt_data.action = rjt_data[3]; els_reply->rjt_data.reason_code = rjt_data[2]; els_reply->rjt_data.reason_explanation = rjt_data[1]; els_reply->rjt_data.vendor_unique = rjt_data[0]; } else if (ulp_status == IOSTAT_LOCAL_REJECT && (ulp_word4 & IOERR_PARAM_MASK) == IOERR_SEQUENCE_TIMEOUT) { rc = -ETIMEDOUT; } else { rc = -EIO; } } lpfc_els_free_iocb(phba, cmdiocbq); lpfc_nlp_put(ndlp); kfree(dd_data); /* Complete the job if the job is still active */ if (job) { bsg_reply->result = rc; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return; } /** * lpfc_bsg_rport_els - send an ELS command from a bsg request * @job: fc_bsg_job to handle **/ static int lpfc_bsg_rport_els(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; struct lpfc_nodelist *ndlp = rdata->pnode; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t elscmd; uint32_t cmdsize; struct lpfc_iocbq *cmdiocbq; uint16_t rpi = 0; struct bsg_job_data *dd_data; unsigned long flags; uint32_t creg_val; int rc = 0; /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; /* verify the els command is not greater than the * maximum ELS transfer size. 
*/ if (job->request_payload.payload_len > FCELSSIZE) { rc = -EINVAL; goto no_dd_data; } /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2735 Failed allocation of dd_data\n"); rc = -ENOMEM; goto no_dd_data; } elscmd = bsg_request->rqst_data.r_els.els_code; cmdsize = job->request_payload.payload_len; if (!lpfc_nlp_get(ndlp)) { rc = -ENODEV; goto free_dd_data; } /* We will use the allocated dma buffers by prep els iocb for command * and response to ensure if the job times out and the request is freed, * we won't be dma into memory that is no longer allocated to for the * request. */ cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, elscmd); if (!cmdiocbq) { rc = -EIO; goto release_ndlp; } /* Transfer the request payload to allocated command dma buffer */ sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, cmdiocbq->cmd_dmabuf->virt, cmdsize); rpi = ndlp->nlp_rpi; if (phba->sli_rev == LPFC_SLI_REV4) bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com, phba->sli4_hba.rpi_ids[rpi]); else cmdiocbq->iocb.ulpContext = rpi; cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->context_un.dd_data = dd_data; cmdiocbq->ndlp = ndlp; cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; dd_data->context_un.iocb.ndlp = ndlp; dd_data->context_un.iocb.rmp = NULL; job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { rc = -EIO; goto linkdown_err; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); if (rc == IOCB_SUCCESS) { spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O had not been completed/released */ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { /* open up abort window to timeout handler */ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; } spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ } else if (rc == IOCB_BUSY) { rc = -EAGAIN; } else { rc = -EIO; } /* I/O issue failed. Cleanup resources. */ linkdown_err: lpfc_els_free_iocb(phba, cmdiocbq); release_ndlp: lpfc_nlp_put(ndlp); free_dd_data: kfree(dd_data); no_dd_data: /* make error code available to userspace */ bsg_reply->result = rc; job->dd_data = NULL; return rc; } /** * lpfc_bsg_event_free - frees an allocated event structure * @kref: Pointer to a kref. * * Called from kref_put. Back cast the kref into an event structure address. * Free any events to get, delete associated nodes, free any events to see, * free any data then free the event itself. **/ static void lpfc_bsg_event_free(struct kref *kref) { struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event, kref); struct event_data *ed; list_del(&evt->node); while (!list_empty(&evt->events_to_get)) { ed = list_entry(evt->events_to_get.next, typeof(*ed), node); list_del(&ed->node); kfree(ed->data); kfree(ed); } while (!list_empty(&evt->events_to_see)) { ed = list_entry(evt->events_to_see.next, typeof(*ed), node); list_del(&ed->node); kfree(ed->data); kfree(ed); } kfree(evt->dd_data); kfree(evt); } /** * lpfc_bsg_event_ref - increments the kref for an event * @evt: Pointer to an event structure. 
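 *
 * Locking note (an illustrative sketch, not additional API): the callers in
 * this file take and drop event references under phba->ct_ev_lock, e.g.
 *
 *	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 *	lpfc_bsg_event_ref(evt);
 *	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 *
 * and the final lpfc_bsg_event_unref() releases the event through
 * lpfc_bsg_event_free(), which also unlinks it from phba->ct_ev_waiters.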
 **/
static inline void lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/*
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
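 *
 * Matching rule, shown here only as a condensed sketch of the loop below:
 * a waiter registered through lpfc_bsg_hba_set_event() receives the frame
 * when
 *
 *	(evt->type_mask & FC_REG_CT_EVENT) &&
 *	(evt->req_id == ct_req->FsType)
 *
 * so, for example, a waiter registered with ev_req_id == SLI_CT_ELX_LOOPBACK
 * is handed loopback CT frames only.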
**/ int lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocbq) { uint32_t evt_req_id = 0; u16 cmd; struct lpfc_dmabuf *dmabuf = NULL; struct lpfc_bsg_event *evt; struct event_data *evt_dat = NULL; struct lpfc_iocbq *iocbq; IOCB_t *iocb = NULL; size_t offset = 0; struct list_head head; struct ulp_bde64 *bde; dma_addr_t dma_addr; int i; struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf; struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf; struct lpfc_sli_ct_request *ct_req; struct bsg_job *job = NULL; struct fc_bsg_reply *bsg_reply; struct bsg_job_data *dd_data = NULL; unsigned long flags; int size = 0; u32 bde_count = 0; INIT_LIST_HEAD(&head); list_add_tail(&head, &piocbq->list); ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; evt_req_id = ct_req->FsType; cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry(evt, &phba->ct_ev_waiters, node) { if (!(evt->type_mask & FC_REG_CT_EVENT) || evt->req_id != evt_req_id) continue; lpfc_bsg_event_ref(evt); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); if (evt_dat == NULL) { spin_lock_irqsave(&phba->ct_ev_lock, flags); lpfc_bsg_event_unref(evt); lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2614 Memory allocation failed for " "CT event\n"); break; } if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { /* take accumulated byte count from the last iocbq */ iocbq = list_entry(head.prev, typeof(*iocbq), list); if (phba->sli_rev == LPFC_SLI_REV4) evt_dat->len = iocbq->wcqe_cmpl.total_data_placed; else evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; } else { list_for_each_entry(iocbq, &head, list) { iocb = &iocbq->iocb; for (i = 0; i < iocb->ulpBdeCount; i++) evt_dat->len += iocb->un.cont64[i].tus.f.bdeSize; } } evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); if (evt_dat->data == NULL) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2615 Memory allocation failed for " "CT event data, size %d\n", evt_dat->len); kfree(evt_dat); spin_lock_irqsave(&phba->ct_ev_lock, flags); lpfc_bsg_event_unref(evt); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); goto error_ct_unsol_exit; } list_for_each_entry(iocbq, &head, list) { size = 0; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { bdeBuf1 = iocbq->cmd_dmabuf; bdeBuf2 = iocbq->bpl_dmabuf; } if (phba->sli_rev == LPFC_SLI_REV4) bde_count = iocbq->wcqe_cmpl.word3; else bde_count = iocbq->iocb.ulpBdeCount; for (i = 0; i < bde_count; i++) { if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (i == 0) { size = iocbq->wqe.gen_req.bde.tus.f.bdeSize; dmabuf = bdeBuf1; } else if (i == 1) { size = iocbq->unsol_rcv_len; dmabuf = bdeBuf2; } if ((offset + size) > evt_dat->len) size = evt_dat->len - offset; } else { size = iocbq->iocb.un.cont64[i]. 
tus.f.bdeSize; bde = &iocbq->iocb.un.cont64[i]; dma_addr = getPaddr(bde->addrHigh, bde->addrLow); dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); } if (!dmabuf) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2616 No dmabuf " "found for iocbq x%px\n", iocbq); kfree(evt_dat->data); kfree(evt_dat); spin_lock_irqsave(&phba->ct_ev_lock, flags); lpfc_bsg_event_unref(evt); spin_unlock_irqrestore( &phba->ct_ev_lock, flags); goto error_ct_unsol_exit; } memcpy((char *)(evt_dat->data) + offset, dmabuf->virt, size); offset += size; if (evt_req_id != SLI_CT_ELX_LOOPBACK && !(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); } else { switch (cmd) { case ELX_LOOPBACK_DATA: if (phba->sli_rev < LPFC_SLI_REV4) diag_cmd_data_free(phba, (struct lpfc_dmabufext *)dmabuf); break; case ELX_LOOPBACK_XRI_SETUP: if ((phba->sli_rev == LPFC_SLI_REV2) || (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED )) { lpfc_in_buf_free(phba, dmabuf); } else { lpfc_sli3_post_buffer(phba, pring, 1); } break; default: if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) lpfc_sli3_post_buffer(phba, pring, 1); break; } } } } spin_lock_irqsave(&phba->ct_ev_lock, flags); if (phba->sli_rev == LPFC_SLI_REV4) { evt_dat->immed_dat = phba->ctx_idx; phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX; /* Provide warning for over-run of the ct_ctx array */ if (phba->ct_ctx[evt_dat->immed_dat].valid == UNSOL_VALID) lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "2717 CT context array entry " "[%d] over-run: oxid:x%x, " "sid:x%x\n", phba->ctx_idx, phba->ct_ctx[ evt_dat->immed_dat].oxid, phba->ct_ctx[ evt_dat->immed_dat].SID); phba->ct_ctx[evt_dat->immed_dat].rxid = get_job_ulpcontext(phba, piocbq); phba->ct_ctx[evt_dat->immed_dat].oxid = get_job_rcvoxid(phba, piocbq); phba->ct_ctx[evt_dat->immed_dat].SID = bf_get(wqe_els_did, &piocbq->wqe.xmit_els_rsp.wqe_dest); phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID; } else evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq); evt_dat->type = FC_REG_CT_EVENT; list_add(&evt_dat->node, &evt->events_to_see); if (evt_req_id == SLI_CT_ELX_LOOPBACK) { wake_up_interruptible(&evt->wq); lpfc_bsg_event_unref(evt); break; } list_move(evt->events_to_see.prev, &evt->events_to_get); dd_data = (struct bsg_job_data *)evt->dd_data; job = dd_data->set_job; dd_data->set_job = NULL; lpfc_bsg_event_unref(evt); if (job) { bsg_reply = job->reply; bsg_reply->reply_payload_rcv_len = size; /* make error code available to userspace */ bsg_reply->result = 0; job->dd_data = NULL; /* complete the job back to userspace */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); spin_lock_irqsave(&phba->ct_ev_lock, flags); } } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); error_ct_unsol_exit: if (!list_empty(&head)) list_del(&head); if ((phba->sli_rev < LPFC_SLI_REV4) && (evt_req_id == SLI_CT_ELX_LOOPBACK)) return 0; return 1; } /** * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane * @phba: Pointer to HBA context object. * @dmabuf: pointer to a dmabuf that describes the FC sequence * * This function handles abort to the CT command toward management plane * for SLI4 port. * * If the pending context of a CT command to management plane present, clears * such context and returns 1 for handled; otherwise, it returns 0 indicating * no context exists. 
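 *
 * Minimal sketch of the matching rule (mirrors the loop below, shown for
 * reference only): a context entry is invalidated when all of
 *
 *	ctx->valid == UNSOL_VALID
 *	ctx->rxid  == be16_to_cpu(fc_hdr->fh_rx_id)
 *	ctx->oxid  == be16_to_cpu(fc_hdr->fh_ox_id)
 *	ctx->SID   == sli4_sid_from_fc_hdr(fc_hdr)
 *
 * hold for the aborted exchange.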
**/ int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf) { struct fc_frame_header fc_hdr; struct fc_frame_header *fc_hdr_ptr = &fc_hdr; int ctx_idx, handled = 0; uint16_t oxid, rxid; uint32_t sid; memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); sid = sli4_sid_from_fc_hdr(fc_hdr_ptr); oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id); rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id); for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) { if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID) continue; if (phba->ct_ctx[ctx_idx].rxid != rxid) continue; if (phba->ct_ctx[ctx_idx].oxid != oxid) continue; if (phba->ct_ctx[ctx_idx].SID != sid) continue; phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID; handled = 1; } return handled; } /** * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command * @job: SET_EVENT fc_bsg_job **/ static int lpfc_bsg_hba_set_event(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct set_ct_event *event_req; struct lpfc_bsg_event *evt; int rc = 0; struct bsg_job_data *dd_data = NULL; uint32_t ev_mask; unsigned long flags; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2612 Received SET_CT_EVENT below minimum " "size\n"); rc = -EINVAL; goto job_error; } event_req = (struct set_ct_event *) bsg_request->rqst_data.h_vendor.vendor_cmd; ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & FC_REG_EVENT_MASK); spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry(evt, &phba->ct_ev_waiters, node) { if (evt->reg_id == event_req->ev_reg_id) { lpfc_bsg_event_ref(evt); evt->wait_time_stamp = jiffies; dd_data = (struct bsg_job_data *)evt->dd_data; break; } } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); if (&evt->node == &phba->ct_ev_waiters) { /* no event waiting struct yet - first call */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (dd_data == NULL) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2734 Failed allocation of dd_data\n"); rc = -ENOMEM; goto job_error; } evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, event_req->ev_req_id); if (!evt) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2617 Failed allocation of event " "waiter\n"); rc = -ENOMEM; goto job_error; } dd_data->type = TYPE_EVT; dd_data->set_job = NULL; dd_data->context_un.evt = evt; evt->dd_data = (void *)dd_data; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_add(&evt->node, &phba->ct_ev_waiters); lpfc_bsg_event_ref(evt); evt->wait_time_stamp = jiffies; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); } spin_lock_irqsave(&phba->ct_ev_lock, flags); evt->waiting = 1; dd_data->set_job = job; /* for unsolicited command */ job->dd_data = dd_data; /* for fc transport timeout callback*/ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return 0; /* call job done later */ job_error: kfree(dd_data); job->dd_data = NULL; return rc; } /** * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command * @job: GET_EVENT fc_bsg_job **/ static int lpfc_bsg_hba_get_event(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct get_ct_event *event_req; struct get_ct_event_reply *event_reply; struct lpfc_bsg_event *evt, *evt_next; struct event_data *evt_dat = NULL; 
unsigned long flags; uint32_t rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2613 Received GET_CT_EVENT request below " "minimum size\n"); rc = -EINVAL; goto job_error; } event_req = (struct get_ct_event *) bsg_request->rqst_data.h_vendor.vendor_cmd; event_reply = (struct get_ct_event_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { if (evt->reg_id == event_req->ev_reg_id) { if (list_empty(&evt->events_to_get)) break; lpfc_bsg_event_ref(evt); evt->wait_time_stamp = jiffies; evt_dat = list_entry(evt->events_to_get.prev, struct event_data, node); list_del(&evt_dat->node); break; } } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* The app may continue to ask for event data until it gets * an error indicating that there isn't anymore */ if (evt_dat == NULL) { bsg_reply->reply_payload_rcv_len = 0; rc = -ENOENT; goto job_error; } if (evt_dat->len > job->request_payload.payload_len) { evt_dat->len = job->request_payload.payload_len; lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2618 Truncated event data at %d " "bytes\n", job->request_payload.payload_len); } event_reply->type = evt_dat->type; event_reply->immed_data = evt_dat->immed_dat; if (evt_dat->len > 0) bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, evt_dat->data, evt_dat->len); else bsg_reply->reply_payload_rcv_len = 0; if (evt_dat) { kfree(evt_dat->data); kfree(evt_dat); } spin_lock_irqsave(&phba->ct_ev_lock, flags); lpfc_bsg_event_unref(evt); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); job->dd_data = NULL; bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return 0; job_error: job->dd_data = NULL; bsg_reply->result = rc; return rc; } /** * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. * @rspiocbq: Pointer to response iocb. * * This function is the completion handler for iocbs issued using * lpfc_issue_ct_rsp_cmp function. This function is called by the * ring event handler function without any lock held. This function * can be called from both worker thread context and interrupt * context. This function also can be called from other thread which * cleans up the SLI layer objects. * This function copy the contents of the response iocb to the * response iocb memory object provided by the caller of * lpfc_sli_issue_iocb_wait and then wakes up the thread which * sleeps for the iocb completion. 
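 *
 * For reference, a condensed view of the status-to-errno mapping applied
 * below (the same convention as lpfc_bsg_send_mgmt_cmd_cmp()):
 *
 *	IOSTAT_LOCAL_REJECT + IOERR_SEQUENCE_TIMEOUT  ->  -ETIMEDOUT
 *	IOSTAT_LOCAL_REJECT + IOERR_INVALID_RPI       ->  -EFAULT
 *	any other non-zero ulp_status                 ->  -EACCES
 *	ulp_status == 0                               ->  result 0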
**/ static void lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_iocbq *rspiocbq) { struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; struct lpfc_dmabuf *bmp, *cmp; struct lpfc_nodelist *ndlp; unsigned long flags; int rc = 0; u32 ulp_status, ulp_word4; dd_data = cmdiocbq->context_un.dd_data; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* Close the timeout handler abort window */ spin_lock_irqsave(&phba->hbalock, flags); cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; spin_unlock_irqrestore(&phba->hbalock, flags); ndlp = dd_data->context_un.iocb.ndlp; cmp = cmdiocbq->cmd_dmabuf; bmp = cmdiocbq->bpl_dmabuf; ulp_status = get_job_ulpstatus(phba, rspiocbq); ulp_word4 = get_job_word4(phba, rspiocbq); /* Copy the completed job data or set the error status */ if (job) { bsg_reply = job->reply; if (ulp_status) { if (ulp_status == IOSTAT_LOCAL_REJECT) { switch (ulp_word4 & IOERR_PARAM_MASK) { case IOERR_SEQUENCE_TIMEOUT: rc = -ETIMEDOUT; break; case IOERR_INVALID_RPI: rc = -EFAULT; break; default: rc = -EACCES; break; } } else { rc = -EACCES; } } else { bsg_reply->reply_payload_rcv_len = 0; } } lpfc_free_bsg_buffers(phba, cmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); kfree(dd_data); /* Complete the job if the job is still active */ if (job) { bsg_reply->result = rc; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return; } /** * lpfc_issue_ct_rsp - issue a ct response * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @tag: tag index value into the ports context exchange array. * @cmp: Pointer to a cmp dma buffer descriptor. * @bmp: Pointer to a bmp dma buffer descriptor. * @num_entry: Number of enties in the bde. 
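 *
 * Usage note (illustrative, based on the body below): on SLI4 ports @tag is
 * the immed_dat value reported to the application by
 * lpfc_bsg_ct_unsol_event(); it indexes phba->ct_ctx[] and the entry must
 * still satisfy
 *
 *	phba->ct_ctx[tag].valid == UNSOL_VALID
 *
 * or the response is rejected with IOCB_ERROR before the XMIT_SEQUENCE64
 * frame is built against the saved oxid/SID.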
**/ static int lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp, int num_entry) { struct lpfc_iocbq *ctiocb = NULL; int rc = 0; struct lpfc_nodelist *ndlp = NULL; struct bsg_job_data *dd_data; unsigned long flags; uint32_t creg_val; u16 ulp_context, iotag; ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); if (!ndlp) { lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, "2721 ndlp null for oxid %x SID %x\n", phba->ct_ctx[tag].rxid, phba->ct_ctx[tag].SID); return IOCB_ERROR; } /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2736 Failed allocation of dd_data\n"); rc = -ENOMEM; goto no_dd_data; } /* Allocate buffer for command iocb */ ctiocb = lpfc_sli_get_iocbq(phba); if (!ctiocb) { rc = -ENOMEM; goto no_ctiocb; } if (phba->sli_rev == LPFC_SLI_REV4) { /* Do not issue unsol response if oxid not marked as valid */ if (phba->ct_ctx[tag].valid != UNSOL_VALID) { rc = IOCB_ERROR; goto issue_ct_rsp_exit; } lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], phba->ct_ctx[tag].oxid, num_entry, FC_RCTL_DD_SOL_CTL, 1, CMD_XMIT_SEQUENCE64_WQE); /* The exchange is done, mark the entry as invalid */ phba->ct_ctx[tag].valid = UNSOL_INVALID; iotag = get_wqe_reqtag(ctiocb); } else { lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry, FC_RCTL_DD_SOL_CTL, 1, CMD_XMIT_SEQUENCE64_CX); ctiocb->num_bdes = num_entry; iotag = ctiocb->iocb.ulpIoTag; } ulp_context = get_job_ulpcontext(phba, ctiocb); /* Xmit CT response on exchange <xid> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", ulp_context, iotag, tag, phba->link_state); ctiocb->cmd_flag |= LPFC_IO_LIBDFC; ctiocb->vport = phba->pport; ctiocb->context_un.dd_data = dd_data; ctiocb->cmd_dmabuf = cmp; ctiocb->bpl_dmabuf = bmp; ctiocb->ndlp = ndlp; ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = ctiocb; dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp); if (!dd_data->context_un.iocb.ndlp) { rc = -IOCB_ERROR; goto issue_ct_rsp_exit; } dd_data->context_un.iocb.rmp = NULL; job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { rc = -IOCB_ERROR; goto issue_ct_rsp_exit; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); if (rc == IOCB_SUCCESS) { spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O had not been completed/released */ if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) { /* open up abort window to timeout handler */ ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; } spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ } /* iocb failed so cleanup */ job->dd_data = NULL; lpfc_nlp_put(ndlp); issue_ct_rsp_exit: lpfc_sli_release_iocbq(phba, ctiocb); no_ctiocb: kfree(dd_data); no_dd_data: return rc; } /** * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command * @job: SEND_MGMT_RESP fc_bsg_job **/ static int lpfc_bsg_send_mgmt_rsp(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct send_mgmt_resp *mgmt_resp 
= (struct send_mgmt_resp *) bsg_request->rqst_data.h_vendor.vendor_cmd; struct ulp_bde64 *bpl; struct lpfc_dmabuf *bmp = NULL, *cmp = NULL; int bpl_entries; uint32_t tag = mgmt_resp->tag; unsigned long reqbfrcnt = (unsigned long)job->request_payload.payload_len; int rc = 0; /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { rc = -ERANGE; goto send_mgmt_rsp_exit; } bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) { rc = -ENOMEM; goto send_mgmt_rsp_exit; } bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); if (!bmp->virt) { rc = -ENOMEM; goto send_mgmt_rsp_free_bmp; } INIT_LIST_HEAD(&bmp->list); bpl = (struct ulp_bde64 *) bmp->virt; bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64)); cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, 1, bpl, &bpl_entries); if (!cmp) { rc = -ENOMEM; goto send_mgmt_rsp_free_bmp; } lpfc_bsg_copy_data(cmp, &job->request_payload, job->request_payload.payload_len, 1); rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries); if (rc == IOCB_SUCCESS) return 0; /* done for now */ rc = -EACCES; lpfc_free_bsg_buffers(phba, cmp); send_mgmt_rsp_free_bmp: if (bmp->virt) lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); send_mgmt_rsp_exit: /* make error code available to userspace */ bsg_reply->result = rc; job->dd_data = NULL; return rc; } /** * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode * @phba: Pointer to HBA context object. * * This function is responsible for preparing driver for diag loopback * on device. */ static int lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct Scsi_Host *shost; struct lpfc_sli *psli; struct lpfc_queue *qp = NULL; struct lpfc_sli_ring *pring; int i = 0; psli = &phba->sli; if (!psli) return -ENODEV; if ((phba->link_state == LPFC_HBA_ERROR) || (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || (!(psli->sli_flag & LPFC_SLI_ACTIVE))) return -EACCES; vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); scsi_block_requests(shost); } lpfc_destroy_vport_work_array(phba, vports); } else { shost = lpfc_shost_from_vport(phba->pport); scsi_block_requests(shost); } if (phba->sli_rev != LPFC_SLI_REV4) { pring = &psli->sli3_ring[LPFC_FCP_RING]; lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock); return 0; } list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring || (pring->ringno != LPFC_FCP_RING)) continue; if (!lpfc_emptyq_wait(phba, &pring->txcmplq, &pring->ring_lock)) break; } return 0; } /** * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode * @phba: Pointer to HBA context object. * * This function is responsible for driver exit processing of setting up * diag loopback mode on device. */ static void lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba) { struct Scsi_Host *shost; struct lpfc_vport **vports; int i; vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); scsi_unblock_requests(shost); } lpfc_destroy_vport_work_array(phba, vports); } else { shost = lpfc_shost_from_vport(phba->pport); scsi_unblock_requests(shost); } return; } /** * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command * @phba: Pointer to HBA context object. 
* @job: LPFC_BSG_VENDOR_DIAG_MODE * * This function is responsible for placing an sli3 port into diagnostic * loopback mode in order to perform a diagnostic loopback test. * All new scsi requests are blocked, a small delay is used to allow the * scsi requests to complete then the link is brought down. If the link is * is placed in loopback mode then scsi requests are again allowed * so the scsi mid-layer doesn't give up on the port. * All of this is done in-line. */ static int lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct diag_mode_set *loopback_mode; uint32_t link_flags; uint32_t timeout; LPFC_MBOXQ_t *pmboxq = NULL; int mbxstatus = MBX_SUCCESS; int i = 0; int rc = 0; /* no data to return just the return code */ bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2738 Received DIAG MODE request size:%d " "below the minimum size:%d\n", job->request_len, (int)(sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set))); rc = -EINVAL; goto job_error; } rc = lpfc_bsg_diag_mode_enter(phba); if (rc) goto job_error; /* bring the link to diagnostic mode */ loopback_mode = (struct diag_mode_set *) bsg_request->rqst_data.h_vendor.vendor_cmd; link_flags = loopback_mode->type; timeout = loopback_mode->timeout * 100; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { rc = -ENOMEM; goto loopback_mode_exit; } memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) { /* wait for link down before proceeding */ i = 0; while (phba->link_state != LPFC_LINK_DOWN) { if (i++ > timeout) { rc = -ETIMEDOUT; goto loopback_mode_exit; } msleep(10); } memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); if (link_flags == INTERNAL_LOOP_BACK) pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB; else pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; pmboxq->u.mb.mbxCommand = MBX_INIT_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) rc = -ENODEV; else { spin_lock_irq(&phba->hbalock); phba->link_flag |= LS_LOOPBACK_MODE; spin_unlock_irq(&phba->hbalock); /* wait for the link attention interrupt */ msleep(100); i = 0; while (phba->link_state != LPFC_HBA_READY) { if (i++ > timeout) { rc = -ETIMEDOUT; break; } msleep(10); } } } else rc = -ENODEV; loopback_mode_exit: lpfc_bsg_diag_mode_exit(phba); /* * Let SLI layer release mboxq if mbox command completed after timeout. */ if (pmboxq && mbxstatus != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); job_error: /* make error code available to userspace */ bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state * @phba: Pointer to HBA context object. * @diag: Flag for set link to diag or nomral operation state. * * This function is responsible for issuing a sli4 mailbox command for setting * link to either diag state or normal operation state. 
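 *
 * Caller pattern (illustrative only, mirroring the bsg handlers in this
 * file):
 *
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);   enter diag state
 *	... run loopback setup or a link diag test ...
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);   back to normal
 *
 * A non-zero return indicates the SET_LINK_DIAG_STATE mailbox command did
 * not complete successfully.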
*/ static int lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag) { LPFC_MBOXQ_t *pmboxq; struct lpfc_mbx_set_link_diag_state *link_diag_state; uint32_t req_len, alloc_len; int mbxstatus = MBX_SUCCESS, rc; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return -ENOMEM; req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - sizeof(struct lpfc_sli4_cfg_mhdr)); alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, req_len, LPFC_SLI4_MBX_EMBED); if (alloc_len != req_len) { rc = -ENOMEM; goto link_diag_state_set_out; } lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3128 Set link to diagnostic state:x%x (x%x/x%x)\n", diag, phba->sli4_hba.lnk_info.lnk_tp, phba->sli4_hba.lnk_info.lnk_no); link_diag_state = &pmboxq->u.mqe.un.link_diag_state; bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req, LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE); bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, phba->sli4_hba.lnk_info.lnk_no); bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, phba->sli4_hba.lnk_info.lnk_tp); if (diag) bf_set(lpfc_mbx_set_diag_state_diag, &link_diag_state->u.req, 1); else bf_set(lpfc_mbx_set_diag_state_diag, &link_diag_state->u.req, 0); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) rc = 0; else rc = -ENODEV; link_diag_state_set_out: if (pmboxq && (mbxstatus != MBX_TIMEOUT)) mempool_free(pmboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic * @phba: Pointer to HBA context object. * @mode: loopback mode to set * @link_no: link number for loopback mode to set * * This function is responsible for issuing a sli4 mailbox command for setting * up loopback diagnostic for a link. */ static int lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode, uint32_t link_no) { LPFC_MBOXQ_t *pmboxq; uint32_t req_len, alloc_len; struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; int mbxstatus = MBX_SUCCESS, rc = 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) return -ENOMEM; req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) - sizeof(struct lpfc_sli4_cfg_mhdr)); alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK, req_len, LPFC_SLI4_MBX_EMBED); if (alloc_len != req_len) { mempool_free(pmboxq, phba->mbox_mem_pool); return -ENOMEM; } link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_loopback->u.req, link_no); if (phba->sli4_hba.conf_trunk & (1 << link_no)) { bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED); } else { bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); } bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, mode); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3127 Failed setup loopback mode mailbox " "command, rc:x%x, status:x%x\n", mbxstatus, pmboxq->u.mb.mbxStatus); rc = -ENODEV; } if (pmboxq && (mbxstatus != MBX_TIMEOUT)) mempool_free(pmboxq, phba->mbox_mem_pool); return rc; } /** * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic * @phba: Pointer to HBA context object. 
* * This function set up SLI4 FC port registrations for diagnostic run, which * includes all the rpis, vfi, and also vpi. */ static int lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) { if (phba->pport->fc_flag & FC_VFI_REGISTERED) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3136 Port still had vfi registered: " "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", phba->pport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[phba->pport->vfi], phba->vpi_ids[phba->pport->vpi]); return -EINVAL; } return lpfc_issue_reg_vfi(phba->pport); } /** * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command * @phba: Pointer to HBA context object. * @job: LPFC_BSG_VENDOR_DIAG_MODE * * This function is responsible for placing an sli4 port into diagnostic * loopback mode in order to perform a diagnostic loopback test. */ static int lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct diag_mode_set *loopback_mode; uint32_t link_flags, timeout, link_no; int i, rc = 0; /* no data to return just the return code */ bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3011 Received DIAG MODE request size:%d " "below the minimum size:%d\n", job->request_len, (int)(sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set))); rc = -EINVAL; goto job_done; } loopback_mode = (struct diag_mode_set *) bsg_request->rqst_data.h_vendor.vendor_cmd; link_flags = loopback_mode->type; timeout = loopback_mode->timeout * 100; if (loopback_mode->physical_link == -1) link_no = phba->sli4_hba.lnk_info.lnk_no; else link_no = loopback_mode->physical_link; if (link_flags == DISABLE_LOOP_BACK) { rc = lpfc_sli4_bsg_set_loopback_mode(phba, LPFC_DIAG_LOOPBACK_TYPE_DISABLE, link_no); if (!rc) { /* Unset the need disable bit */ phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4); } goto job_done; } else { /* Check if we need to disable the loopback state */ if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) { rc = -EPERM; goto job_done; } } rc = lpfc_bsg_diag_mode_enter(phba); if (rc) goto job_done; /* indicate we are in loobpack diagnostic mode */ spin_lock_irq(&phba->hbalock); phba->link_flag |= LS_LOOPBACK_MODE; spin_unlock_irq(&phba->hbalock); /* reset port to start frome scratch */ rc = lpfc_selective_reset(phba); if (rc) goto job_done; /* bring the link to diagnostic mode */ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3129 Bring link to diagnostic state.\n"); rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3130 Failed to bring link to diagnostic " "state, rc:x%x\n", rc); goto loopback_mode_exit; } /* wait for link down before proceeding */ i = 0; while (phba->link_state != LPFC_LINK_DOWN) { if (i++ > timeout) { rc = -ETIMEDOUT; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3131 Timeout waiting for link to " "diagnostic mode, timeout:%d ms\n", timeout * 10); goto loopback_mode_exit; } msleep(10); } /* set up loopback mode */ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3132 Set up loopback mode:x%x\n", link_flags); switch (link_flags) { case INTERNAL_LOOP_BACK: if (phba->sli4_hba.conf_trunk & (1 << link_no)) { rc = lpfc_sli4_bsg_set_loopback_mode(phba, LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, link_no); } else { /* Trunk is configured, but link is not in this trunk */ if (phba->sli4_hba.conf_trunk) { rc = 
-ELNRNG; goto loopback_mode_exit; } rc = lpfc_sli4_bsg_set_loopback_mode(phba, LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, link_no); } if (!rc) { /* Set the need disable bit */ phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; } break; case EXTERNAL_LOOP_BACK: if (phba->sli4_hba.conf_trunk & (1 << link_no)) { rc = lpfc_sli4_bsg_set_loopback_mode(phba, LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED, link_no); } else { /* Trunk is configured, but link is not in this trunk */ if (phba->sli4_hba.conf_trunk) { rc = -ELNRNG; goto loopback_mode_exit; } rc = lpfc_sli4_bsg_set_loopback_mode(phba, LPFC_DIAG_LOOPBACK_TYPE_SERDES, link_no); } if (!rc) { /* Set the need disable bit */ phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; } break; default: rc = -EINVAL; lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3141 Loopback mode:x%x not supported\n", link_flags); goto loopback_mode_exit; } if (!rc) { /* wait for the link attention interrupt */ msleep(100); i = 0; while (phba->link_state < LPFC_LINK_UP) { if (i++ > timeout) { rc = -ETIMEDOUT; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3137 Timeout waiting for link up " "in loopback mode, timeout:%d ms\n", timeout * 10); break; } msleep(10); } } /* port resource registration setup for loopback diagnostic */ if (!rc) { /* set up a none zero myDID for loopback test */ phba->pport->fc_myDID = 1; rc = lpfc_sli4_diag_fcport_reg_setup(phba); } else goto loopback_mode_exit; if (!rc) { /* wait for the port ready */ msleep(100); i = 0; while (phba->link_state != LPFC_HBA_READY) { if (i++ > timeout) { rc = -ETIMEDOUT; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3133 Timeout waiting for port " "loopback mode ready, timeout:%d ms\n", timeout * 10); break; } msleep(10); } } loopback_mode_exit: /* clear loopback diagnostic mode */ if (rc) { spin_lock_irq(&phba->hbalock); phba->link_flag &= ~LS_LOOPBACK_MODE; spin_unlock_irq(&phba->hbalock); } lpfc_bsg_diag_mode_exit(phba); job_done: /* make error code available to userspace */ bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode * @job: LPFC_BSG_VENDOR_DIAG_MODE * * This function is responsible for responding to check and dispatch bsg diag * command from the user to proper driver action routines. */ static int lpfc_bsg_diag_loopback_mode(struct bsg_job *job) { struct Scsi_Host *shost; struct lpfc_vport *vport; struct lpfc_hba *phba; int rc; shost = fc_bsg_to_shost(job); if (!shost) return -ENODEV; vport = shost_priv(shost); if (!vport) return -ENODEV; phba = vport->phba; if (!phba) return -ENODEV; if (phba->sli_rev < LPFC_SLI_REV4) rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); else rc = -ENODEV; return rc; } /** * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode * @job: LPFC_BSG_VENDOR_DIAG_MODE_END * * This function is responsible for responding to check and dispatch bsg diag * command from the user to proper driver action routines. 
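 *
 * Rough sequence performed below, summarized here for readability only:
 *	1. clear LS_LOOPBACK_MODE under the hbalock
 *	2. lpfc_sli4_bsg_set_link_diag_state(phba, 0)
 *	3. poll for LPFC_LINK_DOWN, bounded by the user supplied timeout
 *	4. lpfc_selective_reset() and reset pport->fc_myDID to 0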
 */
static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
			bsg_request->rqst_data.h_vendor.vendor_cmd;
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
 *
 * This function performs an SLI4 diag link test request from the user
 * application.
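 *
 * Illustrative request/reply layout (struct names as used in the body
 * below):
 *
 *	request: struct sli4_link_diag in rqst_data.h_vendor.vendor_cmd
 *	         (test_id, loops, test_version, error_action)
 *	reply:   struct diag_status in reply_data.vendor_reply.vendor_rsp
 *	         (mbox_status, shdr_status, shdr_add_status)
 *
 * The reply buffer must hold at least sizeof(struct fc_bsg_reply) +
 * sizeof(struct diag_status) or the request fails with -EINVAL.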
*/ static int lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct Scsi_Host *shost; struct lpfc_vport *vport; struct lpfc_hba *phba; LPFC_MBOXQ_t *pmboxq; struct sli4_link_diag *link_diag_test_cmd; uint32_t req_len, alloc_len; struct lpfc_mbx_run_link_diag_test *run_link_diag_test; union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; struct diag_status *diag_status_reply; int mbxstatus, rc = -ENODEV, rc1 = 0; shost = fc_bsg_to_shost(job); if (!shost) goto job_error; vport = shost_priv(shost); if (!vport) goto job_error; phba = vport->phba; if (!phba) goto job_error; if (phba->sli_rev < LPFC_SLI_REV4) goto job_error; if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) goto job_error; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct sli4_link_diag)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3013 Received LINK DIAG TEST request " " size:%d below the minimum size:%d\n", job->request_len, (int)(sizeof(struct fc_bsg_request) + sizeof(struct sli4_link_diag))); rc = -EINVAL; goto job_error; } rc = lpfc_bsg_diag_mode_enter(phba); if (rc) goto job_error; link_diag_test_cmd = (struct sli4_link_diag *) bsg_request->rqst_data.h_vendor.vendor_cmd; rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); if (rc) goto job_error; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { rc = -ENOMEM; goto link_diag_test_exit; } req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - sizeof(struct lpfc_sli4_cfg_mhdr)); alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, req_len, LPFC_SLI4_MBX_EMBED); if (alloc_len != req_len) { rc = -ENOMEM; goto link_diag_test_exit; } run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, phba->sli4_hba.lnk_info.lnk_no); bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, phba->sli4_hba.lnk_info.lnk_tp); bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, link_diag_test_cmd->test_id); bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, link_diag_test_cmd->loops); bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, link_diag_test_cmd->test_version); bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, link_diag_test_cmd->error_action); mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || mbxstatus) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3010 Run link diag test mailbox failed with " "mbx_status x%x status x%x, add_status x%x\n", mbxstatus, shdr_status, shdr_add_status); } diag_status_reply = (struct diag_status *) bsg_reply->reply_data.vendor_reply.vendor_rsp; if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3012 Received Run link diag test reply " "below minimum size (%d): reply_len:%d\n", (int)(sizeof(*bsg_reply) + sizeof(*diag_status_reply)), job->reply_len); rc = -EINVAL; goto job_error; } diag_status_reply->mbox_status = mbxstatus; diag_status_reply->shdr_status = shdr_status; diag_status_reply->shdr_add_status = shdr_add_status; link_diag_test_exit: rc1 = 
lpfc_sli4_bsg_set_link_diag_state(phba, 0); if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_diag_mode_exit(phba); job_error: /* make error code available to userspace */ if (rc1 && !rc) rc = rc1; bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfcdiag_loop_self_reg - obtains a remote port login id * @phba: Pointer to HBA context object * @rpi: Pointer to a remote port login id * * This function obtains a remote port login id so the diag loopback test * can send and receive its own unsolicited CT command. **/ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) { LPFC_MBOXQ_t *mbox; struct lpfc_dmabuf *dmabuff; int status; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; if (phba->sli_rev < LPFC_SLI_REV4) status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi); else { *rpi = lpfc_sli4_alloc_rpi(phba); if (*rpi == LPFC_RPI_ALLOC_ERROR) { mempool_free(mbox, phba->mbox_mem_pool); return -EBUSY; } status = lpfc_reg_rpi(phba, phba->pport->vpi, phba->pport->fc_myDID, (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi); } if (status) { mempool_free(mbox, phba->mbox_mem_pool); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(phba, *rpi); return -ENOMEM; } dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf; mbox->ctx_buf = NULL; mbox->ctx_ndlp = NULL; status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); kfree(dmabuff); if (status != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(phba, *rpi); return -ENODEV; } if (phba->sli_rev < LPFC_SLI_REV4) *rpi = mbox->u.mb.un.varWords[0]; lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); kfree(dmabuff); mempool_free(mbox, phba->mbox_mem_pool); return 0; } /** * lpfcdiag_loop_self_unreg - unregs from the rpi * @phba: Pointer to HBA context object * @rpi: Remote port login id * * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg **/ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) { LPFC_MBOXQ_t *mbox; int status; /* Allocate mboxq structure */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox == NULL) return -ENOMEM; if (phba->sli_rev < LPFC_SLI_REV4) lpfc_unreg_login(phba, 0, rpi, mbox); else lpfc_unreg_login(phba, phba->pport->vpi, phba->sli4_hba.rpi_ids[rpi], mbox); status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { if (status != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); return -EIO; } mempool_free(mbox, phba->mbox_mem_pool); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(phba, rpi); return 0; } /** * lpfcdiag_loop_get_xri - obtains the transmit and receive ids * @phba: Pointer to HBA context object * @rpi: Remote port login id * @txxri: Pointer to transmit exchange id * @rxxri: Pointer to response exchabge id * * This function obtains the transmit and receive ids required to send * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp * flags are used to the unsolicited response handler is able to process * the ct command sent on the same port. 
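 *
 * Condensed flow (for orientation only; the details are in the body below):
 *
 *	1. register an FC_REG_CT_EVENT waiter keyed on SLI_CT_ELX_LOOPBACK
 *	2. send an ELX_LOOPBACK_XRI_SETUP CT request via XMIT_SEQUENCE64_CR
 *	3. *txxri is taken from the solicited completion's exchange context
 *	4. *rxxri is taken from the matching unsolicited event's immed_dat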
**/ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, uint16_t *txxri, uint16_t * rxxri) { struct lpfc_bsg_event *evt; struct lpfc_iocbq *cmdiocbq, *rspiocbq; struct lpfc_dmabuf *dmabuf; struct ulp_bde64 *bpl = NULL; struct lpfc_sli_ct_request *ctreq = NULL; int ret_val = 0; int time_left; int iocb_stat = IOCB_SUCCESS; unsigned long flags; u32 status; *txxri = 0; *rxxri = 0; evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, SLI_CT_ELX_LOOPBACK); if (!evt) return -ENOMEM; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_add(&evt->node, &phba->ct_ev_waiters); lpfc_bsg_event_ref(evt); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); cmdiocbq = lpfc_sli_get_iocbq(phba); rspiocbq = lpfc_sli_get_iocbq(phba); dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (dmabuf) { dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); if (dmabuf->virt) { INIT_LIST_HEAD(&dmabuf->list); bpl = (struct ulp_bde64 *) dmabuf->virt; memset(bpl, 0, sizeof(*bpl)); ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl))); bpl->addrLow = le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl))); bpl->tus.f.bdeFlags = 0; bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; bpl->tus.w = le32_to_cpu(bpl->tus.w); } } if (cmdiocbq == NULL || rspiocbq == NULL || dmabuf == NULL || bpl == NULL || ctreq == NULL || dmabuf->virt == NULL) { ret_val = -ENOMEM; goto err_get_xri_exit; } memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; ctreq->RevisionId.bits.InId = 0; ctreq->FsType = SLI_CT_ELX_LOOPBACK; ctreq->FsSubType = 0; ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; ctreq->CommandResponse.bits.Size = 0; cmdiocbq->bpl_dmabuf = dmabuf; cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; cmdiocbq->cmd_cmpl = NULL; lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1, FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR); iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); status = get_job_ulpstatus(phba, rspiocbq); if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) { ret_val = -EIO; goto err_get_xri_exit; } *txxri = get_job_ulpcontext(phba, rspiocbq); evt->waiting = 1; evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), msecs_to_jiffies(1000 * ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { spin_lock_irqsave(&phba->ct_ev_lock, flags); list_move(evt->events_to_see.prev, &evt->events_to_get); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); *rxxri = (list_entry(evt->events_to_get.prev, typeof(struct event_data), node))->immed_dat; } evt->waiting = 0; err_get_xri_exit: spin_lock_irqsave(&phba->ct_ev_lock, flags); lpfc_bsg_event_unref(evt); /* release ref */ lpfc_bsg_event_unref(evt); /* delete */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); if (dmabuf) { if (dmabuf->virt) lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); kfree(dmabuf); } if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) lpfc_sli_release_iocbq(phba, cmdiocbq); if (rspiocbq) lpfc_sli_release_iocbq(phba, rspiocbq); return ret_val; } /** * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers * @phba: Pointer to HBA context object * * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and * returns the pointer to the buffer. 
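 *
 * Pairing note (illustrative): the buffer returned here is BSG_MBOX_SIZE
 * bytes of coherent DMA memory and must be released with
 * lpfc_bsg_dma_page_free(), or with lpfc_bsg_dma_page_list_free() when it
 * has been queued on a list, e.g.
 *
 *	dmabuf = lpfc_bsg_dma_page_alloc(phba);
 *	if (!dmabuf)
 *		return -ENOMEM;
 *	...
 *	lpfc_bsg_dma_page_free(phba, dmabuf);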
**/ static struct lpfc_dmabuf * lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) { struct lpfc_dmabuf *dmabuf; struct pci_dev *pcidev = phba->pcidev; /* allocate dma buffer struct */ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!dmabuf) return NULL; INIT_LIST_HEAD(&dmabuf->list); /* now, allocate dma buffer */ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, &(dmabuf->phys), GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); return NULL; } return dmabuf; } /** * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer * @phba: Pointer to HBA context object. * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor. * * This routine just simply frees a dma buffer and its associated buffer * descriptor referred by @dmabuf. **/ static void lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf) { struct pci_dev *pcidev = phba->pcidev; if (!dmabuf) return; if (dmabuf->virt) dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE, dmabuf->virt, dmabuf->phys); kfree(dmabuf); return; } /** * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers * @phba: Pointer to HBA context object. * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs. * * This routine just simply frees all dma buffers and their associated buffer * descriptors referred by @dmabuf_list. **/ static void lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba, struct list_head *dmabuf_list) { struct lpfc_dmabuf *dmabuf, *next_dmabuf; if (list_empty(dmabuf_list)) return; list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) { list_del_init(&dmabuf->list); lpfc_bsg_dma_page_free(phba, dmabuf); } return; } /** * diag_cmd_data_alloc - fills in a bde struct with dma buffers * @phba: Pointer to HBA context object * @bpl: Pointer to 64 bit bde structure * @size: Number of bytes to process * @nocopydata: Flag to copy user data into the allocated buffer * * This function allocates page size buffers and populates an lpfc_dmabufext. * If allowed the user data pointed to with indataptr is copied into the kernel * memory. The chained list of page size buffers is returned. 
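 *
 * Usage sketch (mirrors the loopback receive path in this file):
 *
 *	rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
 *	if (!rxbuffer)
 *		return -ENOMEM;
 *	num_bde = rxbuffer->flag;          (count of BDEs filled in)
 *	...
 *	diag_cmd_data_free(phba, rxbuffer);
 *
 * Each chunk is at most BUF_SZ_4K bytes; on an allocation failure the
 * partial chain is freed and NULL is returned.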
**/ static struct lpfc_dmabufext * diag_cmd_data_alloc(struct lpfc_hba *phba, struct ulp_bde64 *bpl, uint32_t size, int nocopydata) { struct lpfc_dmabufext *mlist = NULL; struct lpfc_dmabufext *dmp; int cnt, offset = 0, i = 0; struct pci_dev *pcidev; pcidev = phba->pcidev; while (size) { /* We get chunks of 4K */ if (size > BUF_SZ_4K) cnt = BUF_SZ_4K; else cnt = size; /* allocate struct lpfc_dmabufext buffer header */ dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); if (!dmp) goto out; INIT_LIST_HEAD(&dmp->dma.list); /* Queue it to a linked list */ if (mlist) list_add_tail(&dmp->dma.list, &mlist->dma.list); else mlist = dmp; /* allocate buffer */ dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, cnt, &(dmp->dma.phys), GFP_KERNEL); if (!dmp->dma.virt) goto out; dmp->size = cnt; if (nocopydata) { bpl->tus.f.bdeFlags = 0; } else { memset((uint8_t *)dmp->dma.virt, 0, cnt); bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; } /* build buffer ptr list for IOCB */ bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); bpl->tus.f.bdeSize = (ushort) cnt; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; i++; offset += cnt; size -= cnt; } if (mlist) { mlist->flag = i; return mlist; } out: diag_cmd_data_free(phba, mlist); return NULL; } /** * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd * @phba: Pointer to HBA context object * @rxxri: Receive exchange id * @len: Number of data bytes * * This function allocates and posts a data buffer of sufficient size to receive * an unsolicited CT command. **/ static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, size_t len) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; IOCB_t *cmd = NULL; struct list_head head, *curr, *next; struct lpfc_dmabuf *rxbmp; struct lpfc_dmabuf *dmp; struct lpfc_dmabuf *mp[2] = {NULL, NULL}; struct ulp_bde64 *rxbpl = NULL; uint32_t num_bde; struct lpfc_dmabufext *rxbuffer = NULL; int ret_val = 0; int iocb_stat; int i = 0; pring = lpfc_phba_elsring(phba); cmdiocbq = lpfc_sli_get_iocbq(phba); rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (rxbmp != NULL) { rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); if (rxbmp->virt) { INIT_LIST_HEAD(&rxbmp->list); rxbpl = (struct ulp_bde64 *) rxbmp->virt; rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); } } if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { ret_val = -ENOMEM; goto err_post_rxbufs_exit; } /* Queue buffers for the receive exchange */ num_bde = (uint32_t)rxbuffer->flag; dmp = &rxbuffer->dma; cmd = &cmdiocbq->iocb; i = 0; INIT_LIST_HEAD(&head); list_add_tail(&head, &dmp->list); list_for_each_safe(curr, next, &head) { mp[i] = list_entry(curr, struct lpfc_dmabuf, list); list_del(curr); if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); cmd->un.quexri64cx.buff.bde.addrHigh = putPaddrHigh(mp[i]->phys); cmd->un.quexri64cx.buff.bde.addrLow = putPaddrLow(mp[i]->phys); cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = ((struct lpfc_dmabufext *)mp[i])->size; cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; cmd->ulpCommand = CMD_QUE_XRI64_CX; cmd->ulpPU = 0; cmd->ulpLe = 1; cmd->ulpBdeCount = 1; cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; } else { cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); cmd->un.cont64[i].tus.f.bdeSize = ((struct lpfc_dmabufext *)mp[i])->size; cmd->ulpBdeCount = ++i; if ((--num_bde > 0) && (i < 2)) 
continue; cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; cmd->ulpLe = 1; } cmd->ulpClass = CLASS3; cmd->ulpContext = rxxri; iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); if (iocb_stat == IOCB_ERROR) { diag_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[0]); if (mp[1]) diag_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[1]); dmp = list_entry(next, struct lpfc_dmabuf, list); ret_val = -EIO; goto err_post_rxbufs_exit; } lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); if (mp[1]) { lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); mp[1] = NULL; } /* The iocb was freed by lpfc_sli_issue_iocb */ cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { dmp = list_entry(next, struct lpfc_dmabuf, list); ret_val = -EIO; goto err_post_rxbufs_exit; } cmd = &cmdiocbq->iocb; i = 0; } list_del(&head); err_post_rxbufs_exit: if (rxbmp) { if (rxbmp->virt) lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); kfree(rxbmp); } if (cmdiocbq) lpfc_sli_release_iocbq(phba, cmdiocbq); return ret_val; } /** * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job * * This function receives a user data buffer to be transmitted and received on * the same port, the link must be up and in loopback mode prior * to being called. * 1. A kernel buffer is allocated to copy the user data into. * 2. The port registers with "itself". * 3. The transmit and receive exchange ids are obtained. * 4. The receive exchange id is posted. * 5. A new els loopback event is created. * 6. The command and response iocbs are allocated. * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback. * * This function is meant to be called n times while the port is in loopback * so it is the apps responsibility to issue a reset to take the port out * of loopback mode. 
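 *
 * Buffer layout note (derived from the code below, stated here for
 * clarity): the transmit chain carries
 *
 *	full_size = job->request_payload.payload_len + ELX_LOOPBACK_HEADER_SZ
 *
 * bytes, with the ELX loopback CT header (CmdRsp ELX_LOOPBACK_DATA) in
 * the first ELX_LOOPBACK_HEADER_SZ bytes of the first DMA segment and
 * the user payload packed behind it; payload lengths of 0 or above
 * 80 * BUF_SZ_4K are rejected with -ERANGE.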
**/ static int lpfc_bsg_diag_loopback_run(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct lpfc_bsg_event *evt; struct event_data *evdat; struct lpfc_sli *psli = &phba->sli; uint32_t size; uint32_t full_size; size_t segment_len = 0, segment_offset = 0, current_offset = 0; uint16_t rpi = 0; struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL; union lpfc_wqe128 *cmdwqe, *rspwqe; struct lpfc_sli_ct_request *ctreq; struct lpfc_dmabuf *txbmp; struct ulp_bde64 *txbpl = NULL; struct lpfc_dmabufext *txbuffer = NULL; struct list_head head; struct lpfc_dmabuf *curr; uint16_t txxri = 0, rxxri; uint32_t num_bde; uint8_t *ptr = NULL, *rx_databuf = NULL; int rc = 0; int time_left; int iocb_stat = IOCB_SUCCESS; unsigned long flags; void *dataout = NULL; uint32_t total_mem; /* in case no data is returned return just the return code */ bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2739 Received DIAG TEST request below minimum " "size\n"); rc = -EINVAL; goto loopback_test_exit; } if (job->request_payload.payload_len != job->reply_payload.payload_len) { rc = -EINVAL; goto loopback_test_exit; } if ((phba->link_state == LPFC_HBA_ERROR) || (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { rc = -EACCES; goto loopback_test_exit; } if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { rc = -EACCES; goto loopback_test_exit; } size = job->request_payload.payload_len; full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ if ((size == 0) || (size > 80 * BUF_SZ_4K)) { rc = -ERANGE; goto loopback_test_exit; } if (full_size >= BUF_SZ_4K) { /* * Allocate memory for ioctl data. If buffer is bigger than 64k, * then we allocate 64k and re-use that buffer over and over to * xfer the whole block. This is because Linux kernel has a * problem allocating more than 120k of kernel space memory. Saw * problem with GET_FCPTARGETMAPPING... 
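 * For example (values implied by the checks below): a 16KB payload
 * yields total_mem = 16KB + ELX_LOOPBACK_HEADER_SZ, while a 200KB
 * payload is clamped to a single 64KB buffer that is reused for the
 * transfer.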
*/ if (size <= (64 * 1024)) total_mem = full_size; else total_mem = 64 * 1024; } else /* Allocate memory for ioctl data */ total_mem = BUF_SZ_4K; dataout = kmalloc(total_mem, GFP_KERNEL); if (dataout == NULL) { rc = -ENOMEM; goto loopback_test_exit; } ptr = dataout; ptr += ELX_LOOPBACK_HEADER_SZ; sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, ptr, size); rc = lpfcdiag_loop_self_reg(phba, &rpi); if (rc) goto loopback_test_exit; if (phba->sli_rev < LPFC_SLI_REV4) { rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); if (rc) { lpfcdiag_loop_self_unreg(phba, rpi); goto loopback_test_exit; } rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size); if (rc) { lpfcdiag_loop_self_unreg(phba, rpi); goto loopback_test_exit; } } evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, SLI_CT_ELX_LOOPBACK); if (!evt) { lpfcdiag_loop_self_unreg(phba, rpi); rc = -ENOMEM; goto loopback_test_exit; } spin_lock_irqsave(&phba->ct_ev_lock, flags); list_add(&evt->node, &phba->ct_ev_waiters); lpfc_bsg_event_ref(evt); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); cmdiocbq = lpfc_sli_get_iocbq(phba); if (phba->sli_rev < LPFC_SLI_REV4) rspiocbq = lpfc_sli_get_iocbq(phba); txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (txbmp) { txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); if (txbmp->virt) { INIT_LIST_HEAD(&txbmp->list); txbpl = (struct ulp_bde64 *) txbmp->virt; txbuffer = diag_cmd_data_alloc(phba, txbpl, full_size, 0); } } if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { rc = -ENOMEM; goto err_loopback_test_exit; } if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { rc = -ENOMEM; goto err_loopback_test_exit; } cmdwqe = &cmdiocbq->wqe; memset(cmdwqe, 0, sizeof(union lpfc_wqe)); if (phba->sli_rev < LPFC_SLI_REV4) { rspwqe = &rspiocbq->wqe; memset(rspwqe, 0, sizeof(union lpfc_wqe)); } INIT_LIST_HEAD(&head); list_add_tail(&head, &txbuffer->dma.list); list_for_each_entry(curr, &head, list) { segment_len = ((struct lpfc_dmabufext *)curr)->size; if (current_offset == 0) { ctreq = curr->virt; memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; ctreq->RevisionId.bits.InId = 0; ctreq->FsType = SLI_CT_ELX_LOOPBACK; ctreq->FsSubType = 0; ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); ctreq->CommandResponse.bits.Size = cpu_to_be16(size); segment_offset = ELX_LOOPBACK_HEADER_SZ; } else segment_offset = 0; BUG_ON(segment_offset >= segment_len); memcpy(curr->virt + segment_offset, ptr + current_offset, segment_len - segment_offset); current_offset += segment_len - segment_offset; BUG_ON(current_offset > size); } list_del(&head); /* Build the XMIT_SEQUENCE iocb */ num_bde = (uint32_t)txbuffer->flag; cmdiocbq->num_bdes = num_bde; cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; cmdiocbq->vport = phba->pport; cmdiocbq->cmd_cmpl = NULL; cmdiocbq->bpl_dmabuf = txbmp; if (phba->sli_rev < LPFC_SLI_REV4) { lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri, num_bde, FC_RCTL_DD_UNSOL_CTL, 1, CMD_XMIT_SEQUENCE64_CX); } else { lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, phba->sli4_hba.rpi_ids[rpi], 0xffff, full_size, FC_RCTL_DD_UNSOL_CTL, 1, CMD_XMIT_SEQUENCE64_WQE); cmdiocbq->sli4_xritag = NO_XRI; } iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); if (iocb_stat != IOCB_SUCCESS || (phba->sli_rev < LPFC_SLI_REV4 && (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) { lpfc_printf_log(phba, 
KERN_ERR, LOG_LIBDFC, "3126 Failed loopback test issue iocb: " "iocb_stat:x%x\n", iocb_stat); rc = -EIO; goto err_loopback_test_exit; } evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), msecs_to_jiffies(1000 * ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? -EINTR : -ETIMEDOUT; lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3125 Not receiving unsolicited event, " "rc:x%x\n", rc); } else { spin_lock_irqsave(&phba->ct_ev_lock, flags); list_move(evt->events_to_see.prev, &evt->events_to_get); evdat = list_entry(evt->events_to_get.prev, typeof(*evdat), node); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); rx_databuf = evdat->data; if (evdat->len != full_size) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "1603 Loopback test did not receive expected " "data length. actual length 0x%x expected " "length 0x%x\n", evdat->len, full_size); rc = -EIO; } else if (rx_databuf == NULL) rc = -EIO; else { rc = IOCB_SUCCESS; /* skip over elx loopback header */ rx_databuf += ELX_LOOPBACK_HEADER_SZ; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, rx_databuf, size); bsg_reply->reply_payload_rcv_len = size; } } err_loopback_test_exit: lpfcdiag_loop_self_unreg(phba, rpi); spin_lock_irqsave(&phba->ct_ev_lock, flags); lpfc_bsg_event_unref(evt); /* release ref */ lpfc_bsg_event_unref(evt); /* delete */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) lpfc_sli_release_iocbq(phba, cmdiocbq); if (rspiocbq != NULL) lpfc_sli_release_iocbq(phba, rspiocbq); if (txbmp != NULL) { if (txbpl != NULL) { if (txbuffer != NULL) diag_cmd_data_free(phba, txbuffer); lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); } kfree(txbmp); } loopback_test_exit: kfree(dataout); /* make error code available to userspace */ bsg_reply->result = rc; job->dd_data = NULL; /* complete the job back to userspace if no error */ if (rc == IOCB_SUCCESS) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command * @job: GET_DFC_REV fc_bsg_job **/ static int lpfc_bsg_get_dfc_rev(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct get_mgmt_rev_reply *event_reply; int rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2740 Received GET_DFC_REV request below " "minimum size\n"); rc = -EINVAL; goto job_error; } event_reply = (struct get_mgmt_rev_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2741 Received GET_DFC_REV reply below " "minimum size\n"); rc = -EINVAL; goto job_error; } event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; job_error: bsg_reply->result = rc; if (rc == 0) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. * * This is completion handler function for mailbox commands issued from * lpfc_bsg_issue_mbox function. 
This function is called by the * mailbox event handler function with no lock held. This function * will wake up thread waiting on the wait queue pointed by dd_data * of the mailbox. **/ static void lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; struct fc_bsg_reply *bsg_reply; struct bsg_job *job; uint32_t size; unsigned long flags; uint8_t *pmb, *pmb_buf; dd_data = pmboxq->ctx_ndlp; /* * The outgoing buffer is readily referred from the dma buffer, * just need to get header part from mailboxq structure. */ pmb = (uint8_t *)&pmboxq->u.mb; pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* Copy the mailbox data to the job if it is still active */ if (job) { bsg_reply = job->reply; size = job->reply_payload.payload_len; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); } dd_data->set_job = NULL; mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); kfree(dd_data); /* Complete the job if the job is still active */ if (job) { bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return; } /** * lpfc_bsg_check_cmd_access - test for a supported mailbox command * @phba: Pointer to HBA context object. * @mb: Pointer to a mailbox object. * @vport: Pointer to a vport object. * * Some commands require the port to be offline, some may not be called from * the application. 
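 *
 * How the issue path below invokes this check (shown here for reference;
 * not an additional code path):
 *
 *	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
 *	if (rc != 0)
 *		goto job_done;
 *
 * A non-zero return is a negative errno, e.g. -EPERM when MBX_INIT_LINK
 * is requested while the port is not in FC_OFFLINE_MODE.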
**/ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, MAILBOX_t *mb, struct lpfc_vport *vport) { /* return negative error values for bsg job */ switch (mb->mbxCommand) { /* Offline only */ case MBX_INIT_LINK: case MBX_DOWN_LINK: case MBX_CONFIG_LINK: case MBX_CONFIG_RING: case MBX_RESET_RING: case MBX_UNREG_LOGIN: case MBX_CLEAR_LA: case MBX_DUMP_CONTEXT: case MBX_RUN_DIAGS: case MBX_RESTART: case MBX_SET_MASK: if (!(vport->fc_flag & FC_OFFLINE_MODE)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2743 Command 0x%x is illegal in on-line " "state\n", mb->mbxCommand); return -EPERM; } break; case MBX_WRITE_NV: case MBX_WRITE_VPARMS: case MBX_LOAD_SM: case MBX_READ_NV: case MBX_READ_CONFIG: case MBX_READ_RCONFIG: case MBX_READ_STATUS: case MBX_READ_XRI: case MBX_READ_REV: case MBX_READ_LNK_STAT: case MBX_DUMP_MEMORY: case MBX_DOWN_LOAD: case MBX_UPDATE_CFG: case MBX_KILL_BOARD: case MBX_READ_TOPOLOGY: case MBX_LOAD_AREA: case MBX_LOAD_EXP_ROM: case MBX_BEACON: case MBX_DEL_LD_ENTRY: case MBX_SET_DEBUG: case MBX_WRITE_WWN: case MBX_SLI4_CONFIG: case MBX_READ_EVENT_LOG: case MBX_READ_EVENT_LOG_STATUS: case MBX_WRITE_EVENT_LOG: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: case MBX_RUN_BIU_DIAG64: break; case MBX_SET_VARIABLE: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1226 mbox: set_variable 0x%x, 0x%x\n", mb->un.varWords[0], mb->un.varWords[1]); break; case MBX_READ_SPARM64: case MBX_REG_LOGIN: case MBX_REG_LOGIN64: case MBX_CONFIG_PORT: case MBX_RUN_BIU_DIAG: default: lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2742 Unknown Command 0x%x\n", mb->mbxCommand); return -EPERM; } return 0; /* ok */ } /** * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session * @phba: Pointer to HBA context object. * * This is routine clean up and reset BSG handling of multi-buffer mbox * command session. **/ static void lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) { if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) return; /* free all memory, including dma buffers */ lpfc_bsg_dma_page_list_free(phba, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf); /* multi-buffer write mailbox command pass-through complete */ memset((char *)&phba->mbox_ext_buf_ctx, 0, sizeof(struct lpfc_mbox_ext_buf_ctx)); INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); return; } /** * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. * * This is routine handles BSG job for mailbox commands completions with * multiple external buffers. **/ static struct bsg_job * lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; uint8_t *pmb, *pmb_buf; unsigned long flags; uint32_t size; int rc = 0; struct lpfc_dmabuf *dmabuf; struct lpfc_sli_config_mbox *sli_cfg_mbx; uint8_t *pmbx; dd_data = pmboxq->ctx_buf; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); job = dd_data->set_job; if (job) { bsg_reply = job->reply; /* Prevent timeout handling from trying to abort job */ job->dd_data = NULL; } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* * The outgoing buffer is readily referred from the dma buffer, * just need to get header part from mailboxq structure. 
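 * In other words, only the fixed MAILBOX_t header is copied out of
 * pmboxq->u.mb here; any non-embedded extended data already sits in the
 * mbx_dmabuf DMA buffer and is byte swapped in place below.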
*/ pmb = (uint8_t *)&pmboxq->u.mb; pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; /* Copy the byte swapped response mailbox back to the user */ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); /* if there is any non-embedded extended data copy that too */ dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; if (!bsg_bf_get(lpfc_mbox_hdr_emb, &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { pmbx = (uint8_t *)dmabuf->virt; /* byte swap the extended data following the mailbox command */ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], &pmbx[sizeof(MAILBOX_t)], sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); } /* Complete the job if the job is still active */ if (job) { size = job->reply_payload.payload_len; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); /* result for successful */ bsg_reply->result = 0; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2937 SLI_CONFIG ext-buffer mailbox command " "(x%x/x%x) complete bsg job done, bsize:%d\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, size); lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, dma_ebuf, sta_pos_addr, phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); } else { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2938 SLI_CONFIG ext-buffer mailbox " "command (x%x/x%x) failure, rc:x%x\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, rc); } /* state change */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; kfree(dd_data); return job; } /** * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. * * This is completion handler function for mailbox read commands with multiple * external buffers. **/ static void lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job *job; struct fc_bsg_reply *bsg_reply; job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); /* handle the BSG job with mailbox command */ if (!job) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2939 SLI_CONFIG ext-buffer rd mailbox command " "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) lpfc_bsg_mbox_ext_session_reset(phba); /* free base driver mailbox structure memory */ mempool_free(pmboxq, phba->mbox_mem_pool); /* if the job is still active, call job done */ if (job) { bsg_reply = job->reply; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return; } /** * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. * * This is completion handler function for mailbox write commands with multiple * external buffers. 
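 *
 * Unlike the read-side completion above, which keeps the session alive
 * until the application has pulled every external buffer, this handler
 * always frees the DMA buffers and resets the multi-buffer session state
 * once the write mailbox command completes.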
**/ static void lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job *job; struct fc_bsg_reply *bsg_reply; job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); /* handle the BSG job with the mailbox command */ if (!job) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2940 SLI_CONFIG ext-buffer wr mailbox command " "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); /* free all memory, including dma buffers */ mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_mbox_ext_session_reset(phba); /* if the job is still active, call job done */ if (job) { bsg_reply = job->reply; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } return; } static void lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, struct lpfc_dmabuf *ext_dmabuf) { struct lpfc_sli_config_mbox *sli_cfg_mbx; /* pointer to the start of mailbox command */ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; if (nemb_tp == nemb_mse) { if (index == 0) { sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_hi = putPaddrHigh(mbx_dmabuf->phys + sizeof(MAILBOX_t)); sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_lo = putPaddrLow(mbx_dmabuf->phys + sizeof(MAILBOX_t)); lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2943 SLI_CONFIG(mse)[%d], " "bufLen:%d, addrHi:x%x, addrLo:x%x\n", index, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].buf_len, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_hi, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_lo); } else { sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_hi = putPaddrHigh(ext_dmabuf->phys); sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_lo = putPaddrLow(ext_dmabuf->phys); lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2944 SLI_CONFIG(mse)[%d], " "bufLen:%d, addrHi:x%x, addrLo:x%x\n", index, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].buf_len, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_hi, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[index].pa_lo); } } else { if (index == 0) { sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_hi = putPaddrHigh(mbx_dmabuf->phys + sizeof(MAILBOX_t)); sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_lo = putPaddrLow(mbx_dmabuf->phys + sizeof(MAILBOX_t)); lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3007 SLI_CONFIG(hbd)[%d], " "bufLen:%d, addrHi:x%x, addrLo:x%x\n", index, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, &sli_cfg_mbx->un. sli_config_emb1_subsys.hbd[index]), sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_hi, sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_lo); } else { sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_hi = putPaddrHigh(ext_dmabuf->phys); sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_lo = putPaddrLow(ext_dmabuf->phys); lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3008 SLI_CONFIG(hbd)[%d], " "bufLen:%d, addrHi:x%x, addrLo:x%x\n", index, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, &sli_cfg_mbx->un. sli_config_emb1_subsys.hbd[index]), sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_hi, sli_cfg_mbx->un.sli_config_emb1_subsys. hbd[index].pa_lo); } } return; } /** * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @nemb_tp: Enumerate of non-embedded mailbox command type. * @dmabuf: Pointer to a DMA buffer descriptor. 
* * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with * non-embedded external buffers. **/ static int lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, enum nemb_type nemb_tp, struct lpfc_dmabuf *dmabuf) { struct fc_bsg_request *bsg_request = job->request; struct lpfc_sli_config_mbox *sli_cfg_mbx; struct dfc_mbox_req *mbox_req; struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; uint32_t ext_buf_cnt, ext_buf_index; struct lpfc_dmabuf *ext_dmabuf = NULL; struct bsg_job_data *dd_data = NULL; LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; uint8_t *pmbx; int rc, i; mbox_req = (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* pointer to the start of mailbox command */ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; if (nemb_tp == nemb_mse) { ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2945 Handled SLI_CONFIG(mse) rd, " "ext_buf_cnt(%d) out of range(%d)\n", ext_buf_cnt, LPFC_MBX_SLI_CONFIG_MAX_MSE); rc = -ERANGE; goto job_error; } lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2941 Handled SLI_CONFIG(mse) rd, " "ext_buf_cnt:%d\n", ext_buf_cnt); } else { /* sanity check on interface type for support */ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) { rc = -ENODEV; goto job_error; } /* nemb_tp == nemb_hbd */ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2946 Handled SLI_CONFIG(hbd) rd, " "ext_buf_cnt(%d) out of range(%d)\n", ext_buf_cnt, LPFC_MBX_SLI_CONFIG_MAX_HBD); rc = -ERANGE; goto job_error; } lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2942 Handled SLI_CONFIG(hbd) rd, " "ext_buf_cnt:%d\n", ext_buf_cnt); } /* before dma descriptor setup */ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, sta_pre_addr, dmabuf, ext_buf_cnt); /* reject non-embedded mailbox command with none external buffer */ if (ext_buf_cnt == 0) { rc = -EPERM; goto job_error; } else if (ext_buf_cnt > 1) { /* additional external read buffers */ for (i = 1; i < ext_buf_cnt; i++) { ext_dmabuf = lpfc_bsg_dma_page_alloc(phba); if (!ext_dmabuf) { rc = -ENOMEM; goto job_error; } list_add_tail(&ext_dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); } } /* bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { rc = -ENOMEM; goto job_error; } /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { rc = -ENOMEM; goto job_error; } memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); /* for the first external buffer */ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); /* for the rest of external buffer descriptors if any */ if (ext_buf_cnt > 1) { ext_buf_index = 1; list_for_each_entry_safe(curr_dmabuf, next_dmabuf, &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) { lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, ext_buf_index, dmabuf, curr_dmabuf); ext_buf_index++; } } /* after dma descriptor setup */ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, sta_pos_addr, dmabuf, ext_buf_cnt); /* construct base driver mbox command */ pmb = &pmboxq->u.mb; pmbx = (uint8_t *)dmabuf->virt; memcpy(pmb, pmbx, sizeof(*pmb)); pmb->mbxOwner = OWN_HOST; pmboxq->vport = phba->pport; /* multi-buffer handling context */ 
phba->mbox_ext_buf_ctx.nembType = nemb_tp; phba->mbox_ext_buf_ctx.mboxType = mbox_rd; phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; /* callback for multi-buffer read mailbox command */ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; /* context fields to callback function */ pmboxq->ctx_buf = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; job->dd_data = dd_data; /* state change */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; /* * Non-embedded mailbox subcommand data gets byte swapped here because * the lower level driver code only does the first 64 mailbox words. */ if ((!bsg_bf_get(lpfc_mbox_hdr_emb, &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) && (nemb_tp == nemb_mse)) lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], &pmbx[sizeof(MAILBOX_t)], sli_cfg_mbx->un.sli_config_emb0_subsys. mse[0].buf_len); rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2947 Issued SLI_CONFIG ext-buffer " "mailbox command, rc:x%x\n", rc); return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2948 Failed to issue SLI_CONFIG ext-buffer " "mailbox command, rc:x%x\n", rc); rc = -EPIPE; job_error: if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_list_free(phba, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); kfree(dd_data); phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; return rc; } /** * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @nemb_tp: Enumerate of non-embedded mailbox command type. * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with * non-embedded external buffers. 
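 *
 * Flow summary (restating the body below): when only one external buffer
 * is described, the mailbox is issued immediately with
 * lpfc_bsg_issue_write_mbox_ext_cmpl() as its completion handler; when
 * several buffers are expected, the job is completed back to user space
 * and the remaining buffers arrive later through lpfc_bsg_write_ebuf_set().
 * Either way SLI_CONFIG_HANDLED is returned on success.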
**/ static int lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, enum nemb_type nemb_tp, struct lpfc_dmabuf *dmabuf) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct dfc_mbox_req *mbox_req; struct lpfc_sli_config_mbox *sli_cfg_mbx; uint32_t ext_buf_cnt; struct bsg_job_data *dd_data = NULL; LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; uint8_t *mbx; int rc = SLI_CONFIG_NOT_HANDLED, i; mbox_req = (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* pointer to the start of mailbox command */ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; if (nemb_tp == nemb_mse) { ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2953 Failed SLI_CONFIG(mse) wr, " "ext_buf_cnt(%d) out of range(%d)\n", ext_buf_cnt, LPFC_MBX_SLI_CONFIG_MAX_MSE); return -ERANGE; } lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2949 Handled SLI_CONFIG(mse) wr, " "ext_buf_cnt:%d\n", ext_buf_cnt); } else { /* sanity check on interface type for support */ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) return -ENODEV; /* nemb_tp == nemb_hbd */ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2954 Failed SLI_CONFIG(hbd) wr, " "ext_buf_cnt(%d) out of range(%d)\n", ext_buf_cnt, LPFC_MBX_SLI_CONFIG_MAX_HBD); return -ERANGE; } lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2950 Handled SLI_CONFIG(hbd) wr, " "ext_buf_cnt:%d\n", ext_buf_cnt); } /* before dma buffer descriptor setup */ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, sta_pre_addr, dmabuf, ext_buf_cnt); if (ext_buf_cnt == 0) return -EPERM; /* for the first external buffer */ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); /* after dma descriptor setup */ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, sta_pos_addr, dmabuf, ext_buf_cnt); /* log for looking forward */ for (i = 1; i < ext_buf_cnt; i++) { if (nemb_tp == nemb_mse) lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n", i, sli_cfg_mbx->un.sli_config_emb0_subsys. mse[i].buf_len); else lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n", i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, &sli_cfg_mbx->un.sli_config_emb1_subsys. 
hbd[i])); } /* multi-buffer handling context */ phba->mbox_ext_buf_ctx.nembType = nemb_tp; phba->mbox_ext_buf_ctx.mboxType = mbox_wr; phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; if (ext_buf_cnt == 1) { /* bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { rc = -ENOMEM; goto job_error; } /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { rc = -ENOMEM; goto job_error; } memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; mbx = (uint8_t *)dmabuf->virt; memcpy(pmb, mbx, sizeof(*pmb)); pmb->mbxOwner = OWN_HOST; pmboxq->vport = phba->pport; /* callback for multi-buffer read mailbox command */ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; /* context fields to callback function */ pmboxq->ctx_buf = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx; job->dd_data = dd_data; /* state change */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2955 Issued SLI_CONFIG ext-buffer " "mailbox command, rc:x%x\n", rc); return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2956 Failed to issue SLI_CONFIG ext-buffer " "mailbox command, rc:x%x\n", rc); rc = -EPIPE; goto job_error; } /* wait for additional external buffers */ bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; job_error: if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); kfree(dd_data); return rc; } /** * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded * external buffers, including both 0x9B with non-embedded MSEs and 0x9B * with embedded subsystem 0x1 and opcodes with external HBDs. 
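 *
 * Dispatch summary (a condensed view of the switch statements below):
 *
 *	emb0/FCOE: READ_FCF, GET_DPORT_RESULTS              -> read,  nemb_mse
 *	emb0/FCOE: ADD_FCF, SET_DPORT_MODE, LINK_DIAG_STATE -> write, nemb_mse
 *	emb0/COMN: GET_CNTL_*_ATTRIBUTES, GET_PROFILE_CONFIG,
 *	           SET_FEATURES                             -> read,  nemb_mse
 *	emb1/COMN: READ_OBJECT, READ_OBJECT_LIST            -> read,  nemb_hbd
 *	emb1/COMN: WRITE_OBJECT                             -> write, nemb_hbd
 *
 * Anything else is either rejected with -EPERM or returned as
 * SLI_CONFIG_NOT_HANDLED so the regular mailbox path can process it.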
**/ static int lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { struct lpfc_sli_config_mbox *sli_cfg_mbx; uint32_t subsys; uint32_t opcode; int rc = SLI_CONFIG_NOT_HANDLED; /* state change on new multi-buffer pass-through mailbox command */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; if (!bsg_bf_get(lpfc_mbox_hdr_emb, &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys, &sli_cfg_mbx->un.sli_config_emb0_subsys); opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, &sli_cfg_mbx->un.sli_config_emb0_subsys); if (subsys == SLI_CONFIG_SUBSYS_FCOE) { switch (opcode) { case FCOE_OPCODE_READ_FCF: case FCOE_OPCODE_GET_DPORT_RESULTS: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2957 Handled SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", opcode); rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, nemb_mse, dmabuf); break; case FCOE_OPCODE_ADD_FCF: case FCOE_OPCODE_SET_DPORT_MODE: case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2958 Handled SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", opcode); rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, nemb_mse, dmabuf); break; default: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2959 Reject SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", opcode); rc = -EPERM; break; } } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { switch (opcode) { case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: case COMN_OPCODE_GET_CNTL_ATTRIBUTES: case COMN_OPCODE_GET_PROFILE_CONFIG: case COMN_OPCODE_SET_FEATURES: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3106 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", opcode); rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, nemb_mse, dmabuf); break; default: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3107 Reject SLI_CONFIG " "subsys_comn, opcode:x%x\n", opcode); rc = -EPERM; break; } } else { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2977 Reject SLI_CONFIG " "subsys:x%d, opcode:x%x\n", subsys, opcode); rc = -EPERM; } } else { subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, &sli_cfg_mbx->un.sli_config_emb1_subsys); opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode, &sli_cfg_mbx->un.sli_config_emb1_subsys); if (subsys == SLI_CONFIG_SUBSYS_COMN) { switch (opcode) { case COMN_OPCODE_READ_OBJECT: case COMN_OPCODE_READ_OBJECT_LIST: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2960 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", opcode); rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, nemb_hbd, dmabuf); break; case COMN_OPCODE_WRITE_OBJECT: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2961 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", opcode); rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, nemb_hbd, dmabuf); break; default: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2962 Not handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", opcode); rc = SLI_CONFIG_NOT_HANDLED; break; } } else { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2978 Not handled SLI_CONFIG " "subsys:x%d, opcode:x%x\n", subsys, opcode); rc = SLI_CONFIG_NOT_HANDLED; } } /* state reset on not handled new multi-buffer mailbox command */ if (rc != SLI_CONFIG_HANDLED) phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; return rc; } /** * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers * @phba: Pointer to HBA context object. * * This routine is for requesting to abort a pass-through mailbox command with * multiple external buffers due to error condition. 
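 *
 * Concretely: if the mailbox has already been handed to the port (state
 * LPFC_BSG_MBOX_PORT) the session is only marked LPFC_BSG_MBOX_ABTS and
 * the completion handler is left to clean up; in every other state the
 * session is torn down immediately via lpfc_bsg_mbox_ext_session_reset().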
**/ static void lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba) { if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; else lpfc_bsg_mbox_ext_session_reset(phba); return; } /** * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * * This routine extracts the next mailbox read external buffer back to * user space through BSG. **/ static int lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job) { struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_sli_config_mbox *sli_cfg_mbx; struct lpfc_dmabuf *dmabuf; uint8_t *pbuf; uint32_t size; uint32_t index; index = phba->mbox_ext_buf_ctx.seqNum; phba->mbox_ext_buf_ctx.seqNum++; sli_cfg_mbx = (struct lpfc_sli_config_mbox *) phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { size = bsg_bf_get(lpfc_mbox_sli_config_mse_len, &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]); lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2963 SLI_CONFIG (mse) ext-buffer rd get " "buffer[%d], size:%d\n", index, size); } else { size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]); lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2964 SLI_CONFIG (hbd) ext-buffer rd get " "buffer[%d], size:%d\n", index, size); } if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list)) return -EPIPE; dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, struct lpfc_dmabuf, list); list_del_init(&dmabuf->list); /* after dma buffer descriptor setup */ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, mbox_rd, dma_ebuf, sta_pos_addr, dmabuf, index); pbuf = (uint8_t *)dmabuf->virt; bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pbuf, size); lpfc_bsg_dma_page_free(phba, dmabuf); if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2965 SLI_CONFIG (hbd) ext-buffer rd mbox " "command session done\n"); lpfc_bsg_mbox_ext_session_reset(phba); } bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; } /** * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine sets up the next mailbox read external buffer obtained * from user space through BSG. 
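 *
 * Each call consumes one write-side external buffer: the user payload is
 * copied into @dmabuf, a DMA descriptor for it is patched into the
 * SLI_CONFIG command, and the buffer is queued on ext_dmabuf_list.  Once
 * seqNum reaches numBuf the fully assembled mailbox command is issued to
 * the port with lpfc_bsg_issue_write_mbox_ext_cmpl() as its completion
 * handler; until then each job is simply completed back to user space.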
**/ static int lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { struct fc_bsg_reply *bsg_reply = job->reply; struct bsg_job_data *dd_data = NULL; LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; enum nemb_type nemb_tp; uint8_t *pbuf; uint32_t size; uint32_t index; int rc; index = phba->mbox_ext_buf_ctx.seqNum; phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, pbuf, size); if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2966 SLI_CONFIG (mse) ext-buffer wr set " "buffer[%d], size:%d\n", phba->mbox_ext_buf_ctx.seqNum, size); } else { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2967 SLI_CONFIG (hbd) ext-buffer wr set " "buffer[%d], size:%d\n", phba->mbox_ext_buf_ctx.seqNum, size); } /* set up external buffer descriptor and add to external buffer list */ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index, phba->mbox_ext_buf_ctx.mbx_dmabuf, dmabuf); list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); /* after write dma buffer */ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, mbox_wr, dma_ebuf, sta_pos_addr, dmabuf, index); if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { rc = -ENOMEM; goto job_error; } /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { rc = -ENOMEM; goto job_error; } memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt; pmb = &pmboxq->u.mb; memcpy(pmb, pbuf, sizeof(*pmb)); pmb->mbxOwner = OWN_HOST; pmboxq->vport = phba->pport; /* callback for multi-buffer write mailbox command */ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; /* context fields to callback function */ pmboxq->ctx_buf = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; job->dd_data = dd_data; /* state change */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2969 Issued SLI_CONFIG ext-buffer " "mailbox command, rc:x%x\n", rc); return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2970 Failed to issue SLI_CONFIG ext-buffer " "mailbox command, rc:x%x\n", rc); rc = -EPIPE; goto job_error; } /* wait for additional external buffers */ bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; job_error: if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); return rc; } /** * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox * command with multiple non-embedded external buffers. 
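 *
 * The session state gates which direction is legal here: a read-side
 * buffer is only handed out while the context is in LPFC_BSG_MBOX_DONE,
 * and a write-side buffer is only accepted while it is in
 * LPFC_BSG_MBOX_HOST; any mismatch aborts the multi-buffer session and
 * returns -EPIPE.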
**/ static int lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { int rc; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2971 SLI_CONFIG buffer (type:x%x)\n", phba->mbox_ext_buf_ctx.mboxType); if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) { if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2972 SLI_CONFIG rd buffer state " "mismatch:x%x\n", phba->mbox_ext_buf_ctx.state); lpfc_bsg_mbox_ext_abort(phba); return -EPIPE; } rc = lpfc_bsg_read_ebuf_get(phba, job); if (rc == SLI_CONFIG_HANDLED) lpfc_bsg_dma_page_free(phba, dmabuf); } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2973 SLI_CONFIG wr buffer state " "mismatch:x%x\n", phba->mbox_ext_buf_ctx.state); lpfc_bsg_mbox_ext_abort(phba); return -EPIPE; } rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf); } return rc; } /** * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine checks and handles non-embedded multi-buffer SLI_CONFIG * (0x9B) mailbox commands and external buffers. **/ static int lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_dmabuf *dmabuf) { struct fc_bsg_request *bsg_request = job->request; struct dfc_mbox_req *mbox_req; int rc = SLI_CONFIG_NOT_HANDLED; mbox_req = (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* mbox command with/without single external buffer */ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) return rc; /* mbox command and first external buffer */ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { if (mbox_req->extSeqNum == 1) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2974 SLI_CONFIG mailbox: tag:%d, " "seq:%d\n", mbox_req->extMboxTag, mbox_req->extSeqNum); rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf); return rc; } else goto sli_cfg_ext_error; } /* * handle additional external buffers */ /* check broken pipe conditions */ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag) goto sli_cfg_ext_error; if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf) goto sli_cfg_ext_error; if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1) goto sli_cfg_ext_error; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2975 SLI_CONFIG mailbox external buffer: " "extSta:x%x, tag:%d, seq:%d\n", phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag, mbox_req->extSeqNum); rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf); return rc; sli_cfg_ext_error: /* all other cases, broken pipe */ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2976 SLI_CONFIG mailbox broken pipe: " "ctxSta:x%x, ctxNumBuf:%d " "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n", phba->mbox_ext_buf_ctx.state, phba->mbox_ext_buf_ctx.numBuf, phba->mbox_ext_buf_ctx.mbxTag, phba->mbox_ext_buf_ctx.seqNum, mbox_req->extMboxTag, mbox_req->extSeqNum); lpfc_bsg_mbox_ext_session_reset(phba); return -EPIPE; } /** * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @vport: Pointer to a vport object. * * Allocate a tracking object, mailbox command memory, get a mailbox * from the mailbox pool, copy the caller mailbox command. 
* * If offline and the sli is active we need to poll for the command (port is * being reset) and complete the job, otherwise issue the mailbox command and * let our completion handler finish the command. **/ static int lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, struct lpfc_vport *vport) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ /* a 4k buffer to hold the mb and extended data from/to the bsg */ uint8_t *pmbx = NULL; struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ struct lpfc_dmabuf *dmabuf = NULL; struct dfc_mbox_req *mbox_req; struct READ_EVENT_LOG_VAR *rdEventLog; uint32_t transmit_length, receive_length, mode; struct lpfc_mbx_sli4_config *sli4_config; struct lpfc_mbx_nembed_cmd *nembed_sge; struct ulp_bde64 *bde; uint8_t *ext = NULL; int rc = 0; uint8_t *from; uint32_t size; /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; /* sanity check to protect driver */ if (job->reply_payload.payload_len > BSG_MBOX_SIZE || job->request_payload.payload_len > BSG_MBOX_SIZE) { rc = -ERANGE; goto job_done; } /* * Don't allow mailbox commands to be sent when blocked or when in * the middle of discovery */ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { rc = -EAGAIN; goto job_done; } mbox_req = (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd; /* check if requested extended data lengths are valid */ if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { rc = -ERANGE; goto job_done; } dmabuf = lpfc_bsg_dma_page_alloc(phba); if (!dmabuf || !dmabuf->virt) { rc = -ENOMEM; goto job_done; } /* Get the mailbox command or external buffer from BSG */ pmbx = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, pmbx, size); /* Handle possible SLI_CONFIG with non-embedded payloads */ if (phba->sli_rev == LPFC_SLI_REV4) { rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf); if (rc == SLI_CONFIG_HANDLED) goto job_cont; if (rc) goto job_done; /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */ } rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport); if (rc != 0) goto job_done; /* must be negative */ /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2727 Failed allocation of dd_data\n"); rc = -ENOMEM; goto job_done; } pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { rc = -ENOMEM; goto job_done; } memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; memcpy(pmb, pmbx, sizeof(*pmb)); pmb->mbxOwner = OWN_HOST; pmboxq->vport = vport; /* If HBA encountered an error attention, allow only DUMP * or RESTART mailbox commands until the HBA is restarted. 
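 * (MBX_WRITE_VPARMS and MBX_WRITE_WWN are tolerated as well; in practice
 * anything else only triggers the warning below rather than being
 * rejected outright.)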
*/ if (phba->pport->stopped && pmb->mbxCommand != MBX_DUMP_MEMORY && pmb->mbxCommand != MBX_RESTART && pmb->mbxCommand != MBX_WRITE_VPARMS && pmb->mbxCommand != MBX_WRITE_WWN) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "2797 mbox: Issued mailbox cmd " "0x%x while in stopped state.\n", pmb->mbxCommand); /* extended mailbox commands will need an extended buffer */ if (mbox_req->inExtWLen || mbox_req->outExtWLen) { from = pmbx; ext = from + sizeof(MAILBOX_t); pmboxq->ctx_buf = ext; pmboxq->in_ext_byte_len = mbox_req->inExtWLen * sizeof(uint32_t); pmboxq->out_ext_byte_len = mbox_req->outExtWLen * sizeof(uint32_t); pmboxq->mbox_offset_word = mbox_req->mbOffset; } /* biu diag will need a kernel buffer to transfer the data * allocate our own buffer and setup the mailbox command to * use ours */ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { transmit_length = pmb->un.varWords[1]; receive_length = pmb->un.varWords[4]; /* transmit length cannot be greater than receive length or * mailbox extension size */ if ((transmit_length > receive_length) || (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { rc = -ERANGE; goto job_done; } pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t) + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize); pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t) + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize); } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { rdEventLog = &pmb->un.varRdEventLog; receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; mode = bf_get(lpfc_event_log, rdEventLog); /* receive length cannot be greater than mailbox * extension size */ if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { rc = -ERANGE; goto job_done; } /* mode zero uses a bde like biu diags command */ if (mode == 0) { pmb->un.varWords[3] = putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); } } else if (phba->sli_rev == LPFC_SLI_REV4) { /* Let type 4 (well known data) through because the data is * returned in varwords[4-8] * otherwise check the recieve length and fetch the buffer addr */ if ((pmb->mbxCommand == MBX_DUMP_MEMORY) && (pmb->un.varDmp.type != DMP_WELL_KNOWN)) { /* rebuild the command for sli4 using our own buffers * like we do for biu diags */ receive_length = pmb->un.varWords[2]; /* receive length cannot be greater than mailbox * extension size */ if (receive_length == 0) { rc = -ERANGE; goto job_done; } pmb->un.varWords[3] = putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && pmb->un.varUpdateCfg.co) { bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; /* bde size cannot be greater than mailbox ext size */ if (bde->tus.f.bdeSize > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { rc = -ERANGE; goto job_done; } bde->addrHigh = putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); bde->addrLow = putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { /* Handling non-embedded SLI_CONFIG mailbox command */ sli4_config = &pmboxq->u.mqe.un.sli4_config; if (!bf_get(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr)) { /* rebuild the command for sli4 using our * own buffers like we do for biu diags */ nembed_sge 
= (struct lpfc_mbx_nembed_cmd *) &pmb->un.varWords[0]; receive_length = nembed_sge->sge[0].length; /* receive length cannot be greater than * mailbox extension size */ if ((receive_length == 0) || (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { rc = -ERANGE; goto job_done; } nembed_sge->sge[0].pa_hi = putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)); nembed_sge->sge[0].pa_lo = putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)); } } } dd_data->context_un.mbox.dmabuffers = dmabuf; /* setup wake call as IOCB callback */ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; /* setup context field to pass wait_queue pointer to wake function */ pmboxq->ctx_ndlp = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; dd_data->context_un.mbox.ext = ext; dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; job->dd_data = dd_data; if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); if (rc != MBX_SUCCESS) { rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; goto job_done; } /* job finished, copy the data */ memcpy(pmbx, pmb, sizeof(*pmb)); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmbx, size); /* not waiting mbox already done */ rc = 0; goto job_done; } rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) return 1; /* job started */ job_done: /* common exit for error or job completed inline */ if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); job_cont: return rc; } /** * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 
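 *
 * Thin wrapper around lpfc_bsg_issue_mbox(): undersized legacy requests
 * get their extended-buffer tag and sequence number zeroed for backward
 * compatibility, a return of 0 completes the job inline, a return of 1
 * means the mailbox was queued and its completion handler will finish
 * the job, and any negative value is reported back in bsg_reply->result.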
**/ static int lpfc_bsg_mbox_cmd(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct dfc_mbox_req *mbox_req; int rc = 0; /* mix-and-match backward compatibility */ bsg_reply->reply_payload_rcv_len = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2737 Mix-and-match backward compatibility " "between MBOX_REQ old size:%d and " "new request size:%d\n", (int)(job->request_len - sizeof(struct fc_bsg_request)), (int)sizeof(struct dfc_mbox_req)); mbox_req = (struct dfc_mbox_req *) bsg_request->rqst_data.h_vendor.vendor_cmd; mbox_req->extMboxTag = 0; mbox_req->extSeqNum = 0; } rc = lpfc_bsg_issue_mbox(phba, job, vport); if (rc == 0) { /* job done */ bsg_reply->result = 0; job->dd_data = NULL; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } else if (rc == 1) /* job submitted, will complete later*/ rc = 0; /* return zero, no error */ else { /* some error occurred */ bsg_reply->result = rc; job->dd_data = NULL; } return rc; } static int lpfc_forced_link_speed(struct bsg_job *job) { struct Scsi_Host *shost = fc_bsg_to_shost(job); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_hba *phba = vport->phba; struct fc_bsg_reply *bsg_reply = job->reply; struct forced_link_speed_support_reply *forced_reply; int rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct get_forced_link_speed_support)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "0048 Received FORCED_LINK_SPEED request " "below minimum size\n"); rc = -EINVAL; goto job_error; } forced_reply = (struct forced_link_speed_support_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "0049 Received FORCED_LINK_SPEED reply below " "minimum size\n"); rc = -EINVAL; goto job_error; } forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) ? LPFC_FORCED_LINK_SPEED_SUPPORTED : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; job_error: bsg_reply->result = rc; if (rc == 0) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_check_fwlog_support: Check FW log support on the adapter * @phba: Pointer to HBA context object. * * Check if FW Logging support by the adapter **/ int lpfc_check_fwlog_support(struct lpfc_hba *phba) { struct lpfc_ras_fwlog *ras_fwlog = NULL; ras_fwlog = &phba->ras_fwlog; if (!ras_fwlog->ras_hwsupport) return -EACCES; else if (!ras_fwlog->ras_enabled) return -EPERM; else return 0; } /** * lpfc_bsg_get_ras_config: Get RAS configuration settings * @job: fc_bsg_job to handle * * Get RAS configuration values set. 
**/ static int lpfc_bsg_get_ras_config(struct bsg_job *job) { struct Scsi_Host *shost = fc_bsg_to_shost(job); struct lpfc_vport *vport = shost_priv(shost); struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; struct lpfc_bsg_get_ras_config_reply *ras_reply; struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; int rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct lpfc_bsg_ras_req)) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "6192 FW_LOG request received " "below minimum size\n"); rc = -EINVAL; goto ras_job_error; } /* Check FW log status */ rc = lpfc_check_fwlog_support(phba); if (rc) goto ras_job_error; ras_reply = (struct lpfc_bsg_get_ras_config_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; /* Current logging state */ spin_lock_irq(&phba->hbalock); if (ras_fwlog->state == ACTIVE) ras_reply->state = LPFC_RASLOG_STATE_RUNNING; else ras_reply->state = LPFC_RASLOG_STATE_STOPPED; spin_unlock_irq(&phba->hbalock); ras_reply->log_level = phba->ras_fwlog.fw_loglevel; ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; ras_job_error: /* make error code available to userspace */ bsg_reply->result = rc; /* complete the job back to userspace */ if (!rc) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_bsg_set_ras_config: Set FW logging parameters * @job: fc_bsg_job to handle * * Set log-level parameters for FW-logging in host memory **/ static int lpfc_bsg_set_ras_config(struct bsg_job *job) { struct Scsi_Host *shost = fc_bsg_to_shost(job); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_hba *phba = vport->phba; struct lpfc_bsg_set_ras_config_req *ras_req; struct fc_bsg_request *bsg_request = job->request; struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; struct fc_bsg_reply *bsg_reply = job->reply; uint8_t action = 0, log_level = 0; int rc = 0, action_status = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct lpfc_bsg_set_ras_config_req)) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "6182 Received RAS_LOG request " "below minimum size\n"); rc = -EINVAL; goto ras_job_error; } /* Check FW log status */ rc = lpfc_check_fwlog_support(phba); if (rc) goto ras_job_error; ras_req = (struct lpfc_bsg_set_ras_config_req *) bsg_request->rqst_data.h_vendor.vendor_cmd; action = ras_req->action; log_level = ras_req->log_level; if (action == LPFC_RASACTION_STOP_LOGGING) { /* Check if already disabled */ spin_lock_irq(&phba->hbalock); if (ras_fwlog->state != ACTIVE) { spin_unlock_irq(&phba->hbalock); rc = -ESRCH; goto ras_job_error; } spin_unlock_irq(&phba->hbalock); /* Disable logging */ lpfc_ras_stop_fwlog(phba); } else { /*action = LPFC_RASACTION_START_LOGGING*/ /* Even though FW-logging is active re-initialize * FW-logging with new log-level. Return status * "Logging already Running" to caller. 
**/ spin_lock_irq(&phba->hbalock); if (ras_fwlog->state != INACTIVE) action_status = -EINPROGRESS; spin_unlock_irq(&phba->hbalock); /* Enable logging */ rc = lpfc_sli4_ras_fwlog_init(phba, log_level, LPFC_RAS_ENABLE_LOGGING); if (rc) { rc = -EINVAL; goto ras_job_error; } /* Check if FW-logging is re-initialized */ if (action_status == -EINPROGRESS) rc = action_status; } ras_job_error: /* make error code available to userspace */ bsg_reply->result = rc; /* complete the job back to userspace */ if (!rc) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_bsg_get_ras_lwpd: Get log write position data * @job: fc_bsg_job to handle * * Get Offset/Wrap count of the log message written * in host memory **/ static int lpfc_bsg_get_ras_lwpd(struct bsg_job *job) { struct Scsi_Host *shost = fc_bsg_to_shost(job); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_bsg_get_ras_lwpd *ras_reply; struct lpfc_hba *phba = vport->phba; struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; struct fc_bsg_reply *bsg_reply = job->reply; u32 *lwpd_ptr = NULL; int rc = 0; rc = lpfc_check_fwlog_support(phba); if (rc) goto ras_job_error; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct lpfc_bsg_ras_req)) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "6183 Received RAS_LOG request " "below minimum size\n"); rc = -EINVAL; goto ras_job_error; } ras_reply = (struct lpfc_bsg_get_ras_lwpd *) bsg_reply->reply_data.vendor_reply.vendor_rsp; if (!ras_fwlog->lwpd.virt) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "6193 Restart FW Logging\n"); rc = -EINVAL; goto ras_job_error; } /* Get lwpd offset */ lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt); ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff); /* Get wrap count */ ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff); ras_job_error: /* make error code available to userspace */ bsg_reply->result = rc; /* complete the job back to userspace */ if (!rc) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } /** * lpfc_bsg_get_ras_fwlog: Read FW log * @job: fc_bsg_job to handle * * Copy the FW log into the passed buffer. 
**/ static int lpfc_bsg_get_ras_fwlog(struct bsg_job *job) { struct Scsi_Host *shost = fc_bsg_to_shost(job); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_bsg_get_fwlog_req *ras_req; u32 rd_offset, rd_index, offset; void *src, *fwlog_buff; struct lpfc_ras_fwlog *ras_fwlog = NULL; struct lpfc_dmabuf *dmabuf, *next; int rc = 0; ras_fwlog = &phba->ras_fwlog; rc = lpfc_check_fwlog_support(phba); if (rc) goto ras_job_error; /* Logging to be stopped before reading */ spin_lock_irq(&phba->hbalock); if (ras_fwlog->state == ACTIVE) { spin_unlock_irq(&phba->hbalock); rc = -EINPROGRESS; goto ras_job_error; } spin_unlock_irq(&phba->hbalock); if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct lpfc_bsg_get_fwlog_req)) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "6184 Received RAS_LOG request " "below minimum size\n"); rc = -EINVAL; goto ras_job_error; } ras_req = (struct lpfc_bsg_get_fwlog_req *) bsg_request->rqst_data.h_vendor.vendor_cmd; rd_offset = ras_req->read_offset; /* Allocate memory to read fw log*/ fwlog_buff = vmalloc(ras_req->read_size); if (!fwlog_buff) { rc = -ENOMEM; goto ras_job_error; } rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE); offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE); list_for_each_entry_safe(dmabuf, next, &ras_fwlog->fwlog_buff_list, list) { if (dmabuf->buffer_tag < rd_index) continue; src = dmabuf->virt + offset; memcpy(fwlog_buff, src, ras_req->read_size); break; } bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, fwlog_buff, ras_req->read_size); vfree(fwlog_buff); ras_job_error: bsg_reply->result = rc; if (!rc) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } static int lpfc_get_trunk_info(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_trunk_info *event_reply; int rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2744 Received GET TRUNK _INFO request below " "minimum size\n"); rc = -EINVAL; goto job_error; } event_reply = (struct lpfc_trunk_info *) bsg_reply->reply_data.vendor_reply.vendor_rsp; if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2728 Received GET TRUNK _INFO reply below " "minimum size\n"); rc = -EINVAL; goto job_error; } if (event_reply == NULL) { rc = -EINVAL; goto job_error; } bsg_bf_set(lpfc_trunk_info_link_status, event_reply, (phba->link_state >= LPFC_LINK_UP) ? 1 : 0); bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply, (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0); bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply, (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0); bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply, (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0); bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply, (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 
1 : 0); bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply, bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)); bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply, bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)); bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply, bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)); bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply, bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)); event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000; event_reply->logical_speed = phba->sli4_hba.link_state.logical_speed / 1000; job_error: bsg_reply->result = rc; if (!rc) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } static int lpfc_get_cgnbuf_info(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct get_cgnbuf_info_req *cgnbuf_req; struct lpfc_cgn_info *cp; uint8_t *cgn_buff; int size, cinfosz; int rc = 0; if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct get_cgnbuf_info_req)) { rc = -ENOMEM; goto job_exit; } if (!phba->sli4_hba.pc_sli4_params.cmf) { rc = -ENOENT; goto job_exit; } if (!phba->cgn_i || !phba->cgn_i->virt) { rc = -ENOENT; goto job_exit; } cp = phba->cgn_i->virt; if (cp->cgn_info_version < LPFC_CGN_INFO_V3) { rc = -EPERM; goto job_exit; } cgnbuf_req = (struct get_cgnbuf_info_req *) bsg_request->rqst_data.h_vendor.vendor_cmd; /* For reset or size == 0 */ bsg_reply->reply_payload_rcv_len = 0; if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) { lpfc_init_congestion_stat(phba); goto job_exit; } /* We don't want to include the CRC at the end */ cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t); size = cgnbuf_req->read_size; if (!size) goto job_exit; if (size < cinfosz) { /* Just copy back what we can */ cinfosz = size; rc = -E2BIG; } /* Allocate memory to read congestion info */ cgn_buff = vmalloc(cinfosz); if (!cgn_buff) { rc = -ENOMEM; goto job_exit; } memcpy(cgn_buff, cp, cinfosz); bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, cgn_buff, cinfosz); vfree(cgn_buff); job_exit: bsg_reply->result = rc; if (!rc) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); else lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2724 GET CGNBUF error: %d\n", rc); return rc; } /** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle **/ static int lpfc_bsg_hst_vendor(struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; int rc; switch (command) { case LPFC_BSG_VENDOR_SET_CT_EVENT: rc = lpfc_bsg_hba_set_event(job); break; case LPFC_BSG_VENDOR_GET_CT_EVENT: rc = lpfc_bsg_hba_get_event(job); break; case LPFC_BSG_VENDOR_SEND_MGMT_RESP: rc = lpfc_bsg_send_mgmt_rsp(job); break; case LPFC_BSG_VENDOR_DIAG_MODE: rc = lpfc_bsg_diag_loopback_mode(job); break; case LPFC_BSG_VENDOR_DIAG_MODE_END: rc = lpfc_sli4_bsg_diag_mode_end(job); break; case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK: rc = lpfc_bsg_diag_loopback_run(job); break; case LPFC_BSG_VENDOR_LINK_DIAG_TEST: rc = lpfc_sli4_bsg_link_diag_test(job); break; case LPFC_BSG_VENDOR_GET_MGMT_REV: rc = lpfc_bsg_get_dfc_rev(job); break; case LPFC_BSG_VENDOR_MBOX: rc = lpfc_bsg_mbox_cmd(job); break; case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: rc = 
lpfc_forced_link_speed(job); break; case LPFC_BSG_VENDOR_RAS_GET_LWPD: rc = lpfc_bsg_get_ras_lwpd(job); break; case LPFC_BSG_VENDOR_RAS_GET_FWLOG: rc = lpfc_bsg_get_ras_fwlog(job); break; case LPFC_BSG_VENDOR_RAS_GET_CONFIG: rc = lpfc_bsg_get_ras_config(job); break; case LPFC_BSG_VENDOR_RAS_SET_CONFIG: rc = lpfc_bsg_set_ras_config(job); break; case LPFC_BSG_VENDOR_GET_TRUNK_INFO: rc = lpfc_get_trunk_info(job); break; case LPFC_BSG_VENDOR_GET_CGNBUF_INFO: rc = lpfc_get_cgnbuf_info(job); break; default: rc = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; /* make error code available to userspace */ bsg_reply->result = rc; break; } return rc; } /** * lpfc_bsg_request - handle a bsg request from the FC transport * @job: bsg_job to handle **/ int lpfc_bsg_request(struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t msgcode; int rc; msgcode = bsg_request->msgcode; switch (msgcode) { case FC_BSG_HST_VENDOR: rc = lpfc_bsg_hst_vendor(job); break; case FC_BSG_RPT_ELS: rc = lpfc_bsg_rport_els(job); break; case FC_BSG_RPT_CT: rc = lpfc_bsg_send_mgmt_cmd(job); break; default: rc = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; /* make error code available to userspace */ bsg_reply->result = rc; break; } return rc; } /** * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport * @job: bsg_job that has timed out * * This function just aborts the job's IOCB. The aborted IOCB will return to * the waiting function which will handle passing the error back to userspace **/ int lpfc_bsg_timeout(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; struct lpfc_sli_ring *pring; struct bsg_job_data *dd_data; unsigned long flags; int rc = 0; LIST_HEAD(completions); struct lpfc_iocbq *check_iocb, *next_iocb; pring = lpfc_phba_elsring(phba); if (unlikely(!pring)) return -EIO; /* if job's driver data is NULL, the command completed or is in the * the process of completing. In this case, return status to request * so the timeout is retried. This avoids double completion issues * and the request will be pulled off the timer queue when the * command's completion handler executes. Otherwise, prevent the * command's completion handler from executing the job done callback * and continue processing to abort the outstanding the command. */ spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = (struct bsg_job_data *)job->dd_data; if (dd_data) { dd_data->set_job = NULL; job->dd_data = NULL; } else { spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return -EAGAIN; } switch (dd_data->type) { case TYPE_IOCB: /* Check to see if IOCB was issued to the port or not. If not, * remove it from the txq queue and call cancel iocbs. 
* Otherwise, call abort iotag */ cmdiocb = dd_data->context_un.iocb.cmdiocbq; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O abort window is still open */ if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) { spin_unlock_irqrestore(&phba->hbalock, flags); return -EAGAIN; } list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, list) { if (check_iocb == cmdiocb) { list_move_tail(&check_iocb->list, &completions); break; } } if (list_empty(&completions)) lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL); spin_unlock_irqrestore(&phba->hbalock, flags); if (!list_empty(&completions)) { lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } break; case TYPE_EVT: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; case TYPE_MBOX: /* Update the ext buf ctx state if needed */ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; } /* scsi transport fc fc_bsg_job_timeout expects a zero return code, * otherwise an error message will be displayed on the console * so always return success (zero) */ return rc; }
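/*
 * Editor's sketch (not part of lpfc): every vendor handler above follows the
 * same bsg completion contract -- return 0 after finishing the job inline
 * (setting bsg_reply->result and calling bsg_job_done() itself), return 1
 * when the request was handed to the adapter and a completion callback will
 * finish the job later, or return a negative errno without completing the
 * job.  The hypothetical lpfc_bsg_example_cmd() below only restates that
 * pattern with the same helpers used by lpfc_forced_link_speed(); it is not
 * an existing lpfc function and is never wired into lpfc_bsg_hst_vendor().
 */
static int __maybe_unused lpfc_bsg_example_cmd(struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	int rc = 0;

	/* Validate the request length first, as the real handlers do. */
	if (job->request_len < sizeof(struct fc_bsg_request)) {
		rc = -EINVAL;
		goto job_error;
	}

	/*
	 * Issue the command here.  If it will complete asynchronously,
	 * stash the job pointer in dd_data and return 1 without touching
	 * the reply; the completion handler calls bsg_job_done() later.
	 */

job_error:
	/* make the error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace only on inline completion */
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}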
linux-master
drivers/scsi/lpfc/lpfc_bsg.c
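/*
 * Editor's sketch (userspace, not part of the kernel tree): the vendor
 * commands dispatched by lpfc_bsg_hst_vendor() in the file above arrive
 * through the FC transport's bsg node via the SG_IO v4 interface.  Only the
 * plumbing is shown; the device path "/dev/bsg/fc_host0", the vendor_id
 * value and the vendor command code are placeholders -- the real command
 * codes live in the driver-private lpfc_bsg.h and would have to be supplied
 * by the management application.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>

int main(void)
{
	/* 64-bit aligned storage for the request/reply plus vendor payload */
	uint64_t req_buf[(sizeof(struct fc_bsg_request) + 4 * sizeof(uint32_t) + 7) / 8];
	uint64_t rsp_buf[(sizeof(struct fc_bsg_reply) + 256 + 7) / 8];
	struct fc_bsg_request *req = (struct fc_bsg_request *)req_buf;
	struct fc_bsg_reply *rsp = (struct fc_bsg_reply *)rsp_buf;
	unsigned char din[1024];	/* read-direction payload, if the command returns one */
	struct sg_io_v4 io;
	int fd, rc;

	memset(req_buf, 0, sizeof(req_buf));
	memset(rsp_buf, 0, sizeof(rsp_buf));
	memset(&io, 0, sizeof(io));

	req->msgcode = FC_BSG_HST_VENDOR;
	req->rqst_data.h_vendor.vendor_id = 0;		/* placeholder HBA vendor id */
	req->rqst_data.h_vendor.vendor_cmd[0] = 0;	/* placeholder vendor command code */

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)req;
	io.request_len = sizeof(req_buf);
	io.response = (uintptr_t)rsp;
	io.max_response_len = sizeof(rsp_buf);
	io.din_xferp = (uintptr_t)din;
	io.din_xfer_len = sizeof(din);
	io.timeout = 30 * 1000;				/* milliseconds */

	fd = open("/dev/bsg/fc_host0", O_RDWR);		/* placeholder bsg node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	rc = ioctl(fd, SG_IO, &io);
	if (rc < 0)
		perror("SG_IO");
	else
		printf("result %d, payload %u bytes\n",
		       (int)rsp->result, rsp->reply_payload_rcv_len);
	close(fd);
	return rc < 0 ? 1 : 0;
}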
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/lockdep.h> #include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* AlpaArray for assignment of scsid for scan-down and bind_method */ static uint8_t lpfcAlpaArray[] = { 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 }; static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); static int lpfc_fcf_inuse(struct lpfc_hba *); static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); static void lpfc_check_inactive_vmid(struct lpfc_hba *phba); static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba); static int lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp) { if (ndlp->nlp_fc4_type || ndlp->nlp_type & NLP_FABRIC) return 1; return 0; } /* The source of a terminate rport I/O is either a dev_loss_tmo * event or a call to fc_remove_host. While the rport should be * valid during these downcalls, the transport can call twice * in a single event. 
This routine provides somoe protection * as the NDLP isn't really free, just released to the pool. */ static int lpfc_rport_invalid(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; if (!rport) { pr_err("**** %s: NULL rport, exit.\n", __func__); return -EINVAL; } rdata = rport->dd_data; if (!rdata) { pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n", __func__, rport, rport->scsi_target_id); return -EINVAL; } ndlp = rdata->pnode; if (!rdata->pnode) { pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n", __func__, rport, rport->scsi_target_id); return -EINVAL; } if (!ndlp->vport) { pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px " "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport, rport->scsi_target_id); return -EINVAL; } return 0; } void lpfc_terminate_rport_io(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; struct lpfc_vport *vport; if (lpfc_rport_invalid(rport)) return; rdata = rport->dd_data; ndlp = rdata->pnode; vport = ndlp->vport; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport terminate: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); } /* * This function will be called when dev_loss_tmo fire. */ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) { struct lpfc_nodelist *ndlp; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) return; vport = ndlp->vport; phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport devlosscb: sid:x%x did:x%x flg:x%x", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3181 dev_loss_callbk x%06x, rport x%px flg x%x " "load_flag x%x refcnt %u state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref), ndlp->nlp_state, ndlp->fc4_xpt_flags); /* Don't schedule a worker thread event if the vport is going down. */ if (vport->load_flag & FC_UNLOADING) { spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; /* The scsi_transport is done with the rport so lpfc cannot * call to unregister. Remove the scsi transport reference * and clean up the SCSI transport node details. */ if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) { ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; /* NVME transport-registered rports need the * NLP_XPT_REGD flag to complete an unregister. */ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); spin_lock_irqsave(&ndlp->lock, iflags); } /* Only 1 thread can drop the initial node reference. If * another thread has set NLP_DROPPED, this thread is done. 
*/ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && !(ndlp->nlp_flag & NLP_DROPPED)) { ndlp->nlp_flag |= NLP_DROPPED; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); return; } spin_unlock_irqrestore(&ndlp->lock, iflags); return; } if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6789 rport name %llx != node port name %llx", rport->port_name, wwn_to_u64(ndlp->nlp_portname.u.wwn)); evtp = &ndlp->dev_loss_evt; if (!list_empty(&evtp->evt_listp)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6790 rport name %llx dev_loss_evt pending\n", rport->port_name); return; } spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag |= NLP_IN_DEV_LOSS; /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag. */ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; /* * The backend does not expect any more calls associated with this * rport. Remove the association between rport and ndlp. */ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL; ndlp->rport = NULL; spin_unlock_irqrestore(&ndlp->lock, iflags); if (phba->worker_thread) { /* We need to hold the node by incrementing the reference * count until this queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); spin_lock_irqsave(&phba->hbalock, iflags); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); lpfc_worker_wake_up(phba); } spin_unlock_irqrestore(&phba->hbalock, iflags); } else { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " " rport x%px flg x%x load_flag x%x refcnt " "%d\n", __func__, ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { spin_lock_irqsave(&ndlp->lock, iflags); /* Node is in dev loss. No further transaction. */ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } } return; } /** * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport * @vport: Pointer to vport context object. * * This function checks for idle VMID entries related to a particular vport. If * found unused/idle, free them accordingly. 
**/ static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport) { u16 keep; u32 difftime = 0, r, bucket; u64 *lta; int cpu; struct lpfc_vmid *vmp; write_lock(&vport->vmid_lock); if (!vport->cur_vmid_cnt) goto out; /* iterate through the table */ hash_for_each(vport->hash_table, bucket, vmp, hnode) { keep = 0; if (vmp->flag & LPFC_VMID_REGISTERED) { /* check if the particular VMID is in use */ /* for all available per cpu variable */ for_each_possible_cpu(cpu) { /* if last access time is less than timeout */ lta = per_cpu_ptr(vmp->last_io_time, cpu); if (!lta) continue; difftime = (jiffies) - (*lta); if ((vport->vmid_inactivity_timeout * JIFFIES_PER_HR) > difftime) { keep = 1; break; } } /* if none of the cpus have been used by the vm, */ /* remove the entry if already registered */ if (!keep) { /* mark the entry for deregistration */ vmp->flag = LPFC_VMID_DE_REGISTER; write_unlock(&vport->vmid_lock); if (vport->vmid_priority_tagging) r = lpfc_vmid_uvem(vport, vmp, false); else r = lpfc_vmid_cmd(vport, SLI_CTAS_DAPP_IDENT, vmp); /* decrement number of active vms and mark */ /* entry in slot as free */ write_lock(&vport->vmid_lock); if (!r) { struct lpfc_vmid *ht = vmp; vport->cur_vmid_cnt--; ht->flag = LPFC_VMID_SLOT_FREE; free_percpu(ht->last_io_time); ht->last_io_time = NULL; hash_del(&ht->hnode); } } } } out: write_unlock(&vport->vmid_lock); } /** * lpfc_check_inactive_vmid - VMID inactivity checker * @phba: Pointer to hba context object. * * This function is called from the worker thread to determine if an entry in * the VMID table can be released since there was no I/O activity seen from that * particular VM for the specified time. When this happens, the entry in the * table is released and also the resources on the switch cleared. **/ static void lpfc_check_inactive_vmid(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct lpfc_vport **vports; int i; vports = lpfc_create_vport_work_array(phba); if (!vports) return; for (i = 0; i <= phba->max_vports; i++) { if ((!vports[i]) && (i == 0)) vport = phba->pport; else vport = vports[i]; if (!vport) break; lpfc_check_inactive_vmid_one(vport); } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss * @vport: Pointer to vport object. * @ndlp: Pointer to remote node object. * * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of * node during dev_loss_tmo processing, then this function restores the nlp_put * kref decrement from lpfc_dev_loss_tmo_handler. **/ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { unsigned long iflags; spin_lock_irqsave(&ndlp->lock, iflags); if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) { ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " "refcnt %d ndlp %p flag x%x " "port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); spin_lock_irqsave(&ndlp->lock, iflags); } spin_unlock_irqrestore(&ndlp->lock, iflags); } /** * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler * @ndlp: Pointer to remote node object. * * This function is called from the worker thread when devloss timeout timer * expires. 
For SLI4 host, this routine shall return 1 when at lease one * remote node, including this @ndlp, is still in use of FCF; otherwise, this * routine shall return 0 when there is no remote node is still in use of FCF * when devloss timeout happened to this @ndlp. **/ static int lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) { struct lpfc_vport *vport; struct lpfc_hba *phba; uint8_t *name; int warn_on = 0; int fcf_inuse = 0; bool recovering = false; struct fc_vport *fc_vport = NULL; unsigned long iflags; vport = ndlp->vport; name = (uint8_t *)&ndlp->nlp_portname; phba = vport->phba; if (phba->sli_rev == LPFC_SLI_REV4) fcf_inuse = lpfc_fcf_inuse(phba); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport devlosstmo:did:x%x type:x%x id:x%x", ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n", __func__, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); /* If the driver is recovering the rport, ignore devloss. */ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0284 Devloss timeout Ignored on " "WWPN %x:%x:%x:%x:%x:%x:%x:%x " "NPort x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); return fcf_inuse; } /* Fabric nodes are done. */ if (ndlp->nlp_type & NLP_FABRIC) { spin_lock_irqsave(&ndlp->lock, iflags); /* The driver has to account for a race between any fabric * node that's in recovery when dev_loss_tmo expires. When this * happens, the driver has to allow node recovery. */ switch (ndlp->nlp_DID) { case Fabric_DID: fc_vport = vport->fc_vport; if (fc_vport) { /* NPIV path. */ if (fc_vport->vport_state == FC_VPORT_INITIALIZING) recovering = true; } else { /* Physical port path. */ if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) recovering = true; } break; case Fabric_Cntl_DID: if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) recovering = true; break; case FDMI_DID: fallthrough; case NameServer_DID: if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) recovering = true; break; default: /* Ensure the nlp_DID at least has the correct prefix. * The fabric domain controller's last three nibbles * vary so we handle it in the default case. */ if (ndlp->nlp_DID & Fabric_DID_MASK) { if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) recovering = true; } break; } spin_unlock_irqrestore(&ndlp->lock, iflags); /* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing * the following lpfc_nlp_put is necessary after fabric node is * recovered. */ if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " "flag x%x port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); spin_lock_irqsave(&ndlp->lock, iflags); ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Fabric node fully recovered before this dev_loss_tmo * queue work is processed. Thus, ignore the * dev_loss_tmo event. 
*/ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " "flag x%x port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); return fcf_inuse; } spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); return fcf_inuse; } if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); } if (warn_on) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "NPort x%06x Data: x%x x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref)); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "NPort x%06x Data: x%x x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM event. */ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { return fcf_inuse; } if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); return fcf_inuse; } static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba) { struct lpfc_vport *vport; struct lpfc_vport **vports; int i; vports = lpfc_create_vport_work_array(phba); if (!vports) return; for (i = 0; i <= phba->max_vports; i++) { if ((!vports[i]) && (i == 0)) vport = phba->pport; else vport = vports[i]; if (!vport) break; if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) { if (!lpfc_issue_els_qfpa(vport)) vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA; } } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler * @phba: Pointer to hba context object. * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. * @nlp_did: remote node identifer with devloss timeout. * * This function is called from the worker thread after invoking devloss * timeout handler and releasing the reference count for the ndlp with * which the devloss timeout was handled for SLI4 host. For the devloss * timeout of the last remote node which had been in use of FCF, when this * routine is invoked, it shall be guaranteed that none of the remote are * in-use of FCF. When devloss timeout to the last remote using the FCF, * if the FIP engine is neither in FCF table scan process nor roundrobin * failover process, the in-use FCF shall be unregistered. If the FIP * engine is in FCF discovery process, the devloss timeout state shall * be set for either the FCF table scan process or roundrobin failover * process to unregister the in-use FCF. **/ static void lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, uint32_t nlp_did) { /* If devloss timeout happened to a remote node when FCF had no * longer been in-use, do nothing. 
*/ if (!fcf_inuse) return; if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { if (phba->hba_flag & HBA_DEVLOSS_TMO) { spin_unlock_irq(&phba->hbalock); return; } phba->hba_flag |= HBA_DEVLOSS_TMO; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2847 Last remote node (x%x) using " "FCF devloss tmo\n", nlp_did); } if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2868 Devloss tmo to FCF rediscovery " "in progress\n"); return; } if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2869 Devloss tmo to idle FIP engine, " "unreg in-use FCF and rescan.\n"); /* Unregister in-use FCF and rescan */ lpfc_unregister_fcf_rescan(phba); return; } spin_unlock_irq(&phba->hbalock); if (phba->hba_flag & FCF_TS_INPROG) lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2870 FCF table scan in progress\n"); if (phba->hba_flag & FCF_RR_INPROG) lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2871 FLOGI roundrobin FCF failover " "in progress\n"); } lpfc_unregister_unused_fcf(phba); } /** * lpfc_alloc_fast_evt - Allocates data structure for posting event * @phba: Pointer to hba context object. * * This function is called from the functions which need to post * events from interrupt context. This function allocates data * structure required for posting event. It also keeps track of * number of events pending and prevent event storm when there are * too many events. **/ struct lpfc_fast_path_event * lpfc_alloc_fast_evt(struct lpfc_hba *phba) { struct lpfc_fast_path_event *ret; /* If there are lot of fast event do not exhaust memory due to this */ if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) return NULL; ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC); if (ret) { atomic_inc(&phba->fast_event_count); INIT_LIST_HEAD(&ret->work_evt.evt_listp); ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; } return ret; } /** * lpfc_free_fast_evt - Frees event data structure * @phba: Pointer to hba context object. * @evt: Event object which need to be freed. * * This function frees the data structure required for posting * events. **/ void lpfc_free_fast_evt(struct lpfc_hba *phba, struct lpfc_fast_path_event *evt) { atomic_dec(&phba->fast_event_count); kfree(evt); } /** * lpfc_send_fastpath_evt - Posts events generated from fast path * @phba: Pointer to hba context object. * @evtp: Event data structure. * * This function is called from worker thread, when the interrupt * context need to post an event. This function posts the event * to fc transport netlink interface. **/ static void lpfc_send_fastpath_evt(struct lpfc_hba *phba, struct lpfc_work_evt *evtp) { unsigned long evt_category, evt_sub_category; struct lpfc_fast_path_event *fast_evt_data; char *evt_data; uint32_t evt_data_size; struct Scsi_Host *shost; fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, work_evt); evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; evt_sub_category = (unsigned long) fast_evt_data->un. fabric_evt.subcategory; shost = lpfc_shost_from_vport(fast_evt_data->vport); if (evt_category == FC_REG_FABRIC_EVENT) { if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { evt_data = (char *) &fast_evt_data->un.read_check_error; evt_data_size = sizeof(fast_evt_data->un. 
read_check_error); } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { evt_data = (char *) &fast_evt_data->un.fabric_evt; evt_data_size = sizeof(fast_evt_data->un.fabric_evt); } else { lpfc_free_fast_evt(phba, fast_evt_data); return; } } else if (evt_category == FC_REG_SCSI_EVENT) { switch (evt_sub_category) { case LPFC_EVENT_QFULL: case LPFC_EVENT_DEVBSY: evt_data = (char *) &fast_evt_data->un.scsi_evt; evt_data_size = sizeof(fast_evt_data->un.scsi_evt); break; case LPFC_EVENT_CHECK_COND: evt_data = (char *) &fast_evt_data->un.check_cond_evt; evt_data_size = sizeof(fast_evt_data->un. check_cond_evt); break; case LPFC_EVENT_VARQUEDEPTH: evt_data = (char *) &fast_evt_data->un.queue_depth_evt; evt_data_size = sizeof(fast_evt_data->un. queue_depth_evt); break; default: lpfc_free_fast_evt(phba, fast_evt_data); return; } } else { lpfc_free_fast_evt(phba, fast_evt_data); return; } if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_host_post_vendor_event(shost, fc_get_event_number(), evt_data_size, evt_data, LPFC_NL_VENDOR_ID); lpfc_free_fast_evt(phba, fast_evt_data); return; } static void lpfc_work_list_done(struct lpfc_hba *phba) { struct lpfc_work_evt *evtp = NULL; struct lpfc_nodelist *ndlp; int free_evt; int fcf_inuse; uint32_t nlp_did; bool hba_pci_err; spin_lock_irq(&phba->hbalock); while (!list_empty(&phba->work_list)) { list_remove_head((&phba->work_list), evtp, typeof(*evtp), evt_listp); spin_unlock_irq(&phba->hbalock); hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); free_evt = 1; switch (evtp->evt) { case LPFC_EVT_ELS_RETRY: ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); if (!hba_pci_err) { lpfc_els_retry_delay_handler(ndlp); free_evt = 0; /* evt is part of ndlp */ } /* decrement the node reference count held * for this queued work */ lpfc_nlp_put(ndlp); break; case LPFC_EVT_DEV_LOSS: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); free_evt = 0; /* decrement the node reference count held for * this queued work */ nlp_did = ndlp->nlp_DID; lpfc_nlp_put(ndlp); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_post_dev_loss_tmo_handler(phba, fcf_inuse, nlp_did); break; case LPFC_EVT_RECOVER_PORT: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); if (!hba_pci_err) { lpfc_sli_abts_recover_port(ndlp->vport, ndlp); free_evt = 0; } /* decrement the node reference count held for * this queued work */ lpfc_nlp_put(ndlp); break; case LPFC_EVT_ONLINE: if (phba->link_state < LPFC_LINK_DOWN) *(int *) (evtp->evt_arg1) = lpfc_online(phba); else *(int *) (evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_OFFLINE_PREP: if (phba->link_state >= LPFC_LINK_DOWN) lpfc_offline_prep(phba, LPFC_MBX_WAIT); *(int *)(evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_OFFLINE: lpfc_offline(phba); lpfc_sli_brdrestart(phba); *(int *)(evtp->evt_arg1) = lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_WARM_START: lpfc_offline(phba); lpfc_reset_barrier(phba); lpfc_sli_brdreset(phba); lpfc_hba_down_post(phba); *(int *)(evtp->evt_arg1) = lpfc_sli_brdready(phba, HS_MBRDY); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_KILL: lpfc_offline(phba); *(int *)(evtp->evt_arg1) = (phba->pport->stopped) ? 
0 : lpfc_sli_brdkill(phba); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_FASTPATH_MGMT_EVT: lpfc_send_fastpath_evt(phba, evtp); free_evt = 0; break; case LPFC_EVT_RESET_HBA: if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_reset_hba(phba); break; } if (free_evt) kfree(evtp); spin_lock_irq(&phba->hbalock); } spin_unlock_irq(&phba->hbalock); } static void lpfc_work_done(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; uint32_t ha_copy, status, control, work_port_events; struct lpfc_vport **vports; struct lpfc_vport *vport; int i; bool hba_pci_err; hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); spin_lock_irq(&phba->hbalock); ha_copy = phba->work_ha; phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); if (hba_pci_err) ha_copy = 0; /* First, try to post the next mailbox command to SLI4 device */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err) lpfc_sli4_post_async_mbox(phba); if (ha_copy & HA_ERATT) { /* Handle the error attention event */ lpfc_handle_eratt(phba); if (phba->fw_dump_cmpl) { complete(phba->fw_dump_cmpl); phba->fw_dump_cmpl = NULL; } } if (ha_copy & HA_MBATT) lpfc_sli_handle_mb_event(phba); if (ha_copy & HA_LATT) lpfc_handle_latt(phba); /* Handle VMID Events */ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) { if (phba->pport->work_port_events & WORKER_CHECK_VMID_ISSUE_QFPA) { lpfc_check_vmid_qfpa_issue(phba); phba->pport->work_port_events &= ~WORKER_CHECK_VMID_ISSUE_QFPA; } if (phba->pport->work_port_events & WORKER_CHECK_INACTIVE_VMID) { lpfc_check_inactive_vmid(phba); phba->pport->work_port_events &= ~WORKER_CHECK_INACTIVE_VMID; } } /* Process SLI4 events */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { if (phba->hba_flag & HBA_RRQ_ACTIVE) lpfc_handle_rrq_active(phba); if (phba->hba_flag & ELS_XRI_ABORT_EVENT) lpfc_sli4_els_xri_abort_event_proc(phba); if (phba->hba_flag & ASYNC_EVENT) lpfc_sli4_async_event_proc(phba); if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; spin_unlock_irq(&phba->hbalock); lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } if (phba->fcf.fcf_flag & FCF_REDISC_EVT) lpfc_sli4_fcf_redisc_event_proc(phba); } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport */ if (vports[i] == NULL && i == 0) vport = phba->pport; else vport = vports[i]; if (vport == NULL) break; spin_lock_irq(&vport->work_port_lock); work_port_events = vport->work_port_events; vport->work_port_events &= ~work_port_events; spin_unlock_irq(&vport->work_port_lock); if (hba_pci_err) continue; if (work_port_events & WORKER_DISC_TMO) lpfc_disc_timeout_handler(vport); if (work_port_events & WORKER_ELS_TMO) lpfc_els_timeout_handler(vport); if (work_port_events & WORKER_HB_TMO) lpfc_hb_timeout_handler(phba); if (work_port_events & WORKER_MBOX_TMO) lpfc_mbox_timeout_handler(phba); if (work_port_events & WORKER_FABRIC_BLOCK_TMO) lpfc_unblock_fabric_iocbs(phba); if (work_port_events & WORKER_RAMP_DOWN_QUEUE) lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_DELAYED_DISC_TMO) lpfc_delayed_disc_timeout_handler(vport); } lpfc_destroy_vport_work_array(phba, vports); pring = lpfc_phba_elsring(phba); status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); if (pring && (status & HA_RXMASK || pring->flag & LPFC_DEFERRED_RING_EVENT || phba->hba_flag & HBA_SP_QUEUE_EVT)) { 
if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Preserve legacy behavior. */ if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) set_bit(LPFC_DATA_READY, &phba->data_flags); } else { /* Driver could have abort request completed in queue * when link goes down. Allow for this transition. */ if (phba->link_state >= LPFC_LINK_DOWN || phba->link_flag & LS_MDS_LOOPBACK) { pring->flag &= ~LPFC_DEFERRED_RING_EVENT; lpfc_sli_handle_slow_ring_event(phba, pring, (status & HA_RXMASK)); } } if (phba->sli_rev == LPFC_SLI_REV4) lpfc_drain_txq(phba); /* * Turn on Ring interrupts */ if (phba->sli_rev <= LPFC_SLI_REV3) { spin_lock_irq(&phba->hbalock); control = readl(phba->HCregaddr); if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { lpfc_debugfs_slow_ring_trc(phba, "WRK Enable ring: cntl:x%x hacopy:x%x", control, ha_copy, 0); control |= (HC_R0INT_ENA << LPFC_ELS_RING); writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } else { lpfc_debugfs_slow_ring_trc(phba, "WRK Ring ok: cntl:x%x hacopy:x%x", control, ha_copy, 0); } spin_unlock_irq(&phba->hbalock); } } lpfc_work_list_done(phba); } int lpfc_do_work(void *p) { struct lpfc_hba *phba = p; int rc; set_user_nice(current, MIN_NICE); current->flags |= PF_NOFREEZE; phba->data_flags = 0; while (!kthread_should_stop()) { /* wait and check worker queue activities */ rc = wait_event_interruptible(phba->work_waitq, (test_and_clear_bit(LPFC_DATA_READY, &phba->data_flags) || kthread_should_stop())); /* Signal wakeup shall terminate the worker thread */ if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0433 Wakeup on signal: rc=x%x\n", rc); break; } /* Attend pending lpfc data processing */ lpfc_work_done(phba); } phba->worker_thread = NULL; lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0432 Worker thread stopped.\n"); return 0; } /* * This is only called to handle FC worker events. Since this a rare * occurrence, we allocate a struct lpfc_work_evt structure here instead of * embedding it in the IOCB. */ int lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, uint32_t evt) { struct lpfc_work_evt *evtp; unsigned long flags; /* * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will * be queued to worker thread for processing */ evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); if (!evtp) return 0; evtp->evt_arg1 = arg1; evtp->evt_arg2 = arg2; evtp->evt = evt; spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&evtp->evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_worker_wake_up(phba); return 1; } void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || ((vport->port_type == LPFC_NPIV_PORT) && ((ndlp->nlp_DID == NameServer_DID) || (ndlp->nlp_DID == FDMI_DID) || (ndlp->nlp_DID == Fabric_Cntl_DID)))) lpfc_unreg_rpi(vport, ndlp); /* Leave Fabric nodes alone on link down */ if ((phba->sli_rev < LPFC_SLI_REV4) && (!remove && ndlp->nlp_type & NLP_FABRIC)) continue; /* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) lpfc_nvmet_invalidate_host(phba, ndlp); lpfc_disc_state_machine(vport, ndlp, NULL, remove ? 
NLP_EVT_DEVICE_RM : NLP_EVT_DEVICE_RECOVERY); } if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); } } void lpfc_port_link_failure(struct lpfc_vport *vport) { lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); /* Cleanup any outstanding received buffers */ lpfc_cleanup_rcv_buffers(vport); /* Cleanup any outstanding RSCN activity */ lpfc_els_flush_rscn(vport); /* Cleanup any outstanding ELS commands */ lpfc_els_flush_cmd(vport); lpfc_cleanup_rpis(vport, 0); /* Turn off discovery timer if its running */ lpfc_can_disctmo(vport); } void lpfc_linkdown_port(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Link Down: state:x%x rtry:x%x flg:x%x", vport->port_state, vport->fc_ns_retry, vport->fc_flag); lpfc_port_link_failure(vport); /* Stop delayed Nport discovery */ spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_DISC_DELAYED; spin_unlock_irq(shost->host_lock); del_timer_sync(&vport->delayed_disc_tmo); if (phba->sli_rev == LPFC_SLI_REV4 && vport->port_type == LPFC_PHYSICAL_PORT && phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { /* Assume success on link up */ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; } } int lpfc_linkdown(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_vport **vports; LPFC_MBOXQ_t *mb; int i; int offline; if (phba->link_state == LPFC_LINK_DOWN) return 0; /* Block all SCSI stack I/Os */ lpfc_scsi_dev_block(phba); offline = pci_channel_offline(phba->pcidev); phba->defer_flogi_acc_flag = false; /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); spin_unlock_irq(&phba->hbalock); if (phba->link_state > LPFC_LINK_DOWN) { phba->link_state = LPFC_LINK_DOWN; if (phba->sli4_hba.conf_trunk) { phba->trunk_link.link0.state = 0; phba->trunk_link.link1.state = 0; phba->trunk_link.link2.state = 0; phba->trunk_link.link3.state = 0; phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN; phba->sli4_hba.link_state.logical_speed = LPFC_LINK_SPEED_UNKNOWN; } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~FC_LBIT; spin_unlock_irq(shost->host_lock); } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); vports[i]->fc_myDID = 0; if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (phba->nvmet_support) lpfc_nvmet_update_targetport(phba); else lpfc_nvme_update_localport(vports[i]); } } } lpfc_destroy_vport_work_array(phba, vports); /* Clean up any SLI3 firmware default rpi's */ if (phba->sli_rev > LPFC_SLI_REV3 || offline) goto skip_unreg_did; mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb); mb->vport = vport; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mb, 
phba->mbox_mem_pool); } } skip_unreg_did: /* Setup myDID for link up if we are in pt2pt mode */ if (phba->pport->fc_flag & FC_PT2PT) { mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_config_link(phba, mb); mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mb->vport = vport; if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(mb, phba->mbox_mem_pool); } } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); phba->pport->rcv_flogi_cnt = 0; spin_unlock_irq(shost->host_lock); } return 0; } static void lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { /* On Linkup its safe to clean up the ndlp * from Fabric connections. */ if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding IO now since device is * marked for PLOGI. */ lpfc_unreg_rpi(vport, ndlp); } } } static void lpfc_linkup_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; if ((vport->load_flag & FC_UNLOADING) != 0) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Link Up: top:x%x speed:x%x flg:x%x", phba->fc_topology, phba->fc_linkspeed, phba->link_flag); /* If NPIV is not enabled, only bring the physical port up */ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (vport != phba->pport)) return; if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); spin_lock_irq(shost->host_lock); if (phba->defer_flogi_acc_flag) vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); else vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); vport->fc_flag |= FC_NDISC_ACTIVE; vport->fc_ns_retry = 0; spin_unlock_irq(shost->host_lock); lpfc_setup_fdmi_mask(vport); lpfc_linkup_cleanup_nodes(vport); } static int lpfc_linkup(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); phba->link_state = LPFC_LINK_UP; /* Unblock fabric iocbs if they are blocked */ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); del_timer_sync(&phba->fabric_block_timer); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); /* Clear the pport flogi counter in case the link down was * absorbed without an ACQE. No lock here - in worker thread * and discovery is synchronized. */ spin_lock_irq(shost->host_lock); phba->pport->rcv_flogi_cnt = 0; spin_unlock_irq(shost->host_lock); /* reinitialize initial HBA flag */ phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL); return 0; } /* * This routine handles processing a CLEAR_LA mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. SLI3 only. 
*/ static void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_sli *psli = &phba->sli; MAILBOX_t *mb = &pmb->u.mb; uint32_t control; /* Since we don't do discovery right now, turn these off here */ psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT; psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0320 CLEAR_LA mbxStatus error x%x hba " "state x%x\n", mb->mbxStatus, vport->port_state); phba->link_state = LPFC_HBA_ERROR; goto out; } if (vport->port_type == LPFC_PHYSICAL_PORT) phba->link_state = LPFC_HBA_READY; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); return; out: /* Device Discovery completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0225 Device Discovery completes\n"); mempool_free(pmb, phba->mbox_mem_pool); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_ABORT_DISCOVERY; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); /* turn on Link Attention interrupts */ spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); return; } void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; LPFC_MBOXQ_t *sparam_mb; u16 status = pmb->u.mb.mbxStatus; int rc; mempool_free(pmb, phba->mbox_mem_pool); if (status) goto out; /* don't perform discovery for SLI4 loopback diagnostic test */ if ((phba->sli_rev == LPFC_SLI_REV4) && !(phba->hba_flag & HBA_FCOE_MODE) && (phba->link_flag & LS_LOOPBACK_MODE)) return; if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && vport->fc_flag & FC_PUBLIC_LOOP && !(vport->fc_flag & FC_LBIT)) { /* Need to wait for FAN - use discovery timer * for timeout. port_state is identically * LPFC_LOCAL_CFG_LINK while waiting for FAN */ lpfc_set_disctmo(vport); return; } /* Start discovery by sending a FLOGI. port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl. */ if (vport->port_state != LPFC_FLOGI) { /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if * bb-credit recovery is in place. 
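/*
 * Illustrative sketch (not lpfc code): lpfc_mbx_cmpl_clear_la() re-enables
 * Link Attention interrupts with a read-modify-write of the host control
 * register followed by a dummy read-back that flushes the posted write, all
 * while holding the hbalock.  The generic shape of that idiom is shown below
 * with a plain volatile pointer; real kernel code would use readl()/writel()
 * on an ioremap()ed address, and CTRL_LA_INT_ENA here is a placeholder bit,
 * not the actual HC_LAINT_ENA value.
 */
#include <stdint.h>

#define CTRL_LA_INT_ENA	(1u << 1)	/* placeholder control bit */

static void enable_link_attention(volatile uint32_t *ctrl_reg)
{
	uint32_t control;

	control = *ctrl_reg;		/* readl(phba->HCregaddr)              */
	control |= CTRL_LA_INT_ENA;	/* set the interrupt-enable bit        */
	*ctrl_reg = control;		/* writel(control, phba->HCregaddr)    */
	(void)*ctrl_reg;		/* read back to flush the posted write */
}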
*/ if (phba->bbcredit_support && phba->cfg_enable_bbcr && !(phba->link_flag & LS_LOOPBACK_MODE)) { sparam_mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!sparam_mb) goto sparam_out; rc = lpfc_read_sparam(phba, sparam_mb, 0); if (rc) { mempool_free(sparam_mb, phba->mbox_mem_pool); goto sparam_out; } sparam_mb->vport = vport; sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_mbox_rsrc_cleanup(phba, sparam_mb, MBOX_THD_UNLOCKED); goto sparam_out; } phba->hba_flag |= HBA_DEFER_FLOGI; } else { lpfc_initial_flogi(vport); } } else { if (vport->fc_flag & FC_PT2PT) lpfc_disc_start(vport); } return; out: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n", status, vport->port_state); sparam_out: lpfc_linkdown(phba); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0200 CONFIG_LINK bad hba state x%x\n", vport->port_state); lpfc_issue_clear_la(phba, vport); return; } /** * lpfc_sli4_clear_fcf_rr_bmask * @phba: pointer to the struct lpfc_hba for this port. * This fucnction resets the round robin bit mask and clears the * fcf priority list. The list deletions are done while holding the * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared * from the lpfc_fcf_pri record. **/ void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba) { struct lpfc_fcf_pri *fcf_pri; struct lpfc_fcf_pri *next_fcf_pri; memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(fcf_pri, next_fcf_pri, &phba->fcf.fcf_pri_list, list) { list_del_init(&fcf_pri->list); fcf_pri->fcf_rec.flag = 0; } spin_unlock_irq(&phba->hbalock); } static void lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; if (mboxq->u.mb.mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2017 REG_FCFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); goto fail_out; } /* Start FCoE discovery by sending a FLOGI. */ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); /* Set the FCFI registered flag */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); /* If there is a pending FCoE event, restart FCF table scan. */ if ((!(phba->hba_flag & FCF_RR_INPROG)) && lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) goto fail_out; /* Mark successful completion of FCF table scan */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; if (vport->port_state != LPFC_FLOGI) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_issue_init_vfi(vport); goto out; } spin_unlock_irq(&phba->hbalock); goto out; fail_out: spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); out: mempool_free(mboxq, phba->mbox_mem_pool); } /** * lpfc_fab_name_match - Check if the fcf fabric name match. * @fab_name: pointer to fabric name. * @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's fabric name with provided * fabric name. If the fabric name are identical this function * returns 1 else return 0. 
**/ static uint32_t lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) { if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) return 0; if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) return 0; if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) return 0; if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) return 0; if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) return 0; if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) return 0; if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) return 0; if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)) return 0; return 1; } /** * lpfc_sw_name_match - Check if the fcf switch name match. * @sw_name: pointer to switch name. * @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's switch name with provided * switch name. If the switch name are identical this function * returns 1 else return 0. **/ static uint32_t lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record) { if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) return 0; if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) return 0; if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) return 0; if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) return 0; if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) return 0; if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) return 0; if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) return 0; if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)) return 0; return 1; } /** * lpfc_mac_addr_match - Check if the fcf mac address match. * @mac_addr: pointer to mac address. * @new_fcf_record: pointer to fcf record. * * This routine compare the fcf record's mac address with HBA's * FCF mac address. If the mac addresses are identical this function * returns 1 else return 0. **/ static uint32_t lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record) { if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) return 0; if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) return 0; if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) return 0; if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) return 0; if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) return 0; if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record)) return 0; return 1; } static bool lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id) { return (curr_vlan_id == new_vlan_id); } /** * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record. * @phba: pointer to lpfc hba data structure. * @fcf_index: Index for the lpfc_fcf_record. * @new_fcf_record: pointer to hba fcf record. * * This routine updates the driver FCF priority record from the new HBA FCF * record. The hbalock is asserted held in the code path calling this * routine. 
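/*
 * Illustrative sketch (not lpfc code): lpfc_fab_name_match(),
 * lpfc_sw_name_match() and lpfc_mac_addr_match() above are unrolled
 * byte-by-byte compares; each byte of the FCF record comes from its own
 * bf_get() bit-field accessor, which is why the driver cannot simply call
 * memcmp().  Conceptually they reduce to the generic helper below.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Fail fast on the first mismatching byte; return true only on a full match. */
static bool bytes_match(const uint8_t *a, const uint8_t *b, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (a[i] != b[i])
			return false;
	return true;
}

/* e.g. bytes_match(fcf_rec->fabric_name, extracted_name, 8) for an 8-byte name. */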
**/ static void __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, struct fcf_record *new_fcf_record ) { struct lpfc_fcf_pri *fcf_pri; fcf_pri = &phba->fcf.fcf_pri[fcf_index]; fcf_pri->fcf_rec.fcf_index = fcf_index; /* FCF record priority */ fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; } /** * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. * @fcf_rec: pointer to driver fcf record. * @new_fcf_record: pointer to fcf record. * * This routine copies the FCF information from the FCF * record to lpfc_hba data structure. **/ static void lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record) { /* Fabric name */ fcf_rec->fabric_name[0] = bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); fcf_rec->fabric_name[1] = bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); fcf_rec->fabric_name[2] = bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); fcf_rec->fabric_name[3] = bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); fcf_rec->fabric_name[4] = bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); fcf_rec->fabric_name[5] = bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); fcf_rec->fabric_name[6] = bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); fcf_rec->fabric_name[7] = bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); /* Mac address */ fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); /* FCF record index */ fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); /* FCF record priority */ fcf_rec->priority = new_fcf_record->fip_priority; /* Switch name */ fcf_rec->switch_name[0] = bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); fcf_rec->switch_name[1] = bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); fcf_rec->switch_name[2] = bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); fcf_rec->switch_name[3] = bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); fcf_rec->switch_name[4] = bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); fcf_rec->switch_name[5] = bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); fcf_rec->switch_name[6] = bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); fcf_rec->switch_name[7] = bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); } /** * __lpfc_update_fcf_record - Update driver fcf record * @phba: pointer to lpfc hba data structure. * @fcf_rec: pointer to driver fcf record. * @new_fcf_record: pointer to hba fcf record. * @addr_mode: address mode to be set to the driver fcf record. * @vlan_id: vlan tag to be set to the driver fcf record. * @flag: flag bits to be set to the driver fcf record. * * This routine updates the driver FCF record from the new HBA FCF record * together with the address mode, vlan_id, and other informations. This * routine is called with the hbalock held. 
**/ static void __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record, uint32_t addr_mode, uint16_t vlan_id, uint32_t flag) { lockdep_assert_held(&phba->hbalock); /* Copy the fields from the HBA's FCF record */ lpfc_copy_fcf_record(fcf_rec, new_fcf_record); /* Update other fields of driver FCF record */ fcf_rec->addr_mode = addr_mode; fcf_rec->vlan_id = vlan_id; fcf_rec->flag |= (flag | RECORD_VALID); __lpfc_update_fcf_record_pri(phba, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), new_fcf_record); } /** * lpfc_register_fcf - Register the FCF with hba. * @phba: pointer to lpfc hba data structure. * * This routine issues a register fcfi mailbox command to register * the fcf with HBA. **/ static void lpfc_register_fcf(struct lpfc_hba *phba) { LPFC_MBOXQ_t *fcf_mbxq; int rc; spin_lock_irq(&phba->hbalock); /* If the FCF is not available do nothing. */ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); return; } /* The FCF is already registered, start discovery */ if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; if (phba->pport->port_state != LPFC_FLOGI && phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); return; } spin_unlock_irq(&phba->hbalock); return; } spin_unlock_irq(&phba->hbalock); fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!fcf_mbxq) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); return; } lpfc_reg_fcfi(phba, fcf_mbxq); fcf_mbxq->vport = phba->pport; fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); mempool_free(fcf_mbxq, phba->mbox_mem_pool); } return; } /** * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. * @phba: pointer to lpfc hba data structure. * @new_fcf_record: pointer to fcf record. * @boot_flag: Indicates if this record used by boot bios. * @addr_mode: The address mode to be used by this FCF * @vlan_id: The vlan id to be used as vlan tagging by this FCF. * * This routine compare the fcf record with connect list obtained from the * config region to decide if this FCF can be used for SAN discovery. It returns * 1 if this record can be used for SAN discovery else return zero. If this FCF * record can be used for SAN discovery, the boot_flag will indicate if this FCF * is used by boot bios and addr_mode will indicate the addressing mode to be * used for this FCF when the function returns. * If the FCF record need to be used with a particular vlan id, the vlan is * set in the vlan_id on return of the function. 
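/*
 * Illustrative sketch (not lpfc code): __lpfc_update_fcf_record() documents
 * its locking contract with lockdep_assert_held(&phba->hbalock) -- the
 * double-underscore variant expects the caller to already hold the lock,
 * while public paths such as lpfc_register_fcf() take and drop the hbalock
 * themselves.  A userspace approximation of that convention, using pthreads
 * and an explicit "held" flag in place of lockdep, with hypothetical names:
 */
#include <assert.h>
#include <pthread.h>

#define FLAG_AVAILABLE	(1u << 0)

struct fcf_state {
	pthread_mutex_t lock;
	int lock_held;			/* crude stand-in for lockdep tracking */
	unsigned int flags;
};

/* "__" variant: caller must hold st->lock (cf. lockdep_assert_held()). */
static void __set_flag(struct fcf_state *st, unsigned int flag)
{
	assert(st->lock_held);
	st->flags |= flag;
}

/* Public variant: acquires the lock, then delegates to the locked helper. */
static void set_flag(struct fcf_state *st, unsigned int flag)
{
	pthread_mutex_lock(&st->lock);
	st->lock_held = 1;
	__set_flag(st, flag);
	st->lock_held = 0;
	pthread_mutex_unlock(&st->lock);
}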
If not VLAN tagging need to * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID; **/ static int lpfc_match_fcf_conn_list(struct lpfc_hba *phba, struct fcf_record *new_fcf_record, uint32_t *boot_flag, uint32_t *addr_mode, uint16_t *vlan_id) { struct lpfc_fcf_conn_entry *conn_entry; int i, j, fcf_vlan_id = 0; /* Find the lowest VLAN id in the FCF record */ for (i = 0; i < 512; i++) { if (new_fcf_record->vlan_bitmap[i]) { fcf_vlan_id = i * 8; j = 0; while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { j++; fcf_vlan_id++; } break; } } /* FCF not valid/available or solicitation in progress */ if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) return 0; if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); if (phba->valid_vlan) *vlan_id = phba->vlan_id; else *vlan_id = LPFC_FCOE_NULL_VID; return 1; } /* * If there are no FCF connection table entry, driver connect to all * FCFs. */ if (list_empty(&phba->fcf_conn_rec_list)) { *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); /* * When there are no FCF connect entries, use driver's default * addressing mode - FPMA. */ if (*addr_mode & LPFC_FCF_FPMA) *addr_mode = LPFC_FCF_FPMA; /* If FCF record report a vlan id use that vlan id */ if (fcf_vlan_id) *vlan_id = fcf_vlan_id; else *vlan_id = LPFC_FCOE_NULL_VID; return 1; } list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) continue; if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, new_fcf_record)) continue; if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) && !lpfc_sw_name_match(conn_entry->conn_rec.switch_name, new_fcf_record)) continue; if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { /* * If the vlan bit map does not have the bit set for the * vlan id to be used, then it is not a match. */ if (!(new_fcf_record->vlan_bitmap [conn_entry->conn_rec.vlan_tag / 8] & (1 << (conn_entry->conn_rec.vlan_tag % 8)))) continue; } /* * If connection record does not support any addressing mode, * skip the FCF record. */ if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) continue; /* * Check if the connection record specifies a required * addressing mode. */ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { /* * If SPMA required but FCF not support this continue. */ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && !(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) & LPFC_FCF_SPMA)) continue; /* * If FPMA required but FCF not support this continue. */ if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && !(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) & LPFC_FCF_FPMA)) continue; } /* * This fcf record matches filtering criteria. */ if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) *boot_flag = 1; else *boot_flag = 0; /* * If user did not specify any addressing mode, or if the * preferred addressing mode specified by user is not supported * by FCF, allow fabric to pick the addressing mode. 
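/*
 * Illustrative sketch (not lpfc code): the loop at the top of
 * lpfc_match_fcf_conn_list() scans the 512-byte vlan_bitmap for the lowest
 * set bit, where bit j of byte i encodes VLAN (i * 8 + j).  The same scan as
 * a standalone helper; returning -1 for an empty bitmap is this sketch's own
 * convention (the driver instead leaves fcf_vlan_id at 0).
 */
#include <stdint.h>

#define VLAN_BITMAP_BYTES 512		/* 4096 possible VLAN IDs */

static int lowest_vlan_id(const uint8_t bitmap[VLAN_BITMAP_BYTES])
{
	int i, j;

	for (i = 0; i < VLAN_BITMAP_BYTES; i++) {
		if (!bitmap[i])
			continue;		/* skip empty bytes quickly */
		for (j = 0; j < 8; j++)
			if (bitmap[i] & (1u << j))
				return i * 8 + j;
	}
	return -1;				/* no VLAN bit set */
}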
*/ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); /* * If the user specified a required address mode, assign that * address mode */ if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) *addr_mode = (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) ? LPFC_FCF_SPMA : LPFC_FCF_FPMA; /* * If the user specified a preferred address mode, use the * addr mode only if FCF support the addr_mode. */ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && (*addr_mode & LPFC_FCF_SPMA)) *addr_mode = LPFC_FCF_SPMA; else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && (*addr_mode & LPFC_FCF_FPMA)) *addr_mode = LPFC_FCF_FPMA; /* If matching connect list has a vlan id, use it */ if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) *vlan_id = conn_entry->conn_rec.vlan_tag; /* * If no vlan id is specified in connect list, use the vlan id * in the FCF record */ else if (fcf_vlan_id) *vlan_id = fcf_vlan_id; else *vlan_id = LPFC_FCOE_NULL_VID; return 1; } return 0; } /** * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. * @phba: pointer to lpfc hba data structure. * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned. * * This function check if there is any fcoe event pending while driver * scan FCF entries. If there is any pending event, it will restart the * FCF saning and return 1 else return 0. */ int lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) { /* * If the Link is up and no FCoE events while in the * FCF discovery, no need to restart FCF discovery. */ if ((phba->link_state >= LPFC_LINK_UP) && (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2768 Pending link or FCF event during current " "handling of the previous event: link_state:x%x, " "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", phba->link_state, phba->fcoe_eventtag_at_fcf_scan, phba->fcoe_eventtag); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_AVAILABLE; spin_unlock_irq(&phba->hbalock); if (phba->link_state >= LPFC_LINK_UP) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2780 Restart FCF table scan due to " "pending FCF event:evt_tag_at_scan:x%x, " "evt_tag_current:x%x\n", phba->fcoe_eventtag_at_fcf_scan, phba->fcoe_eventtag); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); } else { /* * Do not continue FCF discovery and clear FCF_TS_INPROG * flag */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2833 Stop FCF discovery process due to link " "state change (x%x)\n", phba->link_state); spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } /* Unregister the currently registered FCF if required */ if (unreg_fcf) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); lpfc_sli4_unregister_fcf(phba); } return 1; } /** * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record * @phba: pointer to lpfc hba data structure. * @fcf_cnt: number of eligible fcf record seen so far. 
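/*
 * Illustrative sketch (not lpfc code): lpfc_check_pending_fcoe_event()
 * detects events that raced with a long-running FCF table scan by comparing
 * the event tag snapshot taken when the scan started
 * (fcoe_eventtag_at_fcf_scan) against the live counter (fcoe_eventtag).
 * The pattern in isolation, with hypothetical names:
 */
#include <stdbool.h>
#include <stdint.h>

struct scan_ctx {
	uint32_t event_tag;		/* bumped by the async event handler  */
	uint32_t event_tag_at_scan;	/* snapshot taken when the scan began */
};

static void scan_begin(struct scan_ctx *ctx)
{
	ctx->event_tag_at_scan = ctx->event_tag;
}

/* True if any event arrived while the scan was in flight. */
static bool scan_is_stale(const struct scan_ctx *ctx)
{
	return ctx->event_tag != ctx->event_tag_at_scan;
}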
* * This function makes an running random selection decision on FCF record to * use through a sequence of @fcf_cnt eligible FCF records with equal * probability. To perform integer manunipulation of random numbers with * size unit32_t, a 16-bit random number returned from get_random_u16() is * taken as the random random number generated. * * Returns true when outcome is for the newly read FCF record should be * chosen; otherwise, return false when outcome is for keeping the previously * chosen FCF record. **/ static bool lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) { uint32_t rand_num; /* Get 16-bit uniform random number */ rand_num = get_random_u16(); /* Decision with probability 1/fcf_cnt */ if ((fcf_cnt * rand_num) < 0xFFFF) return true; else return false; } /** * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. * @next_fcf_index: pointer to holder of next fcf index. * * This routine parses the non-embedded fcf mailbox command by performing the * necessarily error checking, non-embedded read FCF record mailbox command * SGE parsing, and endianness swapping. * * Returns the pointer to the new FCF record in the non-embedded mailbox * command DMA memory if successfully, other NULL. */ static struct fcf_record * lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, uint16_t *next_fcf_index) { void *virt_addr; struct lpfc_mbx_sge sge; struct lpfc_mbx_read_fcf_tbl *read_fcf; uint32_t shdr_status, shdr_add_status, if_type; union lpfc_sli4_cfg_shdr *shdr; struct fcf_record *new_fcf_record; /* Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); if (unlikely(!mboxq->sge_array)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2524 Failed to get the non-embedded SGE " "virtual address\n"); return NULL; } virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; lpfc_sli_pcimem_bcopy(shdr, shdr, sizeof(union lpfc_sli4_cfg_shdr)); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { if (shdr_status == STATUS_FCF_TABLE_EMPTY || if_type == LPFC_SLI_INTF_IF_TYPE_2) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); else lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2521 READ_FCF_RECORD mailbox failed " "with status x%x add_status x%x, " "mbx\n", shdr_status, shdr_add_status); return NULL; } /* Interpreting the returned information of the FCF record */ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, sizeof(struct lpfc_mbx_read_fcf_tbl)); *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); new_fcf_record = (struct fcf_record *)(virt_addr + sizeof(struct lpfc_mbx_read_fcf_tbl)); lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, offsetof(struct fcf_record, vlan_bitmap)); new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); return new_fcf_record; } /** * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record * @phba: pointer to lpfc hba data structure. * @fcf_record: pointer to the fcf record. * @vlan_id: the lowest vlan identifier associated to this fcf record. 
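/*
 * Illustrative sketch (not lpfc code): lpfc_sli4_new_fcf_random_select()
 * implements a size-one reservoir sample -- accept the n-th equally eligible
 * record with probability 1/n, so after the whole stream each record ends up
 * chosen with equal probability.  The driver tests fcf_cnt * rand16 < 0xFFFF
 * against a get_random_u16() value; the sketch below uses 0x10000 as the
 * bound (so the first candidate is always kept) and rand() as a crude 16-bit
 * source, which is close to, but not byte-for-byte, the driver's arithmetic.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Accept the n-th candidate with probability ~1/n (n >= 1). */
static bool pick_nth_candidate(uint32_t n)
{
	uint32_t rand16 = (uint32_t)rand() & 0xFFFF;

	return n * rand16 < 0x10000;
}

/* Streaming usage: after the loop, "chosen" is uniform over all candidates. */
static int reservoir_pick(const int *candidates, int count)
{
	int i, chosen = -1;

	for (i = 0; i < count; i++)
		if (pick_nth_candidate((uint32_t)i + 1))
			chosen = candidates[i];
	return chosen;
}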
* @next_fcf_index: the index to the next fcf record in hba's fcf table. * * This routine logs the detailed FCF record if the LOG_FIP loggin is * enabled. **/ static void lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, struct fcf_record *fcf_record, uint16_t vlan_id, uint16_t next_fcf_index) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2764 READ_FCF_RECORD:\n" "\tFCF_Index : x%x\n" "\tFCF_Avail : x%x\n" "\tFCF_Valid : x%x\n" "\tFCF_SOL : x%x\n" "\tFIP_Priority : x%x\n" "\tMAC_Provider : x%x\n" "\tLowest VLANID : x%x\n" "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" "\tNext_FCF_Index: x%x\n", bf_get(lpfc_fcf_record_fcf_index, fcf_record), bf_get(lpfc_fcf_record_fcf_avail, fcf_record), bf_get(lpfc_fcf_record_fcf_valid, fcf_record), bf_get(lpfc_fcf_record_fcf_sol, fcf_record), fcf_record->fip_priority, bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), vlan_id, bf_get(lpfc_fcf_record_mac_0, fcf_record), bf_get(lpfc_fcf_record_mac_1, fcf_record), bf_get(lpfc_fcf_record_mac_2, fcf_record), bf_get(lpfc_fcf_record_mac_3, fcf_record), bf_get(lpfc_fcf_record_mac_4, fcf_record), bf_get(lpfc_fcf_record_mac_5, fcf_record), bf_get(lpfc_fcf_record_fab_name_0, fcf_record), bf_get(lpfc_fcf_record_fab_name_1, fcf_record), bf_get(lpfc_fcf_record_fab_name_2, fcf_record), bf_get(lpfc_fcf_record_fab_name_3, fcf_record), bf_get(lpfc_fcf_record_fab_name_4, fcf_record), bf_get(lpfc_fcf_record_fab_name_5, fcf_record), bf_get(lpfc_fcf_record_fab_name_6, fcf_record), bf_get(lpfc_fcf_record_fab_name_7, fcf_record), bf_get(lpfc_fcf_record_switch_name_0, fcf_record), bf_get(lpfc_fcf_record_switch_name_1, fcf_record), bf_get(lpfc_fcf_record_switch_name_2, fcf_record), bf_get(lpfc_fcf_record_switch_name_3, fcf_record), bf_get(lpfc_fcf_record_switch_name_4, fcf_record), bf_get(lpfc_fcf_record_switch_name_5, fcf_record), bf_get(lpfc_fcf_record_switch_name_6, fcf_record), bf_get(lpfc_fcf_record_switch_name_7, fcf_record), next_fcf_index); } /** * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF * @phba: pointer to lpfc hba data structure. * @fcf_rec: pointer to an existing FCF record. * @new_fcf_record: pointer to a new FCF record. * @new_vlan_id: vlan id from the new FCF record. * * This function performs matching test of a new FCF record against an existing * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id * will not be used as part of the FCF record matching criteria. * * Returns true if all the fields matching, otherwise returns false. */ static bool lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record, uint16_t new_vlan_id) { if (new_vlan_id != LPFC_FCOE_IGNORE_VID) if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id)) return false; if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record)) return false; if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record)) return false; if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) return false; if (fcf_rec->priority != new_fcf_record->fip_priority) return false; return true; } /** * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf * @vport: Pointer to vport object. * @fcf_index: index to next fcf. * * This function processing the roundrobin fcf failover to next fcf index. * When this function is invoked, there will be a current fcf registered * for flogi. 
* Return: 0 for continue retrying flogi on currently registered fcf; * 1 for stop flogi on currently registered fcf; */ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) { struct lpfc_hba *phba = vport->phba; int rc; if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { spin_lock_irq(&phba->hbalock); if (phba->hba_flag & HBA_DEVLOSS_TMO) { spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2872 Devloss tmo with no eligible " "FCF, unregister in-use FCF (x%x) " "and rescan FCF table\n", phba->fcf.current_rec.fcf_indx); lpfc_unregister_fcf_rescan(phba); goto stop_flogi_current_fcf; } /* Mark the end to FLOGI roundrobin failover */ phba->hba_flag &= ~FCF_RR_INPROG; /* Allow action to new fcf asynchronous event */ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2865 No FCF available, stop roundrobin FCF " "failover and change port state:x%x/x%x\n", phba->pport->port_state, LPFC_VPORT_UNKNOWN); phba->pport->port_state = LPFC_VPORT_UNKNOWN; if (!phba->fcf.fcf_redisc_attempted) { lpfc_unregister_fcf(phba); rc = lpfc_sli4_redisc_fcf_table(phba); if (!rc) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3195 Rediscover FCF table\n"); phba->fcf.fcf_redisc_attempted = 1; lpfc_sli4_clear_fcf_rr_bmask(phba); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "3196 Rediscover FCF table " "failed. Status:x%x\n", rc); } } else { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "3197 Already rediscover FCF table " "attempted. No more retry\n"); } goto stop_flogi_current_fcf; } else { lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, "2794 Try FLOGI roundrobin FCF failover to " "(x%x)\n", fcf_index); rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); if (rc) lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2761 FLOGI roundrobin FCF failover " "failed (rc:x%x) to read FCF (x%x)\n", rc, phba->fcf.current_rec.fcf_indx); else goto stop_flogi_current_fcf; } return 0; stop_flogi_current_fcf: lpfc_can_disctmo(vport); return 1; } /** * lpfc_sli4_fcf_pri_list_del * @phba: pointer to lpfc hba data structure. * @fcf_index: the index of the fcf record to delete * This routine checks the on list flag of the fcf_index to be deleted. * If it is one the list then it is removed from the list, and the flag * is cleared. This routine grab the hbalock before removing the fcf * record from the list. **/ static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, uint16_t fcf_index) { struct lpfc_fcf_pri *new_fcf_pri; new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3058 deleting idx x%x pri x%x flg x%x\n", fcf_index, new_fcf_pri->fcf_rec.priority, new_fcf_pri->fcf_rec.flag); spin_lock_irq(&phba->hbalock); if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { if (phba->fcf.current_rec.priority == new_fcf_pri->fcf_rec.priority) phba->fcf.eligible_fcf_cnt--; list_del_init(&new_fcf_pri->list); new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; } spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli4_set_fcf_flogi_fail * @phba: pointer to lpfc hba data structure. * @fcf_index: the index of the fcf record to update * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED * flag so the round robin selection for the particular priority level * will try a different fcf record that does not have this bit set. * If the fcf record is re-read for any reason this flag is cleared brfore * adding it to the priority list. 
**/ void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) { struct lpfc_fcf_pri *new_fcf_pri; new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; spin_lock_irq(&phba->hbalock); new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED; spin_unlock_irq(&phba->hbalock); } /** * lpfc_sli4_fcf_pri_list_add * @phba: pointer to lpfc hba data structure. * @fcf_index: the index of the fcf record to add * @new_fcf_record: pointer to a new FCF record. * This routine checks the priority of the fcf_index to be added. * If it is a lower priority than the current head of the fcf_pri list * then it is added to the list in the right order. * If it is the same priority as the current head of the list then it * is added to the head of the list and its bit in the rr_bmask is set. * If the fcf_index to be added is of a higher priority than the current * head of the list then the rr_bmask is cleared, its bit is set in the * rr_bmask and it is added to the head of the list. * returns: * 0=success 1=failure **/ static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, struct fcf_record *new_fcf_record) { uint16_t current_fcf_pri; uint16_t last_index; struct lpfc_fcf_pri *fcf_pri; struct lpfc_fcf_pri *next_fcf_pri; struct lpfc_fcf_pri *new_fcf_pri; int ret; new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3059 adding idx x%x pri x%x flg x%x\n", fcf_index, new_fcf_record->fip_priority, new_fcf_pri->fcf_rec.flag); spin_lock_irq(&phba->hbalock); if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) list_del_init(&new_fcf_pri->list); new_fcf_pri->fcf_rec.fcf_index = fcf_index; new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; if (list_empty(&phba->fcf.fcf_pri_list)) { list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); ret = lpfc_sli4_fcf_rr_index_set(phba, new_fcf_pri->fcf_rec.fcf_index); goto out; } last_index = find_first_bit(phba->fcf.fcf_rr_bmask, LPFC_SLI4_FCF_TBL_INDX_MAX); if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { ret = 0; /* Empty rr list */ goto out; } current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); /* fcfs_at_this_priority_level = 1; */ phba->fcf.eligible_fcf_cnt = 1; } else /* fcfs_at_this_priority_level++; */ phba->fcf.eligible_fcf_cnt++; ret = lpfc_sli4_fcf_rr_index_set(phba, new_fcf_pri->fcf_rec.fcf_index); goto out; } list_for_each_entry_safe(fcf_pri, next_fcf_pri, &phba->fcf.fcf_pri_list, list) { if (new_fcf_pri->fcf_rec.priority <= fcf_pri->fcf_rec.priority) { if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); else list_add(&new_fcf_pri->list, &((struct lpfc_fcf_pri *) fcf_pri->list.prev)->list); ret = 0; goto out; } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list || new_fcf_pri->fcf_rec.priority < next_fcf_pri->fcf_rec.priority) { list_add(&new_fcf_pri->list, &fcf_pri->list); ret = 0; goto out; } if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) continue; } ret = 1; out: /* we use = instead of |= to clear the FLOGI_FAILED flag. */ new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; spin_unlock_irq(&phba->hbalock); return ret; } /** * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. 
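/*
 * Illustrative sketch (not lpfc code): lpfc_sli4_fcf_pri_list_add() keeps the
 * FCF priority list ordered so the lowest (most preferred) FIP priority
 * values sit at the head, while also maintaining the roundrobin bitmask and
 * the eligible-FCF count for the head priority level.  Stripped of the bmask
 * bookkeeping and the kernel list_head plumbing, the ordering part is a
 * sorted insertion, sketched here with a simple singly linked list:
 */
#include <stddef.h>

struct pri_node {
	struct pri_node *next;
	unsigned int priority;		/* lower value = preferred */
	unsigned int fcf_index;
};

/* Insert @node keeping ascending priority order (best entries at the head). */
static void pri_list_insert(struct pri_node **head, struct pri_node *node)
{
	struct pri_node **pp = head;

	while (*pp && (*pp)->priority <= node->priority)
		pp = &(*pp)->next;
	node->next = *pp;
	*pp = node;
}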
* * This function iterates through all the fcf records available in * HBA and chooses the optimal FCF record for discovery. After finding * the FCF for discovery it registers the FCF record and kicks start * discovery. * If FCF_IN_USE flag is set in currently used FCF, the routine tries to * use an FCF record which matches fabric name and mac address of the * currently used FCF record. * If the driver supports only one FCF, it will try to use the FCF record * used by BOOT_BIOS. */ void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct fcf_record *new_fcf_record; uint32_t boot_flag, addr_mode; uint16_t fcf_index, next_fcf_index; struct lpfc_fcf_rec *fcf_rec = NULL; uint16_t vlan_id = LPFC_FCOE_NULL_VID; bool select_new_fcf; int rc; /* If there is pending FCoE event restart FCF table scan */ if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } /* Parse the FCF record from the non-embedded mailbox command */ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2765 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); /* Let next new FCF event trigger fast failover */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } /* Check the FCF record against the connection list */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); /* Log the FCF record information if turned on */ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, next_fcf_index); /* * If the fcf record does not match with connect list entries * read the next entry; otherwise, this is an eligible FCF * record for roundrobin FCF failover. */ if (!rc) { lpfc_sli4_fcf_pri_list_del(phba, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2781 FCF (x%x) failed connection " "list check: (x%x/x%x/%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record), bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record), bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)); if ((phba->fcf.fcf_flag & FCF_IN_USE) && lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, LPFC_FCOE_IGNORE_VID)) { if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != phba->fcf.current_rec.fcf_indx) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2862 FCF (x%x) matches property " "of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), phba->fcf.current_rec.fcf_indx); goto read_next_fcf; } /* * In case the current in-use FCF record becomes * invalid/unavailable during FCF discovery that * was not triggered by fast FCF failover process, * treat it as fast FCF failover. 
*/ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2835 Invalid in-use FCF " "(x%x), enter FCF failover " "table scan.\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_sli4_mbox_cmd_free(phba, mboxq); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); return; } } goto read_next_fcf; } else { fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); if (rc) goto read_next_fcf; } /* * If this is not the first FCF discovery of the HBA, use last * FCF record for the discovery. The condition that a rescan * matches the in-use FCF record: fabric name, switch name, mac * address, and vlan_id. */ spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_IN_USE) { if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, vlan_id)) { if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == phba->fcf.current_rec.fcf_indx) { phba->fcf.fcf_flag |= FCF_AVAILABLE; if (phba->fcf.fcf_flag & FCF_REDISC_PEND) /* Stop FCF redisc wait timer */ __lpfc_sli4_stop_fcf_redisc_wait_timer( phba); else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* Fast failover, mark completed */ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " "FCF (x%x), port_state:x%x, " "fc_flag:x%x\n", phba->fcf.current_rec.fcf_indx, phba->pport->port_state, phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2863 New FCF (x%x) matches " "property of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), phba->fcf.current_rec.fcf_indx); } /* * Read next FCF record from HBA searching for the matching * with in-use record only if not during the fast failover * period. In case of fast failover period, it shall try to * determine whether the FCF record just read should be the * next candidate. */ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } } /* * Update on failover FCF record only if it's in FCF fast-failover * period; otherwise, update on current FCF record. */ if (phba->fcf.fcf_flag & FCF_REDISC_FOV) fcf_rec = &phba->fcf.failover_rec; else fcf_rec = &phba->fcf.current_rec; if (phba->fcf.fcf_flag & FCF_AVAILABLE) { /* * If the driver FCF record does not have boot flag * set and new hba fcf record has boot flag set, use * the new hba fcf record. */ if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { /* Choose this FCF record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2837 Update current FCF record " "(x%x) with new FCF record (x%x)\n", fcf_rec->fcf_indx, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, BOOT_ENABLE); spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * If the driver FCF record has boot flag set and the * new hba FCF record does not have boot flag, read * the next FCF record. */ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * If the new hba FCF record has lower priority value * than the driver FCF record, use the new record. 
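/*
 * Illustrative sketch (not lpfc code): the candidate-selection order used by
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec() around this point is (1) a record
 * advertised by the boot BIOS beats one that is not, (2) otherwise the lower
 * FIP priority value wins, and (3) among records of equal priority a uniform
 * random choice is made, counting the ties in eligible_fcf_cnt.  Condensed
 * into one hypothetical helper (repeating the 1/n pick sketched earlier):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct fcf_candidate {
	bool boot;			/* used by the boot BIOS            */
	uint8_t priority;		/* FIP priority, lower is preferred */
};

static bool keep_nth(uint32_t n)	/* accept with probability ~1/n */
{
	return n * ((uint32_t)rand() & 0xFFFF) < 0x10000;
}

/* Return true if @new_rec should replace @cur as the running best candidate. */
static bool prefer_new_fcf(const struct fcf_candidate *cur,
			   const struct fcf_candidate *new_rec,
			   uint32_t *eligible_cnt)
{
	if (new_rec->boot && !cur->boot)
		return true;
	if (!new_rec->boot && cur->boot)
		return false;
	if (new_rec->priority < cur->priority) {
		*eligible_cnt = 1;	/* new best priority level found    */
		return true;
	}
	if (new_rec->priority == cur->priority) {
		(*eligible_cnt)++;	/* one more tie at this level       */
		return keep_nth(*eligible_cnt);
	}
	return false;			/* worse priority: keep the current */
}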
*/ if (new_fcf_record->fip_priority < fcf_rec->priority) { /* Choose the new FCF record with lower priority */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2838 Update current FCF record " "(x%x) with new FCF record (x%x)\n", fcf_rec->fcf_indx, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); /* Reset running random FCF selection count */ phba->fcf.eligible_fcf_cnt = 1; } else if (new_fcf_record->fip_priority == fcf_rec->priority) { /* Update running random FCF selection count */ phba->fcf.eligible_fcf_cnt++; select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, phba->fcf.eligible_fcf_cnt); if (select_new_fcf) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2839 Update current FCF record " "(x%x) with new FCF record (x%x)\n", fcf_rec->fcf_indx, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); /* Choose the new FCF by random selection */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); } } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * This is the first suitable FCF record, choose this record for * initial best-fit FCF. */ if (fcf_rec) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2840 Update initial FCF candidate " "with FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, (boot_flag ? BOOT_ENABLE : 0)); phba->fcf.fcf_flag |= FCF_AVAILABLE; /* Setup initial running random FCF selection count */ phba->fcf.eligible_fcf_cnt = 1; } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; read_next_fcf: lpfc_sli4_mbox_cmd_free(phba, mboxq); if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { /* * Case of FCF fast failover scan */ /* * It has not found any suitable FCF record, cancel * FCF scan inprogress, and do nothing */ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2782 No suitable FCF found: " "(x%x/x%x)\n", phba->fcoe_eventtag_at_fcf_scan, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); spin_lock_irq(&phba->hbalock); if (phba->hba_flag & HBA_DEVLOSS_TMO) { phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); /* Unregister in-use FCF and rescan */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2864 On devloss tmo " "unreg in-use FCF and " "rescan FCF table\n"); lpfc_unregister_fcf_rescan(phba); return; } /* * Let next new FCF event trigger fast failover */ phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); return; } /* * It has found a suitable FCF record that is not * the same as in-use FCF record, unregister the * in-use FCF record, replace the in-use FCF record * with the new FCF record, mark FCF fast failover * completed, and then start register the new FCF * record. */ /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); /* Replace in-use record with the new record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2842 Replace in-use FCF (x%x) " "with failover FCF (x%x)\n", phba->fcf.current_rec.fcf_indx, phba->fcf.failover_rec.fcf_indx); memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); /* * Mark the fast FCF failover rediscovery completed * and the start of the first round of the roundrobin * FCF failover. 
*/ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); /* Register to the new FCF record */ lpfc_register_fcf(phba); } else { /* * In case of transaction period to fast FCF failover, * do nothing when search to the end of the FCF table. */ if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || (phba->fcf.fcf_flag & FCF_REDISC_PEND)) return; if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && phba->fcf.fcf_flag & FCF_IN_USE) { /* * In case the current in-use FCF record no * longer existed during FCF discovery that * was not triggered by fast FCF failover * process, treat it as fast FCF failover. */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2841 In-use FCF record (x%x) " "not reported, entering fast " "FCF failover mode scanning.\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); return; } /* Register to the new FCF record */ lpfc_register_fcf(phba); } } else lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); return; out: lpfc_sli4_mbox_cmd_free(phba, mboxq); lpfc_register_fcf(phba); return; } /** * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. * * This is the callback function for FLOGI failure roundrobin FCF failover * read FCF record mailbox command from the eligible FCF record bmask for * performing the failover. If the FCF read back is not valid/available, it * fails through to retrying FLOGI to the currently registered FCF again. * Otherwise, if the FCF read back is valid and available, it will set the * newly read FCF record to the failover FCF record, unregister currently * registered FCF record, copy the failover FCF record to the current * FCF record, and then register the current FCF record before proceeding * to trying FLOGI on the new failover FCF. */ void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct fcf_record *new_fcf_record; uint32_t boot_flag, addr_mode; uint16_t next_fcf_index, fcf_index; uint16_t current_fcf_index; uint16_t vlan_id = LPFC_FCOE_NULL_VID; int rc; /* If link state is not up, stop the roundrobin failover process */ if (phba->link_state < LPFC_LINK_UP) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); goto out; } /* Parse the FCF record from the non-embedded mailbox command */ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record. 
" "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, phba->fcf.fcf_flag); lpfc_unregister_fcf_rescan(phba); goto out; } /* Get the needed parameters from FCF record */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); /* Log the FCF record information if turned on */ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, next_fcf_index); fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); if (!rc) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2848 Remove ineligible FCF (x%x) from " "from roundrobin bmask\n", fcf_index); /* Clear roundrobin bmask bit for ineligible FCF */ lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); /* Perform next round of roundrobin FCF failover */ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); if (rc) goto out; goto error_out; } if (fcf_index == phba->fcf.current_rec.fcf_indx) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2760 Perform FLOGI roundrobin FCF failover: " "FCF (x%x) back to FCF (x%x)\n", phba->fcf.current_rec.fcf_indx, fcf_index); /* Wait 500 ms before retrying FLOGI to current FCF */ msleep(500); lpfc_issue_init_vfi(phba->pport); goto out; } /* Upload new FCF record to the failover FCF record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2834 Update current FCF (x%x) with new FCF (x%x)\n", phba->fcf.failover_rec.fcf_indx, fcf_index); spin_lock_irq(&phba->hbalock); __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, new_fcf_record, addr_mode, vlan_id, (boot_flag ? BOOT_ENABLE : 0)); spin_unlock_irq(&phba->hbalock); current_fcf_index = phba->fcf.current_rec.fcf_indx; /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); /* Replace in-use record with the new record */ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2783 Perform FLOGI roundrobin FCF failover: FCF " "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); error_out: lpfc_register_fcf(phba); out: lpfc_sli4_mbox_cmd_free(phba, mboxq); } /** * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. * * This is the callback function of read FCF record mailbox command for * updating the eligible FCF bmask for FLOGI failure roundrobin FCF * failover when a new FCF event happened. If the FCF read back is * valid/available and it passes the connection list check, it updates * the bmask for the eligible FCF record for roundrobin failover. 
*/ void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct fcf_record *new_fcf_record; uint32_t boot_flag, addr_mode; uint16_t fcf_index, next_fcf_index; uint16_t vlan_id = LPFC_FCOE_NULL_VID; int rc; /* If link state is not up, no need to proceed */ if (phba->link_state < LPFC_LINK_UP) goto out; /* If FCF discovery period is over, no need to proceed */ if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) goto out; /* Parse the FCF record from the non-embedded mailbox command */ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2767 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); goto out; } /* Check the connection list for eligibility */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); /* Log the FCF record information if turned on */ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, next_fcf_index); if (!rc) goto out; /* Update the eligible FCF record index bmask */ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); out: lpfc_sli4_mbox_cmd_free(phba, mboxq); } /** * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox data structure. * * This function handles completion of init vfi mailbox command. */ static void lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; /* * VFI not supported on interface type 0, just do the flogi * Also continue if the VFI is in use - just use the same one. */ if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0) && mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2891 Init VFI mailbox failed 0x%x\n", mboxq->u.mb.mbxStatus); mempool_free(mboxq, phba->mbox_mem_pool); lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } lpfc_initial_flogi(vport); mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_issue_init_vfi - Issue init_vfi mailbox command. * @vport: pointer to lpfc_vport data structure. * * This function issue a init_vfi mailbox command to initialize the VFI and * VPI for the physical port. */ void lpfc_issue_init_vfi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; int rc; struct lpfc_hba *phba = vport->phba; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2892 Failed to allocate " "init_vfi mailbox\n"); return; } lpfc_init_vfi(mboxq, vport); mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2893 Failed to issue init_vfi mailbox\n"); mempool_free(mboxq, vport->phba->mbox_mem_pool); } } /** * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox data structure. * * This function handles completion of init vpi mailbox command. 
*/ void lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (mboxq->u.mb.mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2609 Init VPI mailbox failed 0x%x\n", mboxq->u.mb.mbxStatus); mempool_free(mboxq, phba->mbox_mem_pool); lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); /* If this port is physical port or FDISC is done, do reg_vpi */ if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2731 Cannot find fabric " "controller node\n"); else lpfc_register_new_vport(phba, vport, ndlp); mempool_free(mboxq, phba->mbox_mem_pool); return; } if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) lpfc_initial_fdisc(vport); else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2606 No NPIV Fabric support\n"); } mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_issue_init_vpi - Issue init_vpi mailbox command. * @vport: pointer to lpfc_vport data structure. * * This function issue a init_vpi mailbox command to initialize * VPI for the vport. */ void lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; int rc, vpi; if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { vpi = lpfc_alloc_vpi(vport->phba); if (!vpi) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3303 Failed to obtain vport vpi\n"); lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } vport->vpi = vpi; } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2607 Failed to allocate " "init_vpi mailbox\n"); return; } lpfc_init_vpi(vport->phba, mboxq, vport->vpi); mboxq->vport = vport; mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2608 Failed to issue init_vpi mailbox\n"); mempool_free(mboxq, vport->phba->mbox_mem_pool); } } /** * lpfc_start_fdiscs - send fdiscs for each vports on this port. * @phba: pointer to lpfc hba data structure. * * This function loops through the list of vports on the @phba and issues an * FDISC if possible. 
*/ void lpfc_start_fdiscs(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { if (vports[i]->port_type == LPFC_PHYSICAL_PORT) continue; /* There are no vpi for this vport */ if (vports[i]->vpi > phba->max_vpi) { lpfc_vport_set_state(vports[i], FC_VPORT_FAILED); continue; } if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { lpfc_vport_set_state(vports[i], FC_VPORT_LINKDOWN); continue; } if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { lpfc_issue_init_vpi(vports[i]); continue; } if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) lpfc_initial_fdisc(vports[i]); else { lpfc_vport_set_state(vports[i], FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vports[i], KERN_ERR, LOG_TRACE_EVENT, "0259 No NPIV " "Fabric support\n"); } } } lpfc_destroy_vport_work_array(phba, vports); } void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* * VFI not supported for interface type 0, so ignore any mailbox * error (except VFI in use) and continue with the discovery. */ if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0) && mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2018 REG_VFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { /* FLOGI failed, use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); goto out_free_mem; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out_free_mem; } /* If the VFI is already registered, there is nothing else to do * Unless this was a VFI update and we are in PT2PT mode, then * we should drop through to set the port state to ready. */ if (vport->fc_flag & FC_VFI_REGISTERED) if (!(phba->sli_rev == LPFC_SLI_REV4 && vport->fc_flag & FC_PT2PT)) goto out_free_mem; /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); vport->vpi_state |= LPFC_VPI_REGISTERED; vport->fc_flag |= FC_VFI_REGISTERED; vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); /* In case SLI4 FC loopback test, we are ready */ if ((phba->sli_rev == LPFC_SLI_REV4) && (phba->link_flag & LS_LOOPBACK_MODE)) { phba->link_state = LPFC_HBA_READY; goto out_free_mem; } lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " "alpacnt:%d LinkState:%x topology:%x\n", vport->port_state, vport->fc_flag, vport->fc_myDID, vport->phba->alpa_map[0], phba->link_state, phba->fc_topology); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* * For private loop or for NPort pt2pt, * just start discovery and we are done. 
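 * For the remaining (fabric-attached) configurations, the else branch
 * below starts FDISCs for the NPIV vports and then calls
 * lpfc_do_scr_ns_plogi() for this port.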
*/ if ((vport->fc_flag & FC_PT2PT) || ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && !(vport->fc_flag & FC_PUBLIC_LOOP))) { /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ if (vport->fc_flag & FC_PT2PT) vport->port_state = LPFC_VPORT_READY; else lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } } out_free_mem: lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); } static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf; struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct serv_parm *sp = &vport->fc_sparam; uint32_t ed_tov; /* Check for error */ if (mb->mbxStatus) { /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0319 READ_SPARAM mbxStatus error x%x " "hba state x%x>\n", mb->mbxStatus, vport->port_state); lpfc_linkdown(phba); goto out; } memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, sizeof (struct serv_parm)); ed_tov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ ed_tov = (ed_tov + 999999) / 1000000; phba->fc_edtov = ed_tov; phba->fc_ratov = (2 * ed_tov) / 1000; if (phba->fc_ratov < FF_DEF_RATOV) { /* RA_TOV should be atleast 10sec for initial flogi */ phba->fc_ratov = FF_DEF_RATOV; } lpfc_update_vport_wwn(vport); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); if (vport->port_type == LPFC_PHYSICAL_PORT) { memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); } lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); /* Check if sending the FLOGI is being deferred to after we get * up to date CSPs from MBX_READ_SPARAM. 
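 * If HBA_DEFER_FLOGI is set, the deferred FLOGI is issued now and the
 * flag is cleared.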
*/ if (phba->hba_flag & HBA_DEFER_FLOGI) { lpfc_initial_flogi(vport); phba->hba_flag &= ~HBA_DEFER_FLOGI; } return; out: lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); lpfc_issue_clear_la(phba, vport); } static void lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) { struct lpfc_vport *vport = phba->pport; LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; struct Scsi_Host *shost; int i; int rc; struct fcf_record *fcf_record; uint32_t fc_flags = 0; unsigned long iflags; spin_lock_irqsave(&phba->hbalock, iflags); phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); if (!(phba->hba_flag & HBA_FCOE_MODE)) { switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { case LPFC_LINK_SPEED_1GHZ: case LPFC_LINK_SPEED_2GHZ: case LPFC_LINK_SPEED_4GHZ: case LPFC_LINK_SPEED_8GHZ: case LPFC_LINK_SPEED_10GHZ: case LPFC_LINK_SPEED_16GHZ: case LPFC_LINK_SPEED_32GHZ: case LPFC_LINK_SPEED_64GHZ: case LPFC_LINK_SPEED_128GHZ: case LPFC_LINK_SPEED_256GHZ: break; default: phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; break; } } if (phba->fc_topology && phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3314 Toplogy changed was 0x%x is 0x%x\n", phba->fc_topology, bf_get(lpfc_mbx_read_top_topology, la)); phba->fc_topology_changed = 1; } phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA); shost = lpfc_shost_from_vport(vport); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; /* if npiv is enabled and this adapter supports npiv log * a message that npiv is not supported in this topology */ if (phba->cfg_enable_npiv && phba->max_vpi) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1309 Link Up Event npiv not supported in loop " "topology\n"); /* Get Loop Map information */ if (bf_get(lpfc_mbx_read_top_il, la)) fc_flags |= FC_LBIT; vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); i = la->lilpBde64.tus.f.bdeSize; if (i == 0) { phba->alpa_map[0] = 0; } else { if (vport->cfg_log_verbose & LOG_LINK_EVENT) { int numalpa, j, k; union { uint8_t pamap[16]; struct { uint32_t wd1; uint32_t wd2; uint32_t wd3; uint32_t wd4; } pa; } un; numalpa = phba->alpa_map[0]; j = 0; while (j < numalpa) { memset(un.pamap, 0, 16); for (k = 1; j < numalpa; k++) { un.pamap[k - 1] = phba->alpa_map[j + 1]; j++; if (k == 16) break; } /* Link Up Event ALPA map */ lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, "1304 Link Up Event " "ALPA map Data: x%x " "x%x x%x x%x\n", un.pa.wd1, un.pa.wd2, un.pa.wd3, un.pa.wd4); } } } } else { if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { if (phba->max_vpi && phba->cfg_enable_npiv && (phba->sli_rev >= LPFC_SLI_REV3)) phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; } vport->fc_myDID = phba->fc_pref_DID; fc_flags |= FC_LBIT; } spin_unlock_irqrestore(&phba->hbalock, iflags); if (fc_flags) { spin_lock_irqsave(shost->host_lock, iflags); vport->fc_flag |= fc_flags; spin_unlock_irqrestore(shost->host_lock, iflags); } lpfc_linkup(phba); sparam_mbox = NULL; sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!sparam_mbox) goto out; rc = lpfc_read_sparam(phba, sparam_mbox, 0); if (rc) { mempool_free(sparam_mbox, phba->mbox_mem_pool); goto out; } sparam_mbox->vport = vport; sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED); goto out; } if 
(!(phba->hba_flag & HBA_FCOE_MODE)) { cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!cfglink_mbox) goto out; vport->port_state = LPFC_LOCAL_CFG_LINK; lpfc_config_link(phba, cfglink_mbox); cfglink_mbox->vport = vport; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(cfglink_mbox, phba->mbox_mem_pool); goto out; } } else { vport->port_state = LPFC_VPORT_UNKNOWN; /* * Add the driver's default FCF record at FCF index 0 now. This * is phase 1 implementation that support FCF index 0 and driver * defaults. */ if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL); if (unlikely(!fcf_record)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2554 Could not allocate memory for " "fcf record\n"); rc = -ENODEV; goto out; } lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, LPFC_FCOE_FCF_DEF_INDEX); rc = lpfc_sli4_add_fcf_record(phba, fcf_record); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2013 Could not manually add FCF " "record 0, status %d\n", rc); rc = -ENODEV; kfree(fcf_record); goto out; } kfree(fcf_record); } /* * The driver is expected to do FIP/FCF. Call the port * and get the FCF Table. */ spin_lock_irqsave(&phba->hbalock, iflags); if (phba->hba_flag & FCF_TS_INPROG) { spin_unlock_irqrestore(&phba->hbalock, iflags); return; } /* This is the initial FCF discovery scan */ phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2778 Start FCF table scan at linkup\n"); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) { spin_lock_irqsave(&phba->hbalock, iflags); phba->fcf.fcf_flag &= ~FCF_INIT_DISC; spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } /* Reset FCF roundrobin bmask for new discovery */ lpfc_sli4_clear_fcf_rr_bmask(phba); } /* Prepare for LINK up registrations */ memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", init_utsname()->nodename); return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", vport->port_state, sparam_mbox, cfglink_mbox); lpfc_issue_clear_la(phba, vport); return; } static void lpfc_enable_la(struct lpfc_hba *phba) { uint32_t control; struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; if (phba->sli_rev <= LPFC_SLI_REV3) { control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } spin_unlock_irq(&phba->hbalock); } static void lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { lpfc_linkdown(phba); lpfc_enable_la(phba); lpfc_unregister_unused_fcf(phba); /* turn on Link Attention interrupts - no CLEAR_LA needed */ } /* * This routine handles processing a READ_TOPOLOGY mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. SLI4 only. 
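 * A link-up attention is handed to lpfc_mbx_process_link_up(); link
 * down and unexpected-WWPN attentions go through
 * lpfc_mbx_issue_link_down(). A mailbox error forces the link down and
 * leaves the HBA in the error state.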
*/ void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_mbx_read_top *la; struct lpfc_sli_ring *pring; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); uint8_t attn_type; unsigned long iflags; /* Unblock ELS traffic */ pring = lpfc_phba_elsring(phba); if (pring) pring->flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if (mb->mbxStatus) { lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "1307 READ_LA mbox error x%x state x%x\n", mb->mbxStatus, vport->port_state); lpfc_mbx_issue_link_down(phba); phba->link_state = LPFC_HBA_ERROR; goto lpfc_mbx_cmpl_read_topology_free_mbuf; } la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; attn_type = bf_get(lpfc_mbx_read_top_att_type, la); memcpy(&phba->alpa_map[0], mp->virt, 128); spin_lock_irqsave(shost->host_lock, iflags); if (bf_get(lpfc_mbx_read_top_pb, la)) vport->fc_flag |= FC_BYPASSED_MODE; else vport->fc_flag &= ~FC_BYPASSED_MODE; spin_unlock_irqrestore(shost->host_lock, iflags); if (phba->fc_eventTag <= la->eventTag) { phba->fc_stat.LinkMultiEvent++; if (attn_type == LPFC_ATT_LINK_UP) if (phba->fc_eventTag != 0) lpfc_linkdown(phba); } phba->fc_eventTag = la->eventTag; phba->link_events++; if (attn_type == LPFC_ATT_LINK_UP) { phba->fc_stat.LinkUp++; if (phba->link_flag & LS_LOOPBACK_MODE) { lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1306 Link Up Event in loop back mode " "x%x received Data: x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, bf_get(lpfc_mbx_read_top_alpa_granted, la), bf_get(lpfc_mbx_read_top_link_spd, la), phba->alpa_map[0]); } else { lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1303 Link Up Event x%x received " "Data: x%x x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, bf_get(lpfc_mbx_read_top_alpa_granted, la), bf_get(lpfc_mbx_read_top_link_spd, la), phba->alpa_map[0], bf_get(lpfc_mbx_read_top_fa, la)); } lpfc_mbx_process_link_up(phba, la); if (phba->cmf_active_mode != LPFC_CFG_OFF) lpfc_cmf_signal_init(phba); if (phba->lmt & LMT_64Gb) lpfc_read_lds_params(phba); } else if (attn_type == LPFC_ATT_LINK_DOWN || attn_type == LPFC_ATT_UNEXP_WWPN) { phba->fc_stat.LinkDown++; if (phba->link_flag & LS_LOOPBACK_MODE) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1308 Link Down Event in loop back mode " "x%x received " "Data: x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, phba->pport->port_state, vport->fc_flag); else if (attn_type == LPFC_ATT_UNEXP_WWPN) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1313 Link Down Unexpected FA WWPN Event x%x " "received Data: x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, phba->pport->port_state, vport->fc_flag, bf_get(lpfc_mbx_read_top_fa, la)); else lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1305 Link Down Event x%x received " "Data: x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, phba->pport->port_state, vport->fc_flag, bf_get(lpfc_mbx_read_top_fa, la)); lpfc_mbx_issue_link_down(phba); } if ((phba->sli_rev < LPFC_SLI_REV4) && bf_get(lpfc_mbx_read_top_fa, la)) lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "1311 fa %d\n", bf_get(lpfc_mbx_read_top_fa, la)); lpfc_mbx_cmpl_read_topology_free_mbuf: lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); } /* * This routine handles processing a REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. 
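 * If an RSCN cycled the node out of NLP_STE_REG_LOGIN_ISSUE while this
 * mailbox was outstanding, the stale RPI is unregistered first; the
 * node state machine is then driven with NLP_EVT_CMPL_REG_LOGIN and
 * the node reference held for this callback is dropped.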
*/ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf; struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; /* The driver calls the state machine with the pmb pointer * but wants to make sure a stale ctx_buf isn't acted on. * The ctx_buf is restored later and cleaned up. */ pmb->ctx_buf = NULL; pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, "0002 rpi:%x DID:%x flg:%x %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { /* We rcvd a rscn after issuing this * mbox reg login, we may have cycled * back through the state and be * back at reg login state so this * mbox needs to be ignored becase * there is another reg login in * process. */ spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock_irq(&ndlp->lock); /* * We cannot leave the RPI registered because * if we go thru discovery again for this ndlp * a subsequent REG_RPI will fail. */ ndlp->nlp_flag |= NLP_RPI_REGISTERED; lpfc_unreg_rpi(vport, ndlp); } /* Call state machine */ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); pmb->ctx_buf = mp; lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); /* decrement the node reference count held for this callback * function. */ lpfc_nlp_put(ndlp); return; } static void lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); switch (mb->mbxStatus) { case 0x0011: case 0x0020: lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0911 cmpl_unreg_vpi, mb status = 0x%x\n", mb->mbxStatus); break; /* If VPI is busy, reset the HBA */ case 0x9700: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", vport->vpi, mb->mbxStatus); if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_workq_post_event(phba, NULL, NULL, LPFC_EVT_RESET_HBA); } spin_lock_irq(shost->host_lock); vport->vpi_state &= ~LPFC_VPI_REGISTERED; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); mempool_free(pmb, phba->mbox_mem_pool); lpfc_cleanup_vports_rrqs(vport, NULL); /* * This shost reference might have been taken at the beginning of * lpfc_vport_delete() */ if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) scsi_host_put(shost); } int lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return 1; lpfc_unreg_vpi(phba, vport->vpi, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1800 Could not issue unreg_vpi\n"); mempool_free(mbox, phba->mbox_mem_pool); return rc; } return 0; } static void lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); MAILBOX_t *mb = &pmb->u.mb; switch (mb->mbxStatus) { case 0x0011: case 0x9601: case 0x9602: lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0912 cmpl_reg_vpi, mb status = 0x%x\n", 
mb->mbxStatus); lpfc_vport_set_state(vport, FC_VPORT_FAILED); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); spin_unlock_irq(shost->host_lock); vport->fc_myDID = 0; if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (phba->nvmet_support) lpfc_nvmet_update_targetport(phba); else lpfc_nvme_update_localport(vport); } goto out; } spin_lock_irq(shost->host_lock); vport->vpi_state |= LPFC_VPI_REGISTERED; vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); vport->num_disc_nodes = 0; /* go thru NPR list and issue ELS PLOGIs */ if (vport->fc_npr_cnt) lpfc_els_disc_plogi(vport); if (!vport->num_disc_nodes) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); } vport->port_state = LPFC_VPORT_READY; out: mempool_free(pmb, phba->mbox_mem_pool); return; } /** * lpfc_create_static_vport - Read HBA config region to create static vports. * @phba: pointer to lpfc hba data structure. * * This routine issue a DUMP mailbox command for config region 22 to get * the list of static vports to be created. The function create vports * based on the information returned from the HBA. **/ void lpfc_create_static_vport(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmb = NULL; MAILBOX_t *mb; struct static_vport_info *vport_info; int mbx_wait_rc = 0, i; struct fc_vport_identifiers vport_id; struct fc_vport *new_fc_vport; struct Scsi_Host *shost; struct lpfc_vport *vport; uint16_t offset = 0; uint8_t *vport_buff; struct lpfc_dmabuf *mp; uint32_t byte_count = 0; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0542 lpfc_create_static_vport failed to" " allocate mailbox memory\n"); return; } memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb = &pmb->u.mb; vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); if (!vport_info) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0543 lpfc_create_static_vport failed to" " allocate vport_info\n"); mempool_free(pmb, phba->mbox_mem_pool); return; } vport_buff = (uint8_t *) vport_info; do { /* While loop iteration forces a free dma buffer from * the previous loop because the mbox is reused and * the dump routine is a single-use construct. 
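 * Each iteration dumps another chunk of the static vport config
 * region into vport_buff; SLI4 takes the byte count from the MQE
 * words while SLI3 copies from the DUMP response area.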
*/ if (pmb->ctx_buf) { mp = (struct lpfc_dmabuf *)pmb->ctx_buf; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); pmb->ctx_buf = NULL; } if (lpfc_dump_static_vport(phba, pmb, offset)) goto out; pmb->vport = phba->pport; mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0544 lpfc_create_static_vport failed to" " issue dump mailbox command ret 0x%x " "status 0x%x\n", mbx_wait_rc, mb->mbxStatus); goto out; } if (phba->sli_rev == LPFC_SLI_REV4) { byte_count = pmb->u.mqe.un.mb_words[5]; mp = (struct lpfc_dmabuf *)pmb->ctx_buf; if (byte_count > sizeof(struct static_vport_info) - offset) byte_count = sizeof(struct static_vport_info) - offset; memcpy(vport_buff + offset, mp->virt, byte_count); offset += byte_count; } else { if (mb->un.varDmp.word_cnt > sizeof(struct static_vport_info) - offset) mb->un.varDmp.word_cnt = sizeof(struct static_vport_info) - offset; byte_count = mb->un.varDmp.word_cnt; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, vport_buff + offset, byte_count); offset += byte_count; } } while (byte_count && offset < sizeof(struct static_vport_info)); if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) != VPORT_INFO_REV)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0545 lpfc_create_static_vport bad" " information header 0x%x 0x%x\n", le32_to_cpu(vport_info->signature), le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); goto out; } shost = lpfc_shost_from_vport(phba->pport); for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { memset(&vport_id, 0, sizeof(vport_id)); vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); if (!vport_id.port_name || !vport_id.node_name) continue; vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; vport_id.vport_type = FC_PORTTYPE_NPIV; vport_id.disable = false; new_fc_vport = fc_vport_create(shost, 0, &vport_id); if (!new_fc_vport) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0546 lpfc_create_static_vport failed to" " create vport\n"); continue; } vport = *(struct lpfc_vport **)new_fc_vport->dd_data; vport->vport_flag |= STATIC_VPORT; } out: kfree(vport_info); if (mbx_wait_rc != MBX_TIMEOUT) lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); } /* * This routine handles processing a Fabric REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. */ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; struct Scsi_Host *shost; pmb->ctx_ndlp = NULL; if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0258 Register Fabric login error: 0x%x\n", mb->mbxStatus); lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { /* FLOGI failed, use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); /* Decrement the reference count to ndlp after the * reference to the ndlp are done. */ lpfc_nlp_put(ndlp); return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); /* Decrement the reference count to ndlp after the reference * to the ndlp are done. 
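 * (this is the reference that was held for this mailbox's completion
 * callback)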
*/ lpfc_nlp_put(ndlp); return; } if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* when physical port receive logo donot start * vport discovery */ if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) lpfc_start_fdiscs(phba); else { shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ; spin_unlock_irq(shost->host_lock); } lpfc_do_scr_ns_plogi(phba, vport); } lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); /* Drop the reference count from the mbox at the end after * all the current reference to the ndlp have been done. */ lpfc_nlp_put(ndlp); return; } /* * This routine will issue a GID_FT for each FC4 Type supported * by the driver. ALL GID_FTs must complete before discovery is started. */ int lpfc_issue_gidft(struct lpfc_vport *vport) { /* Good status, issue CT Request to NameServer */ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { /* Cannot issue NameServer FCP Query, so finish up * discovery */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0604 %s FC TYPE %x %s\n", "Failed to issue GID_FT to ", FC_TYPE_FCP, "Finishing discovery."); return 0; } vport->gidft_inp++; } if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { /* Cannot issue NameServer NVME Query, so finish up * discovery */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0605 %s FC_TYPE %x %s %d\n", "Failed to issue GID_FT to ", FC_TYPE_NVME, "Finishing discovery: gidftinp ", vport->gidft_inp); if (vport->gidft_inp == 0) return 0; } else vport->gidft_inp++; } return vport->gidft_inp; } /** * lpfc_issue_gidpt - issue a GID_PT for all N_Ports * @vport: The virtual port for which this call is being executed. * * This routine will issue a GID_PT to get a list of all N_Ports * * Return value : * 0 - Failure to issue a GID_PT * 1 - GID_PT issued **/ int lpfc_issue_gidpt(struct lpfc_vport *vport) { /* Good status, issue CT Request to NameServer */ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) { /* Cannot issue NameServer FCP Query, so finish up * discovery */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0606 %s Port TYPE %x %s\n", "Failed to issue GID_PT to ", GID_PT_N_PORT, "Finishing discovery."); return 0; } vport->gidft_inp++; return 1; } /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. */ void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; struct lpfc_vport *vport = pmb->vport; int rc; pmb->ctx_ndlp = NULL; vport->gidft_inp = 0; if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0260 Register NameServer error: 0x%x\n", mb->mbxStatus); out: /* decrement the node reference count held for this * callback function. */ lpfc_nlp_put(ndlp); lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); /* If the node is not registered with the scsi or nvme * transport, remove the fabric node. 
The failed reg_login * is terminal and forces the removal of the last node * reference. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); lpfc_nlp_put(ndlp); } if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { /* * RegLogin failed, use loop map to make discovery * list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0003 rpi:%x DID:%x flg:%x %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); if (vport->port_state < LPFC_VPORT_READY) { /* Link up discovery requires Fabric registration. */ lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_NVME); /* Issue SCR just before NameServer GID_FT Query */ lpfc_issue_els_scr(vport, 0); /* Link was bounced or a Fabric LOGO occurred. Start EDC * with initial FW values provided the congestion mode is * not off. Note that signals may or may not be supported * by the adapter but FPIN is provided by default for 1 * or both missing signals support. */ if (phba->cmf_active_mode != LPFC_CFG_OFF) { phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; phba->cgn_reg_signal = phba->cgn_init_reg_signal; rc = lpfc_issue_els_edc(vport, 0); lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_ELS | LOG_DISCOVERY, "4220 Issue EDC status x%x Data x%x\n", rc, phba->cgn_init_reg_signal); } else if (phba->lmt & LMT_64Gb) { /* may send link fault capability descriptor */ lpfc_issue_els_edc(vport, 0); } else { lpfc_issue_els_rdf(vport, 0); } } vport->fc_ns_retry = 0; if (lpfc_issue_gidft(vport) == 0) goto out; /* * At this point in time we may need to wait for multiple * SLI_CTNS_GID_FT CT commands to complete before we start discovery. * * decrement the node reference count held for this * callback function. */ lpfc_nlp_put(ndlp); lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); return; } /* * This routine handles processing a Fabric Controller REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is handed off to the SLI layer. 
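 * On success the node is flagged NLP_RPI_REGISTERED and NLP_FABRIC and
 * moved to the UNMAPPED state; on failure only the mailbox resources
 * and the node reference are released.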
*/ void lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; pmb->ctx_ndlp = NULL; if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0933 %s: Register FC login error: 0x%x\n", __func__, mb->mbxStatus); goto out; } lpfc_check_nlp_post_devloss(vport, ndlp); if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n", __func__, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_state); ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); out: lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); /* Drop the reference count from the mbox at the end after * all the current reference to the ndlp have been done. */ lpfc_nlp_put(ndlp); } static void lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct fc_rport *rport; struct lpfc_rport_data *rdata; struct fc_rport_identifiers rport_ids; struct lpfc_hba *phba = vport->phba; unsigned long flags; if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) return; /* Remote port has reappeared. Re-register w/ FC transport */ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); rport_ids.port_id = ndlp->nlp_DID; rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport add: did:x%x flg:x%x type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); /* Don't add the remote port if unloading. */ if (vport->load_flag & FC_UNLOADING) return; ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); if (!rport) { dev_printk(KERN_WARNING, &phba->pcidev->dev, "Warning: fc_remote_port_add failed\n"); return; } /* Successful port add. Complete initializing node data */ rport->maxframe_size = ndlp->nlp_maxframe; rport->supported_classes = ndlp->nlp_class_sup; rdata = rport->dd_data; rdata->pnode = lpfc_nlp_get(ndlp); if (!rdata->pnode) { dev_warn(&phba->pcidev->dev, "Warning - node ref failed. 
Unreg rport\n"); fc_remote_port_delete(rport); ndlp->rport = NULL; return; } spin_lock_irqsave(&ndlp->lock, flags); ndlp->fc4_xpt_flags |= SCSI_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, flags); if (ndlp->nlp_type & NLP_FCP_TARGET) rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (ndlp->nlp_type & NLP_FCP_INITIATOR) rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; if (ndlp->nlp_type & NLP_NVME_INITIATOR) rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; if (ndlp->nlp_type & NLP_NVME_TARGET) rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; if (ndlp->nlp_type & NLP_NVME_DISCOVERY) rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(rport, rport_ids.roles); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3183 %s rport x%px DID x%x, role x%x refcnt %d\n", __func__, rport, rport->port_id, rport->roles, kref_read(&ndlp->kref)); if ((rport->scsi_target_id != -1) && (rport->scsi_target_id < LPFC_MAX_TARGET)) { ndlp->nlp_sid = rport->scsi_target_id; } return; } static void lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) { struct fc_rport *rport = ndlp->rport; struct lpfc_vport *vport = ndlp->vport; if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport delete: did:x%x flg:x%x type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "3184 rport unregister x%06x, rport x%px " "xptflg x%x refcnt %d\n", ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); fc_remote_port_delete(rport); lpfc_nlp_put(ndlp); } static void lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); unsigned long iflags; spin_lock_irqsave(shost->host_lock, iflags); switch (state) { case NLP_STE_UNUSED_NODE: vport->fc_unused_cnt += count; break; case NLP_STE_PLOGI_ISSUE: vport->fc_plogi_cnt += count; break; case NLP_STE_ADISC_ISSUE: vport->fc_adisc_cnt += count; break; case NLP_STE_REG_LOGIN_ISSUE: vport->fc_reglogin_cnt += count; break; case NLP_STE_PRLI_ISSUE: vport->fc_prli_cnt += count; break; case NLP_STE_UNMAPPED_NODE: vport->fc_unmap_cnt += count; break; case NLP_STE_MAPPED_NODE: vport->fc_map_cnt += count; break; case NLP_STE_NPR_NODE: if (vport->fc_npr_cnt == 0 && count == -1) vport->fc_npr_cnt = 0; else vport->fc_npr_cnt += count; break; } spin_unlock_irqrestore(shost->host_lock, iflags); } /* Register a node with backend if not already done */ void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { unsigned long iflags; lpfc_check_nlp_post_devloss(vport, ndlp); spin_lock_irqsave(&ndlp->lock, iflags); if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { /* Already registered with backend, trigger rescan */ spin_unlock_irqrestore(&ndlp->lock, iflags); if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { lpfc_nvme_rescan_port(vport, ndlp); } return; } ndlp->fc4_xpt_flags |= NLP_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, iflags); if (lpfc_valid_xpt_node(ndlp)) { vport->phba->nport_event_cnt++; /* * Tell the fc transport about the port, if we haven't * already. If we have, and it's a scsi entity, be */ lpfc_register_remote_port(vport, ndlp); } /* We are done if we do not have any NVME remote node */ if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) return; /* Notify the NVME transport of this new rport. 
*/ if (vport->phba->sli_rev >= LPFC_SLI_REV4 && ndlp->nlp_fc4_type & NLP_FC4_NVME) { if (vport->phba->nvmet_support == 0) { /* Register this rport with the transport. * Only NVME Target Rports are registered with * the transport. */ if (ndlp->nlp_type & NLP_NVME_TARGET) { vport->phba->nport_event_cnt++; lpfc_nvme_register_port(vport, ndlp); } } else { /* Just take an NDLP ref count since the * target does not register rports. */ lpfc_nlp_get(ndlp); } } } /* Unregister a node with backend if not already done */ void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { unsigned long iflags; spin_lock_irqsave(&ndlp->lock, iflags); if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "0999 %s Not regd: ndlp x%px rport x%px DID " "x%x FLG x%x XPT x%x\n", __func__, ndlp, ndlp->rport, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; } ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, iflags); if (ndlp->rport && ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { vport->phba->nport_event_cnt++; lpfc_unregister_remote_port(ndlp); } else if (!ndlp->rport) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "1999 %s NDLP in devloss x%px DID x%x FLG x%x" " XPT x%x refcnt %u\n", __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); } if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { vport->phba->nport_event_cnt++; if (vport->phba->nvmet_support == 0) { /* Start devloss if target. */ if (ndlp->nlp_type & NLP_NVME_TARGET) lpfc_nvme_unregister_port(vport, ndlp); } else { /* NVMET has no upcall. */ lpfc_nlp_put(ndlp); } } } /* * Adisc state change handling */ static void lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int new_state) { switch (new_state) { /* * Any state to ADISC_ISSUE * Do nothing, adisc cmpl handling will trigger state changes */ case NLP_STE_ADISC_ISSUE: break; /* * ADISC_ISSUE to mapped states * Trigger a registration with backend, it will be nop if * already registered */ case NLP_STE_UNMAPPED_NODE: ndlp->nlp_type |= NLP_FC_NODE; fallthrough; case NLP_STE_MAPPED_NODE: ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; lpfc_nlp_reg_node(vport, ndlp); break; /* * ADISC_ISSUE to non-mapped states * We are moving from ADISC_ISSUE to a non-mapped state because * ADISC failed, we would have skipped unregistering with * backend, attempt it now */ case NLP_STE_NPR_NODE: ndlp->nlp_flag &= ~NLP_RCV_PLOGI; fallthrough; default: lpfc_nlp_unreg_node(vport, ndlp); break; } } static void lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int old_state, int new_state) { /* Trap ADISC changes here */ if (new_state == NLP_STE_ADISC_ISSUE || old_state == NLP_STE_ADISC_ISSUE) { lpfc_handle_adisc_state(vport, ndlp, new_state); return; } if (new_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; ndlp->nlp_type |= NLP_FC_NODE; } if (new_state == NLP_STE_MAPPED_NODE) ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; if (new_state == NLP_STE_NPR_NODE) ndlp->nlp_flag &= ~NLP_RCV_PLOGI; /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || old_state == NLP_STE_UNMAPPED_NODE)) { /* For nodes marked for ADISC, Handle unreg in ADISC cmpl * if linkup. 
In linkdown do unreg_node */ if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || !lpfc_is_link_up(vport->phba)) lpfc_nlp_unreg_node(vport, ndlp); } if (new_state == NLP_STE_MAPPED_NODE || new_state == NLP_STE_UNMAPPED_NODE) lpfc_nlp_reg_node(vport, ndlp); /* * If the node just added to Mapped list was an FCP target, * but the remote port registration failed or assigned a target * id outside the presentable range - move the node to the * Unmapped List. */ if ((new_state == NLP_STE_MAPPED_NODE) && (ndlp->nlp_type & NLP_FCP_TARGET) && (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; spin_unlock_irq(&ndlp->lock); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } static char * lpfc_nlp_state_name(char *buffer, size_t size, int state) { static char *states[] = { [NLP_STE_UNUSED_NODE] = "UNUSED", [NLP_STE_PLOGI_ISSUE] = "PLOGI", [NLP_STE_ADISC_ISSUE] = "ADISC", [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", [NLP_STE_PRLI_ISSUE] = "PRLI", [NLP_STE_LOGO_ISSUE] = "LOGO", [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", [NLP_STE_MAPPED_NODE] = "MAPPED", [NLP_STE_NPR_NODE] = "NPR", }; if (state < NLP_STE_MAX_STATE && states[state]) strscpy(buffer, states[state], size); else snprintf(buffer, size, "unknown (%d)", state); return buffer; } void lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int state) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); int old_state = ndlp->nlp_state; int node_dropped = ndlp->nlp_flag & NLP_DROPPED; char name1[16], name2[16]; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0904 NPort state transition x%06x, %s -> %s\n", ndlp->nlp_DID, lpfc_nlp_state_name(name1, sizeof(name1), old_state), lpfc_nlp_state_name(name2, sizeof(name2), state)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node statechg did:x%x old:%d ste:%d", ndlp->nlp_DID, old_state, state); if (node_dropped && old_state == NLP_STE_UNUSED_NODE && state != NLP_STE_UNUSED_NODE) { ndlp->nlp_flag &= ~NLP_DROPPED; lpfc_nlp_get(ndlp); } if (old_state == NLP_STE_NPR_NODE && state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; ndlp->nlp_type &= ~NLP_FC_NODE; } if (list_empty(&ndlp->nlp_listp)) { spin_lock_irq(shost->host_lock); list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); spin_unlock_irq(shost->host_lock); } else if (old_state) lpfc_nlp_counters(vport, old_state, -1); ndlp->nlp_state = state; lpfc_nlp_counters(vport, state, 1); lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); } void lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (list_empty(&ndlp->nlp_listp)) { spin_lock_irq(shost->host_lock); list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); spin_unlock_irq(shost->host_lock); } } void lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); lpfc_cancel_retry_delay_tmo(vport, ndlp); if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) lpfc_nlp_counters(vport, ndlp->nlp_state, -1); spin_lock_irq(shost->host_lock); list_del_init(&ndlp->nlp_listp); spin_unlock_irq(shost->host_lock); lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, NLP_STE_UNUSED_NODE); } /** * lpfc_initialize_node - Initialize all fields of node object * @vport: Pointer to Virtual Port object. * @ndlp: Pointer to FC node object. 
* @did: FC_ID of the node. * * This function is always called when node object need to be initialized. * It initializes all the fields of the node object. Although the reference * to phba from @ndlp can be obtained indirectly through it's reference to * @vport, a direct reference to phba is taken here by @ndlp. This is due * to the life-span of the @ndlp might go beyond the existence of @vport as * the final release of ndlp is determined by its reference count. And, the * operation on @ndlp needs the reference to phba. **/ static inline void lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t did) { INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp); ndlp->nlp_DID = did; ndlp->vport = vport; ndlp->phba = vport->phba; ndlp->nlp_sid = NLP_NO_SID; ndlp->nlp_fc4_type = NLP_FC4_NONE; kref_init(&ndlp->kref); atomic_set(&ndlp->cmd_pending, 0); ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; } void lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { /* * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should * be used when lpfc wants to remove the "last" lpfc_nlp_put() to * release the ndlp from the vport when conditions are correct. */ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) return; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); if (vport->phba->sli_rev == LPFC_SLI_REV4) { lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_unreg_rpi(vport, ndlp); } /* NLP_DROPPED means another thread already removed the initial * reference from lpfc_nlp_init. If set, don't drop it again and * introduce an imbalance. */ spin_lock_irq(&ndlp->lock); if (!(ndlp->nlp_flag & NLP_DROPPED)) { ndlp->nlp_flag |= NLP_DROPPED; spin_unlock_irq(&ndlp->lock); lpfc_nlp_put(ndlp); return; } spin_unlock_irq(&ndlp->lock); } /* * Start / ReStart rescue timer for Discovery / RSCN handling */ void lpfc_set_disctmo(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; uint32_t tmo; if (vport->port_state == LPFC_LOCAL_CFG_LINK) { /* For FAN, timeout should be greater than edtov */ tmo = (((phba->fc_edtov + 999) / 1000) + 1); } else { /* Normal discovery timeout should be > than ELS/CT timeout * FC spec states we need 3 * ratov for CT requests */ tmo = ((phba->fc_ratov * 3) + 3); } if (!timer_pending(&vport->fc_disctmo)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "set disc timer: tmo:x%x state:x%x flg:x%x", tmo, vport->port_state, vport->fc_flag); } mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_TMO; spin_unlock_irq(shost->host_lock); /* Start Discovery Timer state <hba_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0247 Start Discovery Timer state x%x " "Data: x%x x%lx x%x x%x\n", vport->port_state, tmo, (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, vport->fc_adisc_cnt); return; } /* * Cancel rescue timer for Discovery / RSCN handling */ int lpfc_can_disctmo(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); unsigned long iflags; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "can disc timer: state:x%x rtry:x%x flg:x%x", vport->port_state, vport->fc_ns_retry, vport->fc_flag); /* Turn off discovery timer if its running */ if (vport->fc_flag & FC_DISC_TMO || 
timer_pending(&vport->fc_disctmo)) { spin_lock_irqsave(shost->host_lock, iflags); vport->fc_flag &= ~FC_DISC_TMO; spin_unlock_irqrestore(shost->host_lock, iflags); del_timer_sync(&vport->fc_disctmo); spin_lock_irqsave(&vport->work_port_lock, iflags); vport->work_port_events &= ~WORKER_DISC_TMO; spin_unlock_irqrestore(&vport->work_port_lock, iflags); } /* Cancel Discovery Timer state <hba_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0248 Cancel Discovery Timer state x%x " "Data: x%x x%x x%x\n", vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, vport->fc_adisc_cnt); return 0; } /* * Check specified ring for outstanding IOCB on the SLI queue * Return true if iocb matches the specified nport */ int lpfc_check_sli_ndlp(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) { struct lpfc_vport *vport = ndlp->vport; u8 ulp_command; u16 ulp_context; u32 remote_id; if (iocb->vport != vport) return 0; ulp_command = get_job_cmnd(phba, iocb); ulp_context = get_job_ulpcontext(phba, iocb); remote_id = get_job_els_rsp64_did(phba, iocb); if (pring->ringno == LPFC_ELS_RING) { switch (ulp_command) { case CMD_GEN_REQUEST64_CR: if (iocb->ndlp == ndlp) return 1; fallthrough; case CMD_ELS_REQUEST64_CR: if (remote_id == ndlp->nlp_DID) return 1; fallthrough; case CMD_XMIT_ELS_RSP64_CX: if (iocb->ndlp == ndlp) return 1; } } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && (ndlp->nlp_flag & NLP_DELAY_TMO)) { return 0; } if (ulp_context == ndlp->nlp_rpi) return 1; } return 0; } static void __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, struct list_head *dequeue_list) { struct lpfc_iocbq *iocb, *next_iocb; list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { /* Check to see if iocb matches the nport */ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) /* match, dequeue */ list_move_tail(&iocb->list, dequeue_list); } } static void lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) { struct lpfc_sli *psli = &phba->sli; uint32_t i; spin_lock_irq(&phba->hbalock); for (i = 0; i < psli->num_rings; i++) __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], dequeue_list); spin_unlock_irq(&phba->hbalock); } static void lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) { struct lpfc_sli_ring *pring; struct lpfc_queue *qp = NULL; spin_lock_irq(&phba->hbalock); list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { pring = qp->pring; if (!pring) continue; spin_lock(&pring->ring_lock); __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); spin_unlock(&pring->ring_lock); } spin_unlock_irq(&phba->hbalock); } /* * Free resources / clean up outstanding I/Os * associated with nlp_rpi in the LPFC_NODELIST entry. */ static int lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); lpfc_fabric_abort_nport(ndlp); /* * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. 
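 * Pending txq entries for this nport are dequeued below and completed
 * with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.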
*/ if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { if (phba->sli_rev != LPFC_SLI_REV4) lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); else lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); return 0; } /** * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO * @phba: Pointer to HBA context object. * @pmb: Pointer to mailbox object. * * This function will issue an ELS LOGO command after completing * the UNREG_RPI. **/ static void lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp); if (!ndlp) return; lpfc_issue_els_logo(vport, ndlp, 0); /* Check to see if there are any deferred events to process */ if ((ndlp->nlp_flag & NLP_UNREG_INP) && (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { /* NLP_RELEASE_RPI is only set for SLI4 ports. */ if (ndlp->nlp_flag & NLP_RELEASE_RPI) { lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_RELEASE_RPI; ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; spin_unlock_irq(&ndlp->lock); } spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_UNREG_INP; spin_unlock_irq(&ndlp->lock); } /* The node has an outstanding reference for the unreg. Now * that the LOGO action and cleanup are finished, release * resources. */ lpfc_nlp_put(ndlp); mempool_free(pmb, phba->mbox_mem_pool); } /* * Sets the mailbox completion handler to be used for the * unreg_rpi command. The handler varies based on the state of * the port and what will be happening to the rpi next. */ static void lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { unsigned long iflags; /* Driver always gets a reference on the mailbox job * in support of async jobs. */ mbox->ctx_ndlp = lpfc_nlp_get(ndlp); if (!mbox->ctx_ndlp) return; if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { mbox->mbox_cmpl = lpfc_nlp_logo_unreg; } else if (phba->sli_rev == LPFC_SLI_REV4 && (!(vport->load_flag & FC_UNLOADING)) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) && (kref_read(&ndlp->kref) > 0)) { mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; } else { if (vport->load_flag & FC_UNLOADING) { if (phba->sli_rev == LPFC_SLI_REV4) { spin_lock_irqsave(&ndlp->lock, iflags); ndlp->nlp_flag |= NLP_RELEASE_RPI; spin_unlock_irqrestore(&ndlp->lock, iflags); } } mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } /* * Free rpi associated with LPFC_NODELIST entry. * This routine is called from lpfc_freenode(), when we are removing * a LPFC_NODELIST entry. It is also called if the driver initiates a * LOGO that completes successfully, and we are waiting to PLOGI back * to the remote NPort. In addition, it is called after we receive * and unsolicated ELS cmd, send back a rsp, the rsp completes and * we are waiting to PLOGI back to the remote NPort. 
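 * The routine queues an UNREG_LOGIN mailbox and marks NLP_UNREG_INP so
 * duplicate unreg requests are skipped; if the mailbox cannot be
 * allocated it falls back to issuing a LOGO while keeping the rpi
 * alive (unless the driver is unloading).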
*/ int lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc, acc_plogi = 1; uint16_t rpi; if (ndlp->nlp_flag & NLP_RPI_REGISTERED || ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " "unregistered nlp_flag x%x " "did x%x\n", ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID); /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ if (ndlp->nlp_flag & NLP_UNREG_INP) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " "NPort x%x deferred x%x flg x%x " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); goto out; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { /* SLI4 ports require the physical rpi value. */ rpi = ndlp->nlp_rpi; if (phba->sli_rev == LPFC_SLI_REV4) rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; lpfc_unreg_login(phba, vport->vpi, rpi, mbox); mbox->vport = vport; lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); if (!mbox->ctx_ndlp) { mempool_free(mbox, phba->mbox_mem_pool); return 1; } if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) /* * accept PLOGIs after unreg_rpi_cmpl */ acc_plogi = 0; if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!(vport->fc_flag & FC_OFFLINE_MODE))) ndlp->nlp_flag |= NLP_UNREG_INP; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " "NPort x%x deferred flg x%x " "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { ndlp->nlp_flag &= ~NLP_UNREG_INP; mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; lpfc_nlp_put(ndlp); } } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1444 Failed to allocate mempool " "unreg_rpi UNREG x%x, " "DID x%x, flag x%x, " "ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); /* Because mempool_alloc failed, we * will issue a LOGO here and keep the rpi alive if * not unloading. */ if (!(vport->load_flag & FC_UNLOADING)) { ndlp->nlp_flag &= ~NLP_UNREG_INP; lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } return 1; } lpfc_no_rpi(phba, ndlp); out: if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0; ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; ndlp->nlp_flag &= ~NLP_NPR_ADISC; if (acc_plogi) ndlp->nlp_flag &= ~NLP_LOGO_ACC; return 1; } ndlp->nlp_flag &= ~NLP_LOGO_ACC; return 0; } /** * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to unregister all the currently registered RPIs * to the HBA. 
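 * The host lock is released around each lpfc_unreg_rpi() call because
 * the mailbox allocation may sleep.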
**/ void lpfc_unreg_hba_rpis(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; int i; vports = lpfc_create_vport_work_array(phba); if (!vports) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2884 Vport array allocation failed \n"); return; } for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { /* The mempool_alloc might sleep */ spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vports[i], ndlp); spin_lock_irq(shost->host_lock); } } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); } void lpfc_unreg_all_rpis(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc; if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli4_unreg_all_rpis(vport); return; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->ctx_ndlp = NULL; rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1836 Could not issue " "unreg_login(all_rpis) status %d\n", rc); } } void lpfc_unreg_default_rpis(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc; /* Unreg DID is an SLI3 operation. */ if (phba->sli_rev > LPFC_SLI_REV3) return; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, mbox); mbox->vport = vport; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->ctx_ndlp = NULL; rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1815 Could not issue " "unreg_did (default rpis) status %d\n", rc); } } /* * Free resources associated with LPFC_NODELIST entry * so it can be freed. 
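 * Queued or active REG_LOGIN64 mailboxes that still reference this
 * ndlp are detached first, then outstanding ELS traffic is aborted and
 * the delayed-retry timer and event lists are torn down.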
*/ static int lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mb, *nextmb; /* Cleanup node for NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0900 Cleanup node for NPort x%x " "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_dequeue_node(vport, ndlp); /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } spin_lock_irq(&phba->hbalock); /* Cleanup REG_LOGIN completions which are not yet processed */ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp)) continue; mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { list_del(&mb->list); lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED); /* Don't invoke lpfc_nlp_put. The driver is in * lpfc_nlp_release context. */ } } spin_unlock_irq(&phba->hbalock); lpfc_els_abort(phba, ndlp); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_DELAY_TMO; spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = 0; del_timer_sync(&ndlp->nlp_delayfunc); list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); if (phba->sli_rev == LPFC_SLI_REV4) ndlp->nlp_flag |= NLP_RELEASE_RPI; return 0; } static int lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t did) { D_ID mydid, ndlpdid, matchdid; if (did == Bcast_DID) return 0; /* First check for Direct match */ if (ndlp->nlp_DID == did) return 1; /* Next check for area/domain identically equals 0 match */ mydid.un.word = vport->fc_myDID; if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { return 0; } matchdid.un.word = did; ndlpdid.un.word = ndlp->nlp_DID; if (matchdid.un.b.id == ndlpdid.un.b.id) { if ((mydid.un.b.domain == matchdid.un.b.domain) && (mydid.un.b.area == matchdid.un.b.area)) { /* This code is supposed to match the ID * for a private loop device that is * connect to fl_port. 
But we need to * check that the port did not just go * from pt2pt to fabric or we could end * up matching ndlp->nlp_DID 000001 to * fabric DID 0x20101 */ if ((ndlpdid.un.b.domain == 0) && (ndlpdid.un.b.area == 0)) { if (ndlpdid.un.b.id && vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) return 1; } return 0; } matchdid.un.word = ndlp->nlp_DID; if ((mydid.un.b.domain == ndlpdid.un.b.domain) && (mydid.un.b.area == ndlpdid.un.b.area)) { if ((matchdid.un.b.domain == 0) && (matchdid.un.b.area == 0)) { if (matchdid.un.b.id) return 1; } } } return 0; } /* Search for a nodelist entry */ static struct lpfc_nodelist * __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct lpfc_nodelist *ndlp; uint32_t data1; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (lpfc_matchdid(vport, ndlp, did)) { data1 = (((uint32_t)ndlp->nlp_state << 24) | ((uint32_t)ndlp->nlp_xri << 16) | ((uint32_t)ndlp->nlp_type << 8) ); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0929 FIND node DID " "Data: x%px x%x x%x x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); return ndlp; } } /* FIND node did <did> NOT FOUND */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0932 FIND node did x%x NOT FOUND.\n", did); return NULL; } struct lpfc_nodelist * lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; unsigned long iflags; spin_lock_irqsave(shost->host_lock, iflags); ndlp = __lpfc_findnode_did(vport, did); spin_unlock_irqrestore(shost->host_lock, iflags); return ndlp; } struct lpfc_nodelist * lpfc_findnode_mapped(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; uint32_t data1; unsigned long iflags; spin_lock_irqsave(shost->host_lock, iflags); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || ndlp->nlp_state == NLP_STE_MAPPED_NODE) { data1 = (((uint32_t)ndlp->nlp_state << 24) | ((uint32_t)ndlp->nlp_xri << 16) | ((uint32_t)ndlp->nlp_type << 8) | ((uint32_t)ndlp->nlp_rpi & 0xff)); spin_unlock_irqrestore(shost->host_lock, iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "2025 FIND node DID " "Data: x%px x%x x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->active_rrqs_xri_bitmap); return ndlp; } } spin_unlock_irqrestore(shost->host_lock, iflags); /* FIND node did <did> NOT FOUND */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "2026 FIND mapped did NOT FOUND.\n"); return NULL; } struct lpfc_nodelist * lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) { struct lpfc_nodelist *ndlp; ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { if (vport->phba->nvmet_support) return NULL; if ((vport->fc_flag & FC_RSCN_MODE) != 0 && lpfc_rscn_payload_check(vport, did) == 0) return NULL; ndlp = lpfc_nlp_init(vport, did); if (!ndlp) return NULL; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6453 Setup New Node 2B_DISC x%x " "Data:x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); return ndlp; } /* The NVME Target does not want to actively manage an rport. * The goal is to allow the target to reset its state and clear * pending IO in preparation for the initiator to recover. 
*/ if ((vport->fc_flag & FC_RSCN_MODE) && !(vport->fc_flag & FC_NDISC_ACTIVE)) { if (lpfc_rscn_payload_check(vport, did)) { /* Since this node is marked for discovery, * delay timeout is not needed. */ lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6455 Setup RSCN Node 2B_DISC x%x " "Data:x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); /* NVME Target mode waits until rport is known to be * impacted by the RSCN before it transitions. No * active management - just go to NPR provided the * node had a valid login. */ if (vport->phba->nvmet_support) return ndlp; /* If we've already received a PLOGI from this NPort * we don't need to try to discover it again. */ if (ndlp->nlp_flag & NLP_RCV_PLOGI && !(ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))) return NULL; if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); } spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6456 Skip Setup RSCN Node x%x " "Data:x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); ndlp = NULL; } } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6457 Setup Active Node 2B_DISC x%x " "Data:x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); /* If the initiator received a PLOGI from this NPort or if the * initiator is already in the process of discovery on it, * there's no need to try to discover it again. */ if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || (!vport->phba->nvmet_support && ndlp->nlp_flag & NLP_RCV_PLOGI)) return NULL; if (vport->phba->nvmet_support) return ndlp; /* Moving to NPR state clears unsolicited flags and * allows for rediscovery */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); } return ndlp; } /* Build a list of nodes to discover based on the loopmap */ void lpfc_disc_list_loopmap(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; int j; uint32_t alpa, index; if (!lpfc_is_link_up(phba)) return; if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) return; /* Check for loop map present or not */ if (phba->alpa_map[0]) { for (j = 1; j <= phba->alpa_map[0]; j++) { alpa = phba->alpa_map[j]; if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) continue; lpfc_setup_disc_node(vport, alpa); } } else { /* No alpamap, so try all alpa's */ for (j = 0; j < FC_MAXLOOP; j++) { /* If cfg_scan_down is set, start from highest * ALPA (0xef) to lowest (0x1). */ if (vport->cfg_scan_down) index = j; else index = FC_MAXLOOP - j - 1; alpa = lpfcAlpaArray[index]; if ((vport->fc_myDID & 0xff) == alpa) continue; lpfc_setup_disc_node(vport, alpa); } } return; } /* SLI3 only */ void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) { LPFC_MBOXQ_t *mbox; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING]; struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; int rc; /* * if it's not a physical port or if we already send * clear_la then don't send it. 
*/ if ((phba->link_state >= LPFC_CLEAR_LA) || (vport->port_type != LPFC_PHYSICAL_PORT) || (phba->sli_rev == LPFC_SLI_REV4)) return; /* Link up discovery */ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { phba->link_state = LPFC_CLEAR_LA; lpfc_clear_la(phba, mbox); mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); lpfc_disc_flush_list(vport); extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; phba->link_state = LPFC_HBA_ERROR; } } } /* Reg_vpi to tell firmware to resume normal operations */ void lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) { LPFC_MBOXQ_t *regvpimbox; regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (regvpimbox) { lpfc_reg_vpi(vport, regvpimbox); regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; regvpimbox->vport = vport; if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) == MBX_NOT_FINISHED) { mempool_free(regvpimbox, phba->mbox_mem_pool); } } } /* Start Link up / RSCN discovery on NPR nodes */ void lpfc_disc_start(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; uint32_t num_sent; uint32_t clear_la_pending; if (!lpfc_is_link_up(phba)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "3315 Link is not up %x\n", phba->link_state); return; } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; else clear_la_pending = 0; if (vport->port_state < LPFC_VPORT_READY) vport->port_state = LPFC_DISC_AUTH; lpfc_set_disctmo(vport); vport->fc_prevDID = vport->fc_myDID; vport->num_disc_nodes = 0; /* Start Discovery state <hba_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0202 Start Discovery port state x%x " "flg x%x Data: x%x x%x x%x\n", vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, vport->fc_adisc_cnt, vport->fc_npr_cnt); /* First do ADISCs - if any */ num_sent = lpfc_els_disc_adisc(vport); if (num_sent) return; /* Register the VPI for SLI3, NPIV only. */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } /* * For SLI2, we need to set port_state to READY and continue * discovery. */ if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { /* If we get here, there is nothing to ADISC */ lpfc_issue_clear_la(phba, vport); if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { vport->num_disc_nodes = 0; /* go thru NPR nodes and issue ELS PLOGIs */ if (vport->fc_npr_cnt) lpfc_els_disc_plogi(vport); if (!vport->num_disc_nodes) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); } } vport->port_state = LPFC_VPORT_READY; } else { /* Next do PLOGIs - if any */ num_sent = lpfc_els_disc_plogi(vport); if (num_sent) return; if (vport->fc_flag & FC_RSCN_MODE) { /* Check to see if more RSCNs came in while we * were processing this one. */ if ((vport->fc_rscn_id_cnt == 0) && (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_RSCN_MODE; spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); } else lpfc_els_handle_rscn(vport); } } return; } /* * Ignore completion for all IOCBs on tx and txcmpl queue for ELS * ring the match the sppecified nodelist. 
*/ static void lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); struct lpfc_iocbq *iocb, *next_iocb; struct lpfc_sli_ring *pring; u32 ulp_command; pring = lpfc_phba_elsring(phba); if (unlikely(!pring)) return; /* Error matching iocb on txq or txcmplq * First check the txq. */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->ndlp != ndlp) continue; ulp_command = get_job_cmnd(phba, iocb); if (ulp_command == CMD_ELS_REQUEST64_CR || ulp_command == CMD_XMIT_ELS_RSP64_CX) { list_move_tail(&iocb->list, &completions); } } /* Next check the txcmplq */ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { if (iocb->ndlp != ndlp) continue; ulp_command = get_job_cmnd(phba, iocb); if (ulp_command == CMD_ELS_REQUEST64_CR || ulp_command == CMD_XMIT_ELS_RSP64_CX) { lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); } } spin_unlock_irq(&phba->hbalock); /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } static void lpfc_disc_flush_list(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_hba *phba = vport->phba; if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { lpfc_free_tx(phba, ndlp); } } } } /* * lpfc_notify_xport_npr - notifies xport of node disappearance * @vport: Pointer to Virtual Port object. * * Transitions all ndlps to NPR state. When lpfc_nlp_set_state * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered * and transport notified that the node is gone. * Return Code: * none */ static void lpfc_notify_xport_npr(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } } void lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) { lpfc_els_flush_rscn(vport); lpfc_els_flush_cmd(vport); lpfc_disc_flush_list(vport); if (pci_channel_offline(vport->phba->pcidev)) lpfc_notify_xport_npr(vport); } /*****************************************************************************/ /* * NAME: lpfc_disc_timeout * * FUNCTION: Fibre Channel driver discovery timeout routine. 
* * EXECUTION ENVIRONMENT: interrupt only * * CALLED FROM: * Timer function * * RETURNS: * none */ /*****************************************************************************/ void lpfc_disc_timeout(struct timer_list *t) { struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long flags = 0; if (unlikely(!phba)) return; spin_lock_irqsave(&vport->work_port_lock, flags); tmo_posted = vport->work_port_events & WORKER_DISC_TMO; if (!tmo_posted) vport->work_port_events |= WORKER_DISC_TMO; spin_unlock_irqrestore(&vport->work_port_lock, flags); if (!tmo_posted) lpfc_worker_wake_up(phba); return; } static void lpfc_disc_timeout_handler(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_sli *psli = &phba->sli; struct lpfc_nodelist *ndlp, *next_ndlp; LPFC_MBOXQ_t *initlinkmbox; int rc, clrlaerr = 0; if (!(vport->fc_flag & FC_DISC_TMO)) return; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_DISC_TMO; spin_unlock_irq(shost->host_lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "disc timeout: state:x%x rtry:x%x flg:x%x", vport->port_state, vport->fc_ns_retry, vport->fc_flag); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: /* * port_state is identically LPFC_LOCAL_CFG_LINK while * waiting for FAN timeout */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "0221 FAN timeout\n"); /* Start discovery by sending FLOGI, clean up old rpis */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding IO now since device * is marked for PLOGI. */ lpfc_unreg_rpi(vport, ndlp); } } if (vport->port_state != LPFC_FLOGI) { if (phba->sli_rev <= LPFC_SLI_REV3) lpfc_initial_flogi(vport); else lpfc_issue_init_vfi(vport); return; } break; case LPFC_FDISC: case LPFC_FLOGI: /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ /* Initial FLOGI timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0222 Initial %s timeout\n", vport->vpi ? "FDISC" : "FLOGI"); /* Assume no Fabric and go on with discovery. * Check for outstanding ELS FLOGI to abort. */ /* FLOGI failed, so just use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ lpfc_disc_start(vport); break; case LPFC_FABRIC_CFG_LINK: /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for NameServer login */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0223 Timeout while waiting for " "NameServer login\n"); /* Next look for NameServer ndlp */ ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp) lpfc_els_abort(phba, ndlp); /* ReStart discovery */ goto restart_disc; case LPFC_NS_QRY: /* Check for wait for NameServer Rsp timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0224 NameServer Query timeout " "Data: x%x x%x\n", vport->fc_ns_retry, LPFC_MAX_NS_RETRY); if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { /* Try it one more time */ vport->fc_ns_retry++; vport->gidft_inp = 0; rc = lpfc_issue_gidft(vport); if (rc == 0) break; } vport->fc_ns_retry = 0; restart_disc: /* * Discovery is over. * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. 
*/ if (phba->sli_rev < LPFC_SLI_REV4) { if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) lpfc_issue_reg_vpi(phba, vport); else { lpfc_issue_clear_la(phba, vport); vport->port_state = LPFC_VPORT_READY; } } /* Setup and issue mailbox INITIALIZE LINK command */ initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0206 Device Discovery " "completion error\n"); phba->link_state = LPFC_HBA_ERROR; break; } lpfc_linkdown(phba); lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, phba->cfg_link_speed); initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; initlinkmbox->vport = vport; initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); lpfc_set_loopback_flag(phba); if (rc == MBX_NOT_FINISHED) mempool_free(initlinkmbox, phba->mbox_mem_pool); break; case LPFC_DISC_AUTH: /* Node Authentication timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0227 Node Authentication timeout\n"); lpfc_disc_flush_list(vport); /* * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. */ if (phba->sli_rev < LPFC_SLI_REV4) { if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) lpfc_issue_reg_vpi(phba, vport); else { /* NPIV Not enabled */ lpfc_issue_clear_la(phba, vport); vport->port_state = LPFC_VPORT_READY; } } break; case LPFC_VPORT_READY: if (vport->fc_flag & FC_RSCN_MODE) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0231 RSCN timeout Data: x%x " "x%x x%x x%x\n", vport->fc_ns_retry, LPFC_MAX_NS_RETRY, vport->port_state, vport->gidft_inp); /* Cleanup any outstanding ELS commands */ lpfc_els_flush_cmd(vport); lpfc_els_flush_rscn(vport); lpfc_disc_flush_list(vport); } break; default: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0273 Unexpected discovery timeout, " "vport State x%x\n", vport->port_state); break; } switch (phba->link_state) { case LPFC_CLEAR_LA: /* CLEAR LA timeout */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0228 CLEAR LA timeout\n"); clrlaerr = 1; break; case LPFC_LINK_UP: lpfc_issue_clear_la(phba, vport); fallthrough; case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0230 Unexpected timeout, hba link " "state x%x\n", phba->link_state); clrlaerr = 1; break; case LPFC_HBA_READY: break; } if (clrlaerr) { lpfc_disc_flush_list(vport); if (phba->sli_rev != LPFC_SLI_REV4) { psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT; } vport->port_state = LPFC_VPORT_READY; } return; } /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ * as the completion routine when the command is * handed off to the SLI layer. 
*/ void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; struct lpfc_vport *vport = pmb->vport; pmb->ctx_ndlp = NULL; if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0004 rpi:%x DID:%x flg:%x %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); /* * Start issuing Fabric-Device Management Interface (FDMI) command to * 0xfffffa (FDMI well known port). * DHBA -> DPRT -> RHBA -> RPA (physical port) * DPRT -> RPRT (vports) */ if (vport->port_type == LPFC_PHYSICAL_PORT) { phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); } else { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); } /* decrement the node reference count held for this callback * function. */ lpfc_nlp_put(ndlp); lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); return; } static int lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) { uint16_t *rpi = param; return ndlp->nlp_rpi == *rpi; } static int lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) { return memcmp(&ndlp->nlp_portname, param, sizeof(ndlp->nlp_portname)) == 0; } static struct lpfc_nodelist * __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) { struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "3185 FIND node filter %ps DID " "ndlp x%px did x%x flg x%x st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_xri, ndlp->nlp_type, ndlp->nlp_rpi); return ndlp; } } lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "3186 FIND node filter %ps NOT FOUND.\n", filter); return NULL; } /* * This routine looks up the ndlp lists for the given RPI. If rpi found it * returns the node list element pointer else return NULL. */ struct lpfc_nodelist * __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); } /* * This routine looks up the ndlp lists for the given WWPN. If WWPN found it * returns the node element list pointer else return NULL. */ struct lpfc_nodelist * lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; spin_lock_irq(shost->host_lock); ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); spin_unlock_irq(shost->host_lock); return ndlp; } /* * This routine looks up the ndlp lists for the given RPI. If the rpi * is found, the routine returns the node element list pointer else * return NULL. */ struct lpfc_nodelist * lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); ndlp = __lpfc_findnode_rpi(vport, rpi); spin_unlock_irqrestore(shost->host_lock, flags); return ndlp; } /** * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier * @phba: pointer to lpfc hba data structure. * @vpi: the physical host virtual N_Port identifier. * * This routine finds a vport on a HBA (referred by @phba) through a * @vpi. 
The function walks the HBA's vport list and returns the address * of the vport with the matching @vpi. * * Return code * NULL - No vport with the matching @vpi found * Otherwise - Address to the vport with the matching @vpi. **/ struct lpfc_vport * lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) { struct lpfc_vport *vport; unsigned long flags; int i = 0; /* The physical ports are always vpi 0 - translate is unnecessary. */ if (vpi > 0) { /* * Translate the physical vpi to the logical vpi. The * vport stores the logical vpi. */ for (i = 0; i <= phba->max_vpi; i++) { if (vpi == phba->vpi_ids[i]) break; } if (i > phba->max_vpi) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2936 Could not find Vport mapped " "to vpi %d\n", vpi); return NULL; } } spin_lock_irqsave(&phba->port_list_lock, flags); list_for_each_entry(vport, &phba->port_list, listentry) { if (vport->vpi == i) { spin_unlock_irqrestore(&phba->port_list_lock, flags); return vport; } } spin_unlock_irqrestore(&phba->port_list_lock, flags); return NULL; } struct lpfc_nodelist * lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) { struct lpfc_nodelist *ndlp; int rpi = LPFC_RPI_ALLOC_ERROR; if (vport->phba->sli_rev == LPFC_SLI_REV4) { rpi = lpfc_sli4_alloc_rpi(vport->phba); if (rpi == LPFC_RPI_ALLOC_ERROR) return NULL; } ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) { if (vport->phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_free_rpi(vport->phba, rpi); return NULL; } memset(ndlp, 0, sizeof (struct lpfc_nodelist)); spin_lock_init(&ndlp->lock); lpfc_initialize_node(vport, ndlp, did); INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0007 Init New ndlp x%px, rpi:x%x DID:%x " "flg:x%x refcnt:%d\n", ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); ndlp->active_rrqs_xri_bitmap = mempool_alloc(vport->phba->active_rrq_pool, GFP_KERNEL); if (ndlp->active_rrqs_xri_bitmap) memset(ndlp->active_rrqs_xri_bitmap, 0, ndlp->phba->cfg_rrq_xri_bitmap_sz); } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", ndlp->nlp_DID, 0, 0); return ndlp; } /* This routine releases all resources associated with a specifc NPort's ndlp * and mempool_free's the nodelist. */ static void lpfc_nlp_release(struct kref *kref) { struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, kref); struct lpfc_vport *vport = ndlp->vport; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node release: did:x%x flg:x%x type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n", __func__, ndlp, ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_rpi); /* remove ndlp from action. */ lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); /* Not all ELS transactions have registered the RPI with the port. * In these cases the rpi usage is temporary and the node is * released when the WQE is completed. Catch this case to free the * RPI to the pool. Because this node is in the release path, a lock * is unnecessary. All references are gone and the node has been * dequeued. 
*/ if (ndlp->nlp_flag & NLP_RELEASE_RPI) { if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } } /* The node is not freed back to memory, it is released to a pool so * the node fields need to be cleaned up. */ ndlp->vport = NULL; ndlp->nlp_state = NLP_STE_FREED_NODE; ndlp->nlp_flag = 0; ndlp->fc4_xpt_flags = 0; /* free ndlp memory for final ndlp release */ if (ndlp->phba->sli_rev == LPFC_SLI_REV4) mempool_free(ndlp->active_rrqs_xri_bitmap, ndlp->phba->active_rrq_pool); mempool_free(ndlp, ndlp->phba->nlp_mem_pool); } /* This routine bumps the reference count for a ndlp structure to ensure * that one discovery thread won't free a ndlp while another discovery thread * is using it. */ struct lpfc_nodelist * lpfc_nlp_get(struct lpfc_nodelist *ndlp) { unsigned long flags; if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node get: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); /* The check of ndlp usage to prevent incrementing the * ndlp reference count that is in the process of being * released. */ spin_lock_irqsave(&ndlp->lock, flags); if (!kref_get_unless_zero(&ndlp->kref)) { spin_unlock_irqrestore(&ndlp->lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, "0276 %s: ndlp:x%px refcnt:%d\n", __func__, (void *)ndlp, kref_read(&ndlp->kref)); return NULL; } spin_unlock_irqrestore(&ndlp->lock, flags); } else { WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__); } return ndlp; } /* This routine decrements the reference count for a ndlp structure. If the * count goes to 0, this indicates the associated nodelist should be freed. */ int lpfc_nlp_put(struct lpfc_nodelist *ndlp) { if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node put: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); } else { WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__); } return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; } /** * lpfc_fcf_inuse - Check if FCF can be unregistered. * @phba: Pointer to hba context object. * * This function iterate through all FC nodes associated * will all vports to check if there is any node with * fc_rports associated with it. If there is an fc_rport * associated with the node, then the node is either in * discovered state or its devloss_timer is pending. */ static int lpfc_fcf_inuse(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i, ret = 0; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; vports = lpfc_create_vport_work_array(phba); /* If driver cannot allocate memory, indicate fcf is in use */ if (!vports) return 1; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); /* * IF the CVL_RCVD bit is not set then we have sent the * flogi. * If dev_loss fires while we are waiting we do not want to * unreg the fcf. 
*/ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) { spin_unlock_irq(shost->host_lock); ret = 1; goto out; } list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { if (ndlp->rport && (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { ret = 1; spin_unlock_irq(shost->host_lock); goto out; } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "2624 RPI %x DID %x flag %x " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); } } spin_unlock_irq(shost->host_lock); } out: lpfc_destroy_vport_work_array(phba, vports); return ret; } /** * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. * @phba: Pointer to hba context object. * @mboxq: Pointer to mailbox object. * * This function frees memory associated with the mailbox command. */ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (mboxq->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2555 UNREG_VFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~FC_VFI_REGISTERED; spin_unlock_irq(shost->host_lock); mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. * @phba: Pointer to hba context object. * @mboxq: Pointer to mailbox object. * * This function frees memory associated with the mailbox command. */ static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; if (mboxq->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2550 UNREG_FCFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); } mempool_free(mboxq, phba->mbox_mem_pool); return; } /** * lpfc_unregister_fcf_prep - Unregister fcf record preparation * @phba: Pointer to hba context object. * * This function prepare the HBA for unregistering the currently registered * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and * VFIs. 
*/ int lpfc_unregister_fcf_prep(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) lpfc_unreg_hba_rpis(phba); /* At this point, all discovery is aborted */ phba->pport->port_state = LPFC_VPORT_UNKNOWN; /* Unregister VPIs */ vports = lpfc_create_vport_work_array(phba); if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Stop FLOGI/FDISC retries */ ndlp = lpfc_findnode_did(vports[i], Fabric_DID); if (ndlp) lpfc_cancel_retry_delay_tmo(vports[i], ndlp); lpfc_cleanup_pending_mbox(vports[i]); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_unreg_all_rpis(vports[i]); lpfc_mbx_unreg_vpi(vports[i]); shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (ndlp) lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); lpfc_cleanup_pending_mbox(phba->pport); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_sli4_unreg_all_rpis(phba->pport); lpfc_mbx_unreg_vpi(phba->pport); shost = lpfc_shost_from_vport(phba->pport); spin_lock_irq(shost->host_lock); phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; spin_unlock_irq(shost->host_lock); } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); /* Unregister the physical port VFI */ rc = lpfc_issue_unreg_vfi(phba->pport); return rc; } /** * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record * @phba: Pointer to hba context object. * * This function issues synchronous unregister FCF mailbox command to HBA to * unregister the currently registered FCF record. The driver does not reset * the driver FCF usage state flags. * * Return 0 if successfully issued, none-zero otherwise. */ int lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mbox; int rc; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2551 UNREG_FCFI mbox allocation failed" "HBA state x%x\n", phba->pport->port_state); return -ENOMEM; } lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2552 Unregister FCFI command failed rc x%x " "HBA state x%x\n", rc, phba->pport->port_state); return -EINVAL; } return 0; } /** * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan * @phba: Pointer to hba context object. * * This function unregisters the currently reigstered FCF. This function * also tries to find another FCF for discovery by rescan the HBA FCF table. 
*/ void lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) { int rc; /* Preparation for unregistering fcf */ rc = lpfc_unregister_fcf_prep(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2748 Failed to prepare for unregistering " "HBA's FCF record: rc=%d\n", rc); return; } /* Now, unregister FCF record and reset HBA FCF state */ rc = lpfc_sli4_unregister_fcf(phba); if (rc) return; /* Reset HBA FCF states after successful unregister FCF */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); phba->fcf.current_rec.flag = 0; /* * If driver is not unloading, check if there is any other * FCF record that can be used for discovery. */ if ((phba->pport->load_flag & FC_UNLOADING) || (phba->link_state < LPFC_LINK_UP)) return; /* This is considered as the initial FCF discovery scan */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); /* Reset FCF roundrobin bmask for new discovery */ lpfc_sli4_clear_fcf_rr_bmask(phba); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2553 lpfc_unregister_unused_fcf failed " "to read FCF record HBA state x%x\n", phba->pport->port_state); } } /** * lpfc_unregister_fcf - Unregister the currently registered fcf record * @phba: Pointer to hba context object. * * This function just unregisters the currently reigstered FCF. It does not * try to find another FCF for discovery. */ void lpfc_unregister_fcf(struct lpfc_hba *phba) { int rc; /* Preparation for unregistering fcf */ rc = lpfc_unregister_fcf_prep(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2749 Failed to prepare for unregistering " "HBA's FCF record: rc=%d\n", rc); return; } /* Now, unregister FCF record and reset HBA FCF state */ rc = lpfc_sli4_unregister_fcf(phba); if (rc) return; /* Set proper HBA FCF states after successful unregister FCF */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); } /** * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. * @phba: Pointer to hba context object. * * This function check if there are any connected remote port for the FCF and * if all the devices are disconnected, this function unregister FCFI. * This function also tries to use another FCF for discovery. */ void lpfc_unregister_unused_fcf(struct lpfc_hba *phba) { /* * If HBA is not running in FIP mode, if HBA does not support * FCoE, if FCF discovery is ongoing, or if FCF has not been * registered, do nothing. */ spin_lock_irq(&phba->hbalock); if (!(phba->hba_flag & HBA_FCOE_MODE) || !(phba->fcf.fcf_flag & FCF_REGISTERED) || !(phba->hba_flag & HBA_FIP_SUPPORT) || (phba->fcf.fcf_flag & FCF_DISCOVERY) || (phba->pport->port_state == LPFC_FLOGI)) { spin_unlock_irq(&phba->hbalock); return; } spin_unlock_irq(&phba->hbalock); if (lpfc_fcf_inuse(phba)) return; lpfc_unregister_fcf_rescan(phba); } /** * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. * @phba: Pointer to hba context object. * @buff: Buffer containing the FCF connection table as in the config * region. * This function create driver data structure for the FCF connection * record table read from config region 23. 
*/ static void lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, uint8_t *buff) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; struct lpfc_fcf_conn_hdr *conn_hdr; struct lpfc_fcf_conn_rec *conn_rec; uint32_t record_count; int i; /* Free the current connect table */ list_for_each_entry_safe(conn_entry, next_conn_entry, &phba->fcf_conn_rec_list, list) { list_del_init(&conn_entry->list); kfree(conn_entry); } conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; record_count = conn_hdr->length * sizeof(uint32_t)/ sizeof(struct lpfc_fcf_conn_rec); conn_rec = (struct lpfc_fcf_conn_rec *) (buff + sizeof(struct lpfc_fcf_conn_hdr)); for (i = 0; i < record_count; i++) { if (!(conn_rec[i].flags & FCFCNCT_VALID)) continue; conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), GFP_KERNEL); if (!conn_entry) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2566 Failed to allocate connection" " table entry\n"); return; } memcpy(&conn_entry->conn_rec, &conn_rec[i], sizeof(struct lpfc_fcf_conn_rec)); list_add_tail(&conn_entry->list, &phba->fcf_conn_rec_list); } if (!list_empty(&phba->fcf_conn_rec_list)) { i = 0; list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { conn_rec = &conn_entry->conn_rec; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3345 FCF connection list rec[%02d]: " "flags:x%04x, vtag:x%04x, " "fabric_name:x%02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x, " "switch_name:x%02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x\n", i++, conn_rec->flags, conn_rec->vlan_tag, conn_rec->fabric_name[0], conn_rec->fabric_name[1], conn_rec->fabric_name[2], conn_rec->fabric_name[3], conn_rec->fabric_name[4], conn_rec->fabric_name[5], conn_rec->fabric_name[6], conn_rec->fabric_name[7], conn_rec->switch_name[0], conn_rec->switch_name[1], conn_rec->switch_name[2], conn_rec->switch_name[3], conn_rec->switch_name[4], conn_rec->switch_name[5], conn_rec->switch_name[6], conn_rec->switch_name[7]); } } } /** * lpfc_read_fcoe_param - Read FCoe parameters from conf region.. * @phba: Pointer to hba context object. * @buff: Buffer containing the FCoE parameter data structure. * * This function update driver data structure with config * parameters read from config region 23. */ static void lpfc_read_fcoe_param(struct lpfc_hba *phba, uint8_t *buff) { struct lpfc_fip_param_hdr *fcoe_param_hdr; struct lpfc_fcoe_params *fcoe_param; fcoe_param_hdr = (struct lpfc_fip_param_hdr *) buff; fcoe_param = (struct lpfc_fcoe_params *) (buff + sizeof(struct lpfc_fip_param_hdr)); if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) return; if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { phba->valid_vlan = 1; phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 0xFFF; } phba->fc_map[0] = fcoe_param->fc_map[0]; phba->fc_map[1] = fcoe_param->fc_map[1]; phba->fc_map[2] = fcoe_param->fc_map[2]; return; } /** * lpfc_get_rec_conf23 - Get a record type in config region data. * @buff: Buffer containing config region 23 data. * @size: Size of the data buffer. * @rec_type: Record type to be searched. * * This function searches config region data to find the beginning * of the record specified by record_type. If record found, this * function return pointer to the record else return NULL. 
*/ static uint8_t * lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) { uint32_t offset = 0, rec_length; if ((buff[0] == LPFC_REGION23_LAST_REC) || (size < sizeof(uint32_t))) return NULL; rec_length = buff[offset + 1]; /* * One TLV record has one word header and number of data words * specified in the rec_length field of the record header. */ while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) <= size) { if (buff[offset] == rec_type) return &buff[offset]; if (buff[offset] == LPFC_REGION23_LAST_REC) return NULL; offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); rec_length = buff[offset + 1]; } return NULL; } /** * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. * @phba: Pointer to lpfc_hba data structure. * @buff: Buffer containing config region 23 data. * @size: Size of the data buffer. * * This function parses the FCoE config parameters in config region 23 and * populate driver data structure with the parameters. */ void lpfc_parse_fcoe_conf(struct lpfc_hba *phba, uint8_t *buff, uint32_t size) { uint32_t offset = 0; uint8_t *rec_ptr; /* * If data size is less than 2 words signature and version cannot be * verified. */ if (size < 2*sizeof(uint32_t)) return; /* Check the region signature first */ if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2567 Config region 23 has bad signature\n"); return; } offset += 4; /* Check the data structure version */ if (buff[offset] != LPFC_REGION23_VERSION) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2568 Config region 23 has bad version\n"); return; } offset += 4; /* Read FCoE param record */ rec_ptr = lpfc_get_rec_conf23(&buff[offset], size - offset, FCOE_PARAM_TYPE); if (rec_ptr) lpfc_read_fcoe_param(phba, rec_ptr); /* Read FCF connection table */ rec_ptr = lpfc_get_rec_conf23(&buff[offset], size - offset, FCOE_CONN_TBL_TYPE); if (rec_ptr) lpfc_read_fcf_conn_tbl(phba, rec_ptr); } /* * lpfc_error_lost_link - IO failure from link event or FW reset check. * * @vport: Pointer to lpfc_vport data structure. * @ulp_status: IO completion status. * @ulp_word4: Reason code for the ulp_status. * * This function evaluates the ulp_status and ulp_word4 values * for specific error values that indicate an internal link fault * or fw reset event for the completing IO. Callers require this * common data to decide next steps on the IO. * * Return: * false - No link or reset error occurred. * true - A link or reset error occurred. */ bool lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4) { /* Mask off the extra port data to get just the reason code. */ u32 rsn_code = IOERR_PARAM_MASK & ulp_word4; if (ulp_status == IOSTAT_LOCAL_REJECT && (rsn_code == IOERR_SLI_ABORTED || rsn_code == IOERR_LINK_DOWN || rsn_code == IOERR_SLI_DOWN)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS, "0408 Report link error true: <x%x:x%x>\n", ulp_status, ulp_word4); return true; } return false; }
linux-master
drivers/scsi/lpfc/lpfc_hbadisc.c
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> #include <linux/vmalloc.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_nvme.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" #include "lpfc_bsg.h" #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* * debugfs interface * * To access this interface the user should: * # mount -t debugfs none /sys/kernel/debug * * The lpfc debugfs directory hierarchy is: * /sys/kernel/debug/lpfc/fnX/vportY * where X is the lpfc hba function unique_id * where Y is the vport VPI on that hba * * Debugging services available per vport: * discovery_trace * This is an ACSII readable file that contains a trace of the last * lpfc_debugfs_max_disc_trc events that happened on a specific vport. * See lpfc_debugfs.h for different categories of discovery events. * To enable the discovery trace, the following module parameters must be set: * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for * EACH vport. X MUST also be a power of 2. * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in * lpfc_debugfs.h . * * slow_ring_trace * This is an ACSII readable file that contains a trace of the last * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA. * To enable the slow ring trace, the following module parameters must be set: * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for * the HBA. X MUST also be a power of 2. 
*/ static int lpfc_debugfs_enable = 1; module_param(lpfc_debugfs_enable, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_disc_trc; module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, "Set debugfs discovery trace depth"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_slow_ring_trc; module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_nvmeio_trc; module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444); MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc, "Set debugfs NVME IO trace depth"); static int lpfc_debugfs_mask_disc_trc; module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, "Set debugfs discovery trace mask"); #include <linux/debugfs.h> static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); static unsigned long lpfc_debugfs_start_time = 0L; /* iDiag */ static struct lpfc_idiag idiag; /** * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer * @vport: The vport to gather the log info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine gathers the lpfc discovery debugfs data from the @vport and * dumps it to @buf up to @size number of bytes. It will start at the next entry * in the log and process the log until the end of the buffer. Then it will * gather from the beginning of the log and process until the current entry. * * Notes: * Discovery logging will be disabled while while this routine dumps the log. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) { int i, index, len, enable; uint32_t ms; struct lpfc_debugfs_trc *dtp; char *buffer; buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL); if (!buffer) return 0; enable = lpfc_debugfs_enable; lpfc_debugfs_enable = 0; len = 0; index = (atomic_read(&vport->disc_trc_cnt) + 1) & (lpfc_debugfs_max_disc_trc - 1); for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { dtp = vport->disc_trc + i; if (!dtp->fmt) continue; ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } for (i = 0; i < index; i++) { dtp = vport->disc_trc + i; if (!dtp->fmt) continue; ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } lpfc_debugfs_enable = enable; kfree(buffer); return len; } /** * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer * @phba: The HBA to gather the log info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine gathers the lpfc slow ring debugfs data from the @phba and * dumps it to @buf up to @size number of bytes. It will start at the next entry * in the log and process the log until the end of the buffer. Then it will * gather from the beginning of the log and process until the current entry. 
* * Notes: * Slow ring logging will be disabled while while this routine dumps the log. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) { int i, index, len, enable; uint32_t ms; struct lpfc_debugfs_trc *dtp; char *buffer; buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL); if (!buffer) return 0; enable = lpfc_debugfs_enable; lpfc_debugfs_enable = 0; len = 0; index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & (lpfc_debugfs_max_slow_ring_trc - 1); for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { dtp = phba->slow_ring_trc + i; if (!dtp->fmt) continue; ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } for (i = 0; i < index; i++) { dtp = phba->slow_ring_trc + i; if (!dtp->fmt) continue; ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } lpfc_debugfs_enable = enable; kfree(buffer); return len; } static int lpfc_debugfs_last_hbq = -1; /** * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer * @phba: The HBA to gather host buffer info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the host buffer queue info from the @phba to @buf up to * @size number of bytes. A header that describes the current hbq state will be * dumped to @buf first and then info on each hbq entry will be dumped to @buf * until @size bytes have been dumped or all the hbq info has been dumped. * * Notes: * This routine will rotate through each configured HBQ each time called. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. 
**/ static int lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) { int len = 0; int i, j, found, posted, low; uint32_t phys, raw_index, getidx; struct lpfc_hbq_init *hip; struct hbq_s *hbqs; struct lpfc_hbq_entry *hbqe; struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *hbq_buf; if (phba->sli_rev != 3) return 0; spin_lock_irq(&phba->hbalock); /* toggle between multiple hbqs, if any */ i = lpfc_sli_hbq_count(); if (i > 1) { lpfc_debugfs_last_hbq++; if (lpfc_debugfs_last_hbq >= i) lpfc_debugfs_last_hbq = 0; } else lpfc_debugfs_last_hbq = 0; i = lpfc_debugfs_last_hbq; len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i); hbqs = &phba->hbqs[i]; posted = 0; list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) posted++; hip = lpfc_hbq_defs[i]; len += scnprintf(buf+len, size-len, "idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n", hip->hbq_index, hip->profile, hip->rn, hip->buffer_count, hip->init_count, hip->add_count, posted); raw_index = phba->hbq_get[i]; getidx = le32_to_cpu(raw_index); len += scnprintf(buf+len, size-len, "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; for (j=0; j<hbqs->entry_count; j++) { len += scnprintf(buf+len, size-len, "%03d: %08x %04x %05x ", j, le32_to_cpu(hbqe->bde.addrLow), le32_to_cpu(hbqe->bde.tus.w), le32_to_cpu(hbqe->buffer_tag)); i = 0; found = 0; /* First calculate if slot has an associated posted buffer */ low = hbqs->hbqPutIdx - posted; if (low >= 0) { if ((j >= hbqs->hbqPutIdx) || (j < low)) { len += scnprintf(buf + len, size - len, "Unused\n"); goto skipit; } } else { if ((j >= hbqs->hbqPutIdx) && (j < (hbqs->entry_count+low))) { len += scnprintf(buf + len, size - len, "Unused\n"); goto skipit; } } /* Get the Buffer info for the posted buffer */ list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) { hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); if (phys == le32_to_cpu(hbqe->bde.addrLow)) { len += scnprintf(buf+len, size-len, "Buf%d: x%px %06x\n", i, hbq_buf->dbuf.virt, hbq_buf->tag); found = 1; break; } i++; } if (!found) { len += scnprintf(buf+len, size-len, "No DMAinfo?\n"); } skipit: hbqe++; if (len > LPFC_HBQINFO_SIZE - 54) break; } spin_unlock_irq(&phba->hbalock); return len; } static int lpfc_debugfs_last_xripool; /** * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer * @phba: The HBA to gather host buffer info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the Hardware Queue info from the @phba to @buf up to * @size number of bytes. A header that describes the current hdwq state will be * dumped to @buf first and then info on each hdwq entry will be dumped to @buf * until @size bytes have been dumped or all the hdwq info has been dumped. * * Notes: * This routine will rotate through each configured Hardware Queue each * time called. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. 
**/ static int lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) { struct lpfc_sli4_hdw_queue *qp; int len = 0; int i, out; unsigned long iflag; for (i = 0; i < phba->cfg_hdw_queue; i++) { if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80)) break; qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool]; len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i); spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); spin_lock(&qp->io_buf_list_get_lock); spin_lock(&qp->io_buf_list_put_lock); out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs); len += scnprintf(buf + len, size - len, "tot:%d get:%d put:%d mt:%d " "ABTS scsi:%d nvme:%d Out:%d\n", qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs, qp->empty_io_bufs, qp->abts_scsi_io_bufs, qp->abts_nvme_io_bufs, out); spin_unlock(&qp->io_buf_list_put_lock); spin_unlock(&qp->io_buf_list_get_lock); spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); lpfc_debugfs_last_xripool++; if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue) lpfc_debugfs_last_xripool = 0; } return len; } /** * lpfc_debugfs_multixripools_data - Display multi-XRI pools information * @phba: The HBA to gather host buffer info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine displays current multi-XRI pools information including XRI * count in public, private and txcmplq. It also displays current high and * low watermark. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size) { u32 i; u32 hwq_count; struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; struct lpfc_pvt_pool *pvt_pool; struct lpfc_pbl_pool *pbl_pool; u32 txcmplq_cnt; char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0}; if (phba->sli_rev != LPFC_SLI_REV4) return 0; if (!phba->sli4_hba.hdwq) return 0; if (!phba->cfg_xri_rebalancing) { i = lpfc_debugfs_commonxripools_data(phba, buf, size); return i; } /* * Pbl: Current number of free XRIs in public pool * Pvt: Current number of free XRIs in private pool * Busy: Current number of outstanding XRIs * HWM: Current high watermark * pvt_empty: Incremented by 1 when IO submission fails (no xri) * pbl_empty: Incremented by 1 when all pbl_pool are empty during * IO submission */ scnprintf(tmp, sizeof(tmp), "HWQ: Pbl Pvt Busy HWM | pvt_empty pbl_empty "); if (strlcat(buf, tmp, size) >= size) return strnlen(buf, size); #ifdef LPFC_MXP_STAT /* * MAXH: Max high watermark seen so far * above_lmt: Incremented by 1 if xri_owned > xri_limit during * IO submission * below_lmt: Incremented by 1 if xri_owned <= xri_limit during * IO submission * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from * local pbl_pool * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from * other pbl_pool */ scnprintf(tmp, sizeof(tmp), "MAXH above_lmt below_lmt locPbl_hit othPbl_hit"); if (strlcat(buf, tmp, size) >= size) return strnlen(buf, size); /* * sPbl: snapshot of Pbl 15 sec after stat gets cleared * sPvt: snapshot of Pvt 15 sec after stat gets cleared * sBusy: snapshot of Busy 15 sec after stat gets cleared */ scnprintf(tmp, sizeof(tmp), " | sPbl sPvt sBusy"); if (strlcat(buf, tmp, size) >= size) return strnlen(buf, size); #endif scnprintf(tmp, sizeof(tmp), "\n"); if (strlcat(buf, tmp, size) >= size) return strnlen(buf, size); hwq_count = 
phba->cfg_hdw_queue; for (i = 0; i < hwq_count; i++) { qp = &phba->sli4_hba.hdwq[i]; multixri_pool = qp->p_multixri_pool; if (!multixri_pool) continue; pbl_pool = &multixri_pool->pbl_pool; pvt_pool = &multixri_pool->pvt_pool; txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; scnprintf(tmp, sizeof(tmp), "%03d: %4d %4d %4d %4d | %10d %10d ", i, pbl_pool->count, pvt_pool->count, txcmplq_cnt, pvt_pool->high_watermark, qp->empty_io_bufs, multixri_pool->pbl_empty_count); if (strlcat(buf, tmp, size) >= size) break; #ifdef LPFC_MXP_STAT scnprintf(tmp, sizeof(tmp), "%4d %10d %10d %10d %10d", multixri_pool->stat_max_hwm, multixri_pool->above_limit_count, multixri_pool->below_limit_count, multixri_pool->local_pbl_hit_count, multixri_pool->other_pbl_hit_count); if (strlcat(buf, tmp, size) >= size) break; scnprintf(tmp, sizeof(tmp), " | %4d %4d %5d", multixri_pool->stat_pbl_count, multixri_pool->stat_pvt_count, multixri_pool->stat_busy_count); if (strlcat(buf, tmp, size) >= size) break; #endif scnprintf(tmp, sizeof(tmp), "\n"); if (strlcat(buf, tmp, size) >= size) break; } return strnlen(buf, size); } #ifdef LPFC_HDWQ_LOCK_STAT static int lpfc_debugfs_last_lock; /** * lpfc_debugfs_lockstat_data - Dump Hardware Queue info to a buffer * @phba: The HBA to gather host buffer info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the Hardware Queue info from the @phba to @buf up to * @size number of bytes. A header that describes the current hdwq state will be * dumped to @buf first and then info on each hdwq entry will be dumped to @buf * until @size bytes have been dumped or all the hdwq info has been dumped. * * Notes: * This routine will rotate through each configured Hardware Queue each * time called. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size) { struct lpfc_sli4_hdw_queue *qp; int len = 0; int i; if (phba->sli_rev != LPFC_SLI_REV4) return 0; if (!phba->sli4_hba.hdwq) return 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { if (len > (LPFC_HDWQINFO_SIZE - 100)) break; qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock]; len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i); if (phba->cfg_xri_rebalancing) { len += scnprintf(buf + len, size - len, "get_pvt:%d mv_pvt:%d " "mv2pub:%d mv2pvt:%d " "put_pvt:%d put_pub:%d wq:%d\n", qp->lock_conflict.alloc_pvt_pool, qp->lock_conflict.mv_from_pvt_pool, qp->lock_conflict.mv_to_pub_pool, qp->lock_conflict.mv_to_pvt_pool, qp->lock_conflict.free_pvt_pool, qp->lock_conflict.free_pub_pool, qp->lock_conflict.wq_access); } else { len += scnprintf(buf + len, size - len, "get:%d put:%d free:%d wq:%d\n", qp->lock_conflict.alloc_xri_get, qp->lock_conflict.alloc_xri_put, qp->lock_conflict.free_xri, qp->lock_conflict.wq_access); } lpfc_debugfs_last_lock++; if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue) lpfc_debugfs_last_lock = 0; } return len; } #endif static int lpfc_debugfs_last_hba_slim_off; /** * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer * @phba: The HBA to gather SLIM info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the current contents of HBA SLIM for the HBA associated * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data. 
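 *
 * The raw words are printed eight per line, prefixed with their byte offset.
 * A stand-alone model of just that formatting (plain C, hypothetical names):
 *
 *	#include <stdio.h>
 *
 *	static void dump_words(const unsigned int *p, int nwords, unsigned int off)
 *	{
 *		int i;
 *
 *		// One line per eight 32-bit words, matching the layout below
 *		for (i = 0; i + 8 <= nwords; i += 8, off += 32)
 *			printf("%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 *			       off, p[i], p[i + 1], p[i + 2], p[i + 3],
 *			       p[i + 4], p[i + 5], p[i + 6], p[i + 7]);
 *	}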
* * Notes: * This routine will only dump up to 1024 bytes of data each time called and * should be called multiple times to dump the entire HBA SLIM. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) { int len = 0; int i, off; uint32_t *ptr; char *buffer; buffer = kmalloc(1024, GFP_KERNEL); if (!buffer) return 0; off = 0; spin_lock_irq(&phba->hbalock); len += scnprintf(buf+len, size-len, "HBA SLIM\n"); lpfc_memcpy_from_slim(buffer, phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024); ptr = (uint32_t *)&buffer[0]; off = lpfc_debugfs_last_hba_slim_off; /* Set it up for the next time */ lpfc_debugfs_last_hba_slim_off += 1024; if (lpfc_debugfs_last_hba_slim_off >= 4096) lpfc_debugfs_last_hba_slim_off = 0; i = 1024; while (i > 0) { len += scnprintf(buf+len, size-len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); ptr += 8; i -= (8 * sizeof(uint32_t)); off += (8 * sizeof(uint32_t)); } spin_unlock_irq(&phba->hbalock); kfree(buffer); return len; } /** * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer * @phba: The HBA to gather Host SLIM info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the current contents of host SLIM for the host associated * with @phba to @buf up to @size bytes of data. The dump will contain the * Mailbox, PCB, Rings, and Registers that are located in host memory. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) { int len = 0; int i, off; uint32_t word0, word1, word2, word3; uint32_t *ptr; struct lpfc_pgp *pgpp; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; off = 0; spin_lock_irq(&phba->hbalock); len += scnprintf(buf+len, size-len, "SLIM Mailbox\n"); ptr = (uint32_t *)phba->slim2p.virt; i = sizeof(MAILBOX_t); while (i > 0) { len += scnprintf(buf+len, size-len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); ptr += 8; i -= (8 * sizeof(uint32_t)); off += (8 * sizeof(uint32_t)); } len += scnprintf(buf+len, size-len, "SLIM PCB\n"); ptr = (uint32_t *)phba->pcb; i = sizeof(PCB_t); while (i > 0) { len += scnprintf(buf+len, size-len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); ptr += 8; i -= (8 * sizeof(uint32_t)); off += (8 * sizeof(uint32_t)); } if (phba->sli_rev <= LPFC_SLI_REV3) { for (i = 0; i < 4; i++) { pgpp = &phba->port_gp[i]; pring = &psli->sli3_ring[i]; len += scnprintf(buf+len, size-len, "Ring %d: CMD GetInx:%d " "(Max:%d Next:%d " "Local:%d flg:x%x) " "RSP PutInx:%d Max:%d\n", i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb, pring->sli.sli3.next_cmdidx, pring->sli.sli3.local_getidx, pring->flag, pgpp->rspPutInx, pring->sli.sli3.numRiocb); } word0 = readl(phba->HAregaddr); word1 = readl(phba->CAregaddr); word2 = readl(phba->HSregaddr); word3 = readl(phba->HCregaddr); len += scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " "HC:%08x\n", word0, word1, word2, word3); } spin_unlock_irq(&phba->hbalock); return len; } /** * lpfc_debugfs_nodelist_data - Dump target node list to a buffer * 
@vport: The vport to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the current target node list associated with @vport to * @buf up to @size bytes of data. Each node entry in the dump will contain a * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields. * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) { int len = 0; int i, iocnt, outio, cnt; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; unsigned char *statep; struct nvme_fc_local_port *localport; struct nvme_fc_remote_port *nrport = NULL; struct lpfc_nvme_rport *rport; cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); outio = 0; len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { iocnt = 0; if (!cnt) { len += scnprintf(buf+len, size-len, "Missing Nodelist Entries\n"); break; } cnt--; switch (ndlp->nlp_state) { case NLP_STE_UNUSED_NODE: statep = "UNUSED"; break; case NLP_STE_PLOGI_ISSUE: statep = "PLOGI "; break; case NLP_STE_ADISC_ISSUE: statep = "ADISC "; break; case NLP_STE_REG_LOGIN_ISSUE: statep = "REGLOG"; break; case NLP_STE_PRLI_ISSUE: statep = "PRLI "; break; case NLP_STE_LOGO_ISSUE: statep = "LOGO "; break; case NLP_STE_UNMAPPED_NODE: statep = "UNMAP "; iocnt = 1; break; case NLP_STE_MAPPED_NODE: statep = "MAPPED"; iocnt = 1; break; case NLP_STE_NPR_NODE: statep = "NPR "; break; default: statep = "UNKNOWN"; } len += scnprintf(buf+len, size-len, "%s DID:x%06x ", statep, ndlp->nlp_DID); len += scnprintf(buf+len, size-len, "WWPN x%016llx ", wwn_to_u64(ndlp->nlp_portname.u.wwn)); len += scnprintf(buf+len, size-len, "WWNN x%016llx ", wwn_to_u64(ndlp->nlp_nodename.u.wwn)); len += scnprintf(buf+len, size-len, "RPI:x%04x ", ndlp->nlp_rpi); len += scnprintf(buf+len, size-len, "flag:x%08x ", ndlp->nlp_flag); if (!ndlp->nlp_type) len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) len += scnprintf(buf+len, size-len, "FC_NODE "); if (ndlp->nlp_type & NLP_FABRIC) { len += scnprintf(buf+len, size-len, "FABRIC "); iocnt = 0; } if (ndlp->nlp_type & NLP_FCP_TARGET) len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ", ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) len += scnprintf(buf+len, size-len, "FCP_INITIATOR "); if (ndlp->nlp_type & NLP_NVME_TARGET) len += scnprintf(buf + len, size - len, "NVME_TGT sid:%d ", NLP_NO_SID); if (ndlp->nlp_type & NLP_NVME_INITIATOR) len += scnprintf(buf + len, size - len, "NVME_INITIATOR "); len += scnprintf(buf+len, size-len, "refcnt:%d", kref_read(&ndlp->kref)); if (iocnt) { i = atomic_read(&ndlp->cmd_pending); len += scnprintf(buf + len, size - len, " OutIO:x%x Qdepth x%x", i, ndlp->cmd_qdepth); outio += i; } len += scnprintf(buf+len, size-len, " xpt:x%x", ndlp->fc4_xpt_flags); if (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) len += scnprintf(buf+len, size-len, " defer:%x", ndlp->nlp_defer_did); len += scnprintf(buf+len, size-len, "\n"); } spin_unlock_irq(shost->host_lock); len += scnprintf(buf + len, size - len, "\nOutstanding IO x%x\n", outio); if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { len += scnprintf(buf + len, size - len, "\nNVME Targetport Entry ...\n"); /* Port 
state is only one of two values for now. */ if (phba->targetport->port_id) statep = "REGISTERED"; else statep = "INIT"; len += scnprintf(buf + len, size - len, "TGT WWNN x%llx WWPN x%llx State %s\n", wwn_to_u64(vport->fc_nodename.u.wwn), wwn_to_u64(vport->fc_portname.u.wwn), statep); len += scnprintf(buf + len, size - len, " Targetport DID x%06x\n", phba->targetport->port_id); goto out_exit; } len += scnprintf(buf + len, size - len, "\nNVME Lport/Rport Entries ...\n"); localport = vport->localport; if (!localport) goto out_exit; spin_lock_irq(shost->host_lock); /* Port state is only one of two values for now. */ if (localport->port_id) statep = "ONLINE"; else statep = "UNKNOWN "; len += scnprintf(buf + len, size - len, "Lport DID x%06x PortState %s\n", localport->port_id, statep); len += scnprintf(buf + len, size - len, "\tRport List:\n"); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { /* local short-hand pointer. */ spin_lock(&ndlp->lock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; else nrport = NULL; spin_unlock(&ndlp->lock); if (!nrport) continue; /* Port state is only one of two values for now. */ switch (nrport->port_state) { case FC_OBJSTATE_ONLINE: statep = "ONLINE"; break; case FC_OBJSTATE_UNKNOWN: statep = "UNKNOWN "; break; default: statep = "UNSUPPORTED"; break; } /* Tab in to show lport ownership. */ len += scnprintf(buf + len, size - len, "\t%s Port ID:x%06x ", statep, nrport->port_id); len += scnprintf(buf + len, size - len, "WWPN x%llx ", nrport->port_name); len += scnprintf(buf + len, size - len, "WWNN x%llx ", nrport->node_name); /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) len += scnprintf(buf + len, size - len, "INITIATOR "); if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) len += scnprintf(buf + len, size - len, "TARGET "); if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) len += scnprintf(buf + len, size - len, "DISCSRVC "); if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | FC_PORT_ROLE_NVME_DISCOVERY)) len += scnprintf(buf + len, size - len, "UNKNOWN ROLE x%x", nrport->port_role); /* Terminate the string. */ len += scnprintf(buf + len, size - len, "\n"); } spin_unlock_irq(shost->host_lock); out_exit: return len; } /** * lpfc_debugfs_nvmestat_data - Dump target node list to a buffer * @vport: The vport to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the NVME statistics associated with @vport * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. 
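 *
 * The per-queue "OutIO" values reported for the initiator case are simple
 * issue-versus-completion accounting; a minimal sketch (hypothetical struct,
 * not driver code):
 *
 *	// Counters in the spirit of struct lpfc_fc4_ctrl_stat
 *	struct q_stat {
 *		unsigned long long rd, wr, ctrl, cmpls;
 *	};
 *
 *	static unsigned long long outstanding(const struct q_stat *s)
 *	{
 *		// Requests issued in all three categories minus completions
 *		return (s->rd + s->wr + s->ctrl) - s->cmpls;
 *	}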
**/ static int lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; struct lpfc_nvmet_tgtport *tgtp; struct lpfc_async_xchg_ctx *ctxp, *next_ctxp; struct nvme_fc_local_port *localport; struct lpfc_fc4_ctrl_stat *cstat; struct lpfc_nvme_lport *lport; uint64_t data1, data2, data3; uint64_t tot, totin, totout; int cnt, i; int len = 0; if (phba->nvmet_support) { if (!phba->targetport) return len; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; len += scnprintf(buf + len, size - len, "\nNVME Targetport Statistics\n"); len += scnprintf(buf + len, size - len, "LS: Rcv %08x Drop %08x Abort %08x\n", atomic_read(&tgtp->rcv_ls_req_in), atomic_read(&tgtp->rcv_ls_req_drop), atomic_read(&tgtp->xmt_ls_abort)); if (atomic_read(&tgtp->rcv_ls_req_in) != atomic_read(&tgtp->rcv_ls_req_out)) { len += scnprintf(buf + len, size - len, "Rcv LS: in %08x != out %08x\n", atomic_read(&tgtp->rcv_ls_req_in), atomic_read(&tgtp->rcv_ls_req_out)); } len += scnprintf(buf + len, size - len, "LS: Xmt %08x Drop %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_ls_rsp), atomic_read(&tgtp->xmt_ls_drop), atomic_read(&tgtp->xmt_ls_rsp_cmpl)); len += scnprintf(buf + len, size - len, "LS: RSP Abort %08x xb %08x Err %08x\n", atomic_read(&tgtp->xmt_ls_rsp_aborted), atomic_read(&tgtp->xmt_ls_rsp_xb_set), atomic_read(&tgtp->xmt_ls_rsp_error)); len += scnprintf(buf + len, size - len, "FCP: Rcv %08x Defer %08x Release %08x " "Drop %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_defer), atomic_read(&tgtp->xmt_fcp_release), atomic_read(&tgtp->rcv_fcp_cmd_drop)); if (atomic_read(&tgtp->rcv_fcp_cmd_in) != atomic_read(&tgtp->rcv_fcp_cmd_out)) { len += scnprintf(buf + len, size - len, "Rcv FCP: in %08x != out %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out)); } len += scnprintf(buf + len, size - len, "FCP Rsp: read %08x readrsp %08x " "write %08x rsp %08x\n", atomic_read(&tgtp->xmt_fcp_read), atomic_read(&tgtp->xmt_fcp_read_rsp), atomic_read(&tgtp->xmt_fcp_write), atomic_read(&tgtp->xmt_fcp_rsp)); len += scnprintf(buf + len, size - len, "FCP Rsp Cmpl: %08x err %08x drop %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_cmpl), atomic_read(&tgtp->xmt_fcp_rsp_error), atomic_read(&tgtp->xmt_fcp_rsp_drop)); len += scnprintf(buf + len, size - len, "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_aborted), atomic_read(&tgtp->xmt_fcp_rsp_xb_set), atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); len += scnprintf(buf + len, size - len, "ABORT: Xmt %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_fcp_abort), atomic_read(&tgtp->xmt_fcp_abort_cmpl)); len += scnprintf(buf + len, size - len, "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", atomic_read(&tgtp->xmt_abort_sol), atomic_read(&tgtp->xmt_abort_unsol), atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); len += scnprintf(buf + len, size - len, "\n"); cnt = 0; spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { cnt++; } spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); if (cnt) { len += scnprintf(buf + len, size - len, "ABORT: %d ctx entries\n", cnt); spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) break; len += scnprintf(buf + len, size - len, "Entry: oxid %x state %x " "flag %x\n", ctxp->oxid, ctxp->state, 
ctxp->flag); } spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); } /* Calculate outstanding IOs */ tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); tot += atomic_read(&tgtp->xmt_fcp_release); tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; len += scnprintf(buf + len, size - len, "IO_CTX: %08x WAIT: cur %08x tot %08x\n" "CTX Outstanding %08llx\n", phba->sli4_hba.nvmet_xri_cnt, phba->sli4_hba.nvmet_io_wait_cnt, phba->sli4_hba.nvmet_io_wait_total, tot); } else { if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return len; localport = vport->localport; if (!localport) return len; lport = (struct lpfc_nvme_lport *)localport->private; if (!lport) return len; len += scnprintf(buf + len, size - len, "\nNVME HDWQ Statistics\n"); len += scnprintf(buf + len, size - len, "LS: Xmt %016x Cmpl %016x\n", atomic_read(&lport->fc4NvmeLsRequests), atomic_read(&lport->fc4NvmeLsCmpls)); totin = 0; totout = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; tot = cstat->io_cmpls; totin += tot; data1 = cstat->input_requests; data2 = cstat->output_requests; data3 = cstat->control_requests; totout += (data1 + data2 + data3); /* Limit to 32, debugfs display buffer limitation */ if (i >= 32) continue; len += scnprintf(buf + len, PAGE_SIZE - len, "HDWQ (%d): Rd %016llx Wr %016llx " "IO %016llx ", i, data1, data2, data3); len += scnprintf(buf + len, PAGE_SIZE - len, "Cmpl %016llx OutIO %016llx\n", tot, ((data1 + data2 + data3) - tot)); } len += scnprintf(buf + len, PAGE_SIZE - len, "Total FCP Cmpl %016llx Issue %016llx " "OutIO %016llx\n", totin, totout, totout - totin); len += scnprintf(buf + len, size - len, "LS Xmt Err: Abrt %08x Err %08x " "Cmpl Err: xb %08x Err %08x\n", atomic_read(&lport->xmt_ls_abort), atomic_read(&lport->xmt_ls_err), atomic_read(&lport->cmpl_ls_xb), atomic_read(&lport->cmpl_ls_err)); len += scnprintf(buf + len, size - len, "FCP Xmt Err: noxri %06x nondlp %06x " "qdepth %06x wqerr %06x err %06x Abrt %06x\n", atomic_read(&lport->xmt_fcp_noxri), atomic_read(&lport->xmt_fcp_bad_ndlp), atomic_read(&lport->xmt_fcp_qdepth), atomic_read(&lport->xmt_fcp_wqerr), atomic_read(&lport->xmt_fcp_err), atomic_read(&lport->xmt_fcp_abort)); len += scnprintf(buf + len, size - len, "FCP Cmpl Err: xb %08x Err %08x\n", atomic_read(&lport->cmpl_fcp_xb), atomic_read(&lport->cmpl_fcp_err)); } return len; } /** * lpfc_debugfs_scsistat_data - Dump target node list to a buffer * @vport: The vport to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the SCSI statistics associated with @vport * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. 
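 *
 * Output is built by formatting each row into a scratch buffer and appending
 * it until the destination is full. A standard-C analogue of that
 * scnprintf()/strlcat() pattern (hypothetical names, not driver code):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static int append_row(char *buf, size_t size, int q, unsigned long io)
 *	{
 *		char tmp[64];
 *
 *		snprintf(tmp, sizeof(tmp), "HDWQ (%d): IO %lu\n", q, io);
 *		if (strlen(buf) + strlen(tmp) + 1 > size)
 *			return 1;	// full: caller bails out, like buffer_done
 *		strcat(buf, tmp);
 *		return 0;
 *	}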
**/ static int lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size) { int len; struct lpfc_hba *phba = vport->phba; struct lpfc_fc4_ctrl_stat *cstat; u64 data1, data2, data3; u64 tot, totin, totout; int i; char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || (phba->sli_rev != LPFC_SLI_REV4)) return 0; scnprintf(buf, size, "SCSI HDWQ Statistics\n"); totin = 0; totout = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; tot = cstat->io_cmpls; totin += tot; data1 = cstat->input_requests; data2 = cstat->output_requests; data3 = cstat->control_requests; totout += (data1 + data2 + data3); scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " "IO %016llx ", i, data1, data2, data3); if (strlcat(buf, tmp, size) >= size) goto buffer_done; scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", tot, ((data1 + data2 + data3) - tot)); if (strlcat(buf, tmp, size) >= size) goto buffer_done; } scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " "OutIO %016llx\n", totin, totout, totout - totin); strlcat(buf, tmp, size); buffer_done: len = strnlen(buf, size); return len; } void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { uint64_t seg1, seg2, seg3, seg4; uint64_t segsum; if (!lpfc_cmd->ts_last_cmd || !lpfc_cmd->ts_cmd_start || !lpfc_cmd->ts_cmd_wqput || !lpfc_cmd->ts_isr_cmpl || !lpfc_cmd->ts_data_io) return; if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start) return; if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd) return; if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start) return; if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput) return; if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl) return; /* * Segment 1 - Time from Last FCP command cmpl is handed * off to NVME Layer to start of next command. * Segment 2 - Time from Driver receives a IO cmd start * from NVME Layer to WQ put is done on IO cmd. * Segment 3 - Time from Driver WQ put is done on IO cmd * to MSI-X ISR for IO cmpl. * Segment 4 - Time from MSI-X ISR for IO cmpl to when * cmpl is handled off to the NVME Layer. */ seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd; if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ seg1 = 0; /* Calculate times relative to start of IO */ seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start); segsum = seg2; seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start; if (segsum > seg3) return; seg3 -= segsum; segsum += seg3; seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start; if (segsum > seg4) return; seg4 -= segsum; phba->ktime_data_samples++; phba->ktime_seg1_total += seg1; if (seg1 < phba->ktime_seg1_min) phba->ktime_seg1_min = seg1; else if (seg1 > phba->ktime_seg1_max) phba->ktime_seg1_max = seg1; phba->ktime_seg2_total += seg2; if (seg2 < phba->ktime_seg2_min) phba->ktime_seg2_min = seg2; else if (seg2 > phba->ktime_seg2_max) phba->ktime_seg2_max = seg2; phba->ktime_seg3_total += seg3; if (seg3 < phba->ktime_seg3_min) phba->ktime_seg3_min = seg3; else if (seg3 > phba->ktime_seg3_max) phba->ktime_seg3_max = seg3; phba->ktime_seg4_total += seg4; if (seg4 < phba->ktime_seg4_min) phba->ktime_seg4_min = seg4; else if (seg4 > phba->ktime_seg4_max) phba->ktime_seg4_max = seg4; lpfc_cmd->ts_last_cmd = 0; lpfc_cmd->ts_cmd_start = 0; lpfc_cmd->ts_cmd_wqput = 0; lpfc_cmd->ts_isr_cmpl = 0; lpfc_cmd->ts_data_io = 0; } /** * lpfc_debugfs_ioktime_data - Dump target node list to a buffer * @vport: The vport to gather target node info from. 
* @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the NVME statistics associated with @vport * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; int len = 0; if (phba->nvmet_support == 0) { /* Initiator */ len += scnprintf(buf + len, PAGE_SIZE - len, "ktime %s: Total Samples: %lld\n", (phba->ktime_on ? "Enabled" : "Disabled"), phba->ktime_data_samples); if (phba->ktime_data_samples == 0) return len; len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 1: Last Cmd cmpl " "done -to- Start of next Cmd (in driver)\n"); len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg1_total, phba->ktime_data_samples), phba->ktime_seg1_min, phba->ktime_seg1_max); len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 2: Driver start of Cmd " "-to- Firmware WQ doorbell\n"); len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg2_total, phba->ktime_data_samples), phba->ktime_seg2_min, phba->ktime_seg2_max); len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 3: Firmware WQ doorbell -to- " "MSI-X ISR cmpl\n"); len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg3_total, phba->ktime_data_samples), phba->ktime_seg3_min, phba->ktime_seg3_max); len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 4: MSI-X ISR cmpl -to- " "Cmd cmpl done\n"); len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg4_total, phba->ktime_data_samples), phba->ktime_seg4_min, phba->ktime_seg4_max); len += scnprintf( buf + len, PAGE_SIZE - len, "Total IO avg time: %08lld\n", div_u64(phba->ktime_seg1_total + phba->ktime_seg2_total + phba->ktime_seg3_total + phba->ktime_seg4_total, phba->ktime_data_samples)); return len; } /* NVME Target */ len += scnprintf(buf + len, PAGE_SIZE-len, "ktime %s: Total Samples: %lld %lld\n", (phba->ktime_on ? 
"Enabled" : "Disabled"), phba->ktime_data_samples, phba->ktime_status_samples); if (phba->ktime_data_samples == 0) return len; len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 1: MSI-X ISR Rcv cmd -to- " "cmd pass to NVME Layer\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg1_total, phba->ktime_data_samples), phba->ktime_seg1_min, phba->ktime_seg1_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 2: cmd pass to NVME Layer- " "-to- Driver rcv cmd OP (action)\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg2_total, phba->ktime_data_samples), phba->ktime_seg2_min, phba->ktime_seg2_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 3: Driver rcv cmd OP -to- " "Firmware WQ doorbell: cmd\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg3_total, phba->ktime_data_samples), phba->ktime_seg3_min, phba->ktime_seg3_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 4: Firmware WQ doorbell: cmd " "-to- MSI-X ISR for cmd cmpl\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg4_total, phba->ktime_data_samples), phba->ktime_seg4_min, phba->ktime_seg4_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 5: MSI-X ISR for cmd cmpl " "-to- NVME layer passed cmd done\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg5_total, phba->ktime_data_samples), phba->ktime_seg5_min, phba->ktime_seg5_max); if (phba->ktime_status_samples == 0) { len += scnprintf(buf + len, PAGE_SIZE-len, "Total: cmd received by MSI-X ISR " "-to- cmd completed on wire\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld " "max %08lld\n", div_u64(phba->ktime_seg10_total, phba->ktime_data_samples), phba->ktime_seg10_min, phba->ktime_seg10_max); return len; } len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 6: NVME layer passed cmd done " "-to- Driver rcv rsp status OP\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg6_total, phba->ktime_status_samples), phba->ktime_seg6_min, phba->ktime_seg6_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 7: Driver rcv rsp status OP " "-to- Firmware WQ doorbell: status\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg7_total, phba->ktime_status_samples), phba->ktime_seg7_min, phba->ktime_seg7_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 8: Firmware WQ doorbell: status" " -to- MSI-X ISR for status cmpl\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg8_total, phba->ktime_status_samples), phba->ktime_seg8_min, phba->ktime_seg8_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 9: MSI-X ISR for status cmpl " "-to- NVME layer passed status done\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg9_total, phba->ktime_status_samples), phba->ktime_seg9_min, phba->ktime_seg9_max); len += scnprintf(buf + len, PAGE_SIZE-len, "Total: cmd received by MSI-X ISR -to- " "cmd completed on wire\n"); len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg10_total, phba->ktime_status_samples), phba->ktime_seg10_min, phba->ktime_seg10_max); return len; } /** * 
lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer * @phba: The phba to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the NVME IO trace associated with @phba * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) { struct lpfc_debugfs_nvmeio_trc *dtp; int i, state, index, skip; int len = 0; state = phba->nvmeio_trc_on; index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) & (phba->nvmeio_trc_size - 1); skip = phba->nvmeio_trc_output_idx; len += scnprintf(buf + len, size - len, "%s IO Trace %s: next_idx %d skip %d size %d\n", (phba->nvmet_support ? "NVME" : "NVMET"), (state ? "Enabled" : "Disabled"), index, skip, phba->nvmeio_trc_size); if (!phba->nvmeio_trc || state) return len; /* trace MUST bhe off to continue */ for (i = index; i < phba->nvmeio_trc_size; i++) { if (skip) { skip--; continue; } dtp = phba->nvmeio_trc + i; phba->nvmeio_trc_output_idx++; if (!dtp->fmt) continue; len += scnprintf(buf + len, size - len, dtp->fmt, dtp->data1, dtp->data2, dtp->data3); if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { phba->nvmeio_trc_output_idx = 0; len += scnprintf(buf + len, size - len, "Trace Complete\n"); goto out; } if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { len += scnprintf(buf + len, size - len, "Trace Continue (%d of %d)\n", phba->nvmeio_trc_output_idx, phba->nvmeio_trc_size); goto out; } } for (i = 0; i < index; i++) { if (skip) { skip--; continue; } dtp = phba->nvmeio_trc + i; phba->nvmeio_trc_output_idx++; if (!dtp->fmt) continue; len += scnprintf(buf + len, size - len, dtp->fmt, dtp->data1, dtp->data2, dtp->data3); if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { phba->nvmeio_trc_output_idx = 0; len += scnprintf(buf + len, size - len, "Trace Complete\n"); goto out; } if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { len += scnprintf(buf + len, size - len, "Trace Continue (%d of %d)\n", phba->nvmeio_trc_output_idx, phba->nvmeio_trc_size); goto out; } } len += scnprintf(buf + len, size - len, "Trace Done\n"); out: return len; } /** * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer * @vport: The vport to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: * This routine dumps the NVME + SCSI statistics associated with @vport * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; struct lpfc_hdwq_stat *c_stat; int i, j, len; uint32_t tot_xmt; uint32_t tot_rcv; uint32_t tot_cmpl; char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n"); if (strlcat(buf, tmp, size) >= size) goto buffer_done; scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ", (phba->hdwqstat_on & (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ? "Enabled" : "Disabled")); if (strlcat(buf, tmp, size) >= size) goto buffer_done; scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ", (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ? 
"Enabled" : "Disabled")); if (strlcat(buf, tmp, size) >= size) goto buffer_done; scnprintf(tmp, sizeof(tmp), "\n\n"); if (strlcat(buf, tmp, size) >= size) goto buffer_done; for (i = 0; i < phba->cfg_hdw_queue; i++) { tot_rcv = 0; tot_xmt = 0; tot_cmpl = 0; for_each_present_cpu(j) { c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j); /* Only display for this HDWQ */ if (i != c_stat->hdwq_no) continue; /* Only display non-zero counters */ if (!c_stat->xmt_io && !c_stat->cmpl_io && !c_stat->rcv_io) continue; if (!tot_xmt && !tot_cmpl && !tot_rcv) { /* Print HDWQ string only the first time */ scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i); if (strlcat(buf, tmp, size) >= size) goto buffer_done; } tot_xmt += c_stat->xmt_io; tot_cmpl += c_stat->cmpl_io; if (phba->nvmet_support) tot_rcv += c_stat->rcv_io; scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j); if (strlcat(buf, tmp, size) >= size) goto buffer_done; if (phba->nvmet_support) { scnprintf(tmp, sizeof(tmp), "XMT 0x%x CMPL 0x%x RCV 0x%x |", c_stat->xmt_io, c_stat->cmpl_io, c_stat->rcv_io); if (strlcat(buf, tmp, size) >= size) goto buffer_done; } else { scnprintf(tmp, sizeof(tmp), "XMT 0x%x CMPL 0x%x |", c_stat->xmt_io, c_stat->cmpl_io); if (strlcat(buf, tmp, size) >= size) goto buffer_done; } } /* Check if nothing to display */ if (!tot_xmt && !tot_cmpl && !tot_rcv) continue; scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: "); if (strlcat(buf, tmp, size) >= size) goto buffer_done; if (phba->nvmet_support) { scnprintf(tmp, sizeof(tmp), "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n", tot_xmt, tot_cmpl, tot_rcv); if (strlcat(buf, tmp, size) >= size) goto buffer_done; } else { scnprintf(tmp, sizeof(tmp), "XMT 0x%x CMPL 0x%x]\n\n", tot_xmt, tot_cmpl); if (strlcat(buf, tmp, size) >= size) goto buffer_done; } } buffer_done: len = strnlen(buf, size); return len; } #endif /** * lpfc_debugfs_disc_trc - Store discovery trace log * @vport: The vport to associate this trace string with for retrieval. * @mask: Log entry classification. * @fmt: Format string to be displayed when dumping the log. * @data1: 1st data parameter to be applied to @fmt. * @data2: 2nd data parameter to be applied to @fmt. * @data3: 3rd data parameter to be applied to @fmt. * * Description: * This routine is used by the driver code to add a debugfs log entry to the * discovery trace buffer associated with @vport. Only entries with a @mask that * match the current debugfs discovery mask will be saved. Entries that do not * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like * printf when displaying the log. **/ inline void lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, uint32_t data1, uint32_t data2, uint32_t data3) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_debugfs_trc *dtp; int index; if (!(lpfc_debugfs_mask_disc_trc & mask)) return; if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc || !vport || !vport->disc_trc) return; index = atomic_inc_return(&vport->disc_trc_cnt) & (lpfc_debugfs_max_disc_trc - 1); dtp = vport->disc_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; } /** * lpfc_debugfs_slow_ring_trc - Store slow ring trace log * @phba: The phba to associate this trace string with for retrieval. * @fmt: Format string to be displayed when dumping the log. * @data1: 1st data parameter to be applied to @fmt. * @data2: 2nd data parameter to be applied to @fmt. * @data3: 3rd data parameter to be applied to @fmt. 
* * Description: * This routine is used by the driver code to add a debugfs log entry to the * discovery trace buffer associated with @vport. @fmt, @data1, @data2, and * @data3 are used like printf when displaying the log. **/ inline void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, uint32_t data1, uint32_t data2, uint32_t data3) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_debugfs_trc *dtp; int index; if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc || !phba || !phba->slow_ring_trc) return; index = atomic_inc_return(&phba->slow_ring_trc_cnt) & (lpfc_debugfs_max_slow_ring_trc - 1); dtp = phba->slow_ring_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; } /** * lpfc_debugfs_nvme_trc - Store NVME/NVMET trace log * @phba: The phba to associate this trace string with for retrieval. * @fmt: Format string to be displayed when dumping the log. * @data1: 1st data parameter to be applied to @fmt. * @data2: 2nd data parameter to be applied to @fmt. * @data3: 3rd data parameter to be applied to @fmt. * * Description: * This routine is used by the driver code to add a debugfs log entry to the * nvme trace buffer associated with @phba. @fmt, @data1, @data2, and * @data3 are used like printf when displaying the log. **/ inline void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt, uint16_t data1, uint16_t data2, uint32_t data3) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_debugfs_nvmeio_trc *dtp; int index; if (!phba->nvmeio_trc_on || !phba->nvmeio_trc) return; index = atomic_inc_return(&phba->nvmeio_trc_cnt) & (phba->nvmeio_trc_size - 1); dtp = phba->nvmeio_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; #endif } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /** * lpfc_debugfs_disc_trc_open - Open the discovery trace log * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; int size; int rc = -ENOMEM; if (!lpfc_debugfs_max_disc_trc) { rc = -ENOSPC; goto out; } debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); size = PAGE_ALIGN(size); debug->buffer = kmalloc(size, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size); file->private_data = debug; rc = 0; out: return rc; } /** * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. 
It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int size; int rc = -ENOMEM; if (!lpfc_debugfs_max_slow_ring_trc) { rc = -ENOSPC; goto out; } debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE); size = PAGE_ALIGN(size); debug->buffer = kmalloc(size, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size); file->private_data = debug; rc = 0; out: return rc; } /** * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer, LPFC_HBQINFO_SIZE); file->private_data = debug; rc = 0; out: return rc; } /** * lpfc_debugfs_multixripools_open - Open the multixripool debugfs buffer * @inode: The inode pointer that contains a hba pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the hba from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this hba, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_multixripools_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kzalloc(LPFC_DUMP_MULTIXRIPOOL_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_multixripools_data( phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE); debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } #ifdef LPFC_HDWQ_LOCK_STAT /** * lpfc_debugfs_lockstat_open - Open the lockstat debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. 
* * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_lockstat_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_lockstat_data(phba, debug->buffer, LPFC_HBQINFO_SIZE); file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; struct lpfc_sli4_hdw_queue *qp; char mybuf[64]; char *pbuf; int i; size_t bsize; memset(mybuf, 0, sizeof(mybuf)); bsize = min(nbytes, (sizeof(mybuf) - 1)); if (copy_from_user(mybuf, buf, bsize)) return -EFAULT; pbuf = &mybuf[0]; if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || (strncmp(pbuf, "zero", strlen("zero")) == 0)) { for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; qp->lock_conflict.alloc_xri_get = 0; qp->lock_conflict.alloc_xri_put = 0; qp->lock_conflict.free_xri = 0; qp->lock_conflict.wq_access = 0; qp->lock_conflict.alloc_pvt_pool = 0; qp->lock_conflict.mv_from_pvt_pool = 0; qp->lock_conflict.mv_to_pub_pool = 0; qp->lock_conflict.mv_to_pvt_pool = 0; qp->lock_conflict.free_pvt_pool = 0; qp->lock_conflict.free_pub_pool = 0; qp->lock_conflict.wq_access = 0; } } return bsize; } #endif static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, char *buffer, int size) { int copied = 0; struct lpfc_dmabuf *dmabuf, *next; memset(buffer, 0, size); spin_lock_irq(&phba->hbalock); if (phba->ras_fwlog.state != ACTIVE) { spin_unlock_irq(&phba->hbalock); return -EINVAL; } spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(dmabuf, next, &phba->ras_fwlog.fwlog_buff_list, list) { /* Check if copying will go over size and a '\0' char */ if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) { memcpy(buffer + copied, dmabuf->virt, size - copied - 1); copied += size - copied - 1; break; } memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE); copied += LPFC_RAS_MAX_ENTRY_SIZE; } return copied; } static int lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; vfree(debug->buffer); kfree(debug); return 0; } /** * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. 
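 *
 * The buffer allocated below is LPFC_RAS_MIN_BUFF_POST_SIZE scaled by the
 * configured cfg_ras_fwlog_buffsize, computed with an overflow check. A
 * portable analogue of that check_mul_overflow() sizing (hypothetical names,
 * not driver code):
 *
 *	#include <limits.h>
 *	#include <stdbool.h>
 *
 *	static bool ras_buf_size(int units, int unit_size, int *out)
 *	{
 *		// Refuse the allocation when the product would overflow an int
 *		if (unit_size > 0 && units > INT_MAX / unit_size)
 *			return false;	// caller fails the open
 *		*out = units * unit_size;
 *		return true;
 *	}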
**/ static int lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int size; int rc = -ENOMEM; spin_lock_irq(&phba->hbalock); if (phba->ras_fwlog.state != ACTIVE) { spin_unlock_irq(&phba->hbalock); rc = -EINVAL; goto out; } spin_unlock_irq(&phba->hbalock); if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE, phba->cfg_ras_fwlog_buffsize, &size)) goto out; debug = kzalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; debug->buffer = vmalloc(size); if (!debug->buffer) goto free_debug; debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size); if (debug->len < 0) { rc = -EINVAL; goto free_buffer; } file->private_data = debug; return 0; free_buffer: vfree(debug->buffer); free_debug: kfree(debug); out: return rc; } /** * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer, LPFC_DUMPHBASLIM_SIZE); file->private_data = debug; rc = 0; out: return rc; } /** * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. 
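 *
 * Like the other open handlers here, this takes a one-shot snapshot into a
 * private buffer so subsequent reads see a stable image. A user-space model
 * of that pattern (hypothetical names, not driver code):
 *
 *	#include <stdlib.h>
 *
 *	struct dbg_snapshot {
 *		char *buffer;
 *		int len;
 *	};
 *
 *	static struct dbg_snapshot *snapshot_open(int (*fill)(char *, int),
 *						  int size)
 *	{
 *		struct dbg_snapshot *d = malloc(sizeof(*d));
 *
 *		if (!d)
 *			return NULL;
 *		d->buffer = malloc(size);
 *		if (!d->buffer) {
 *			free(d);		// no partial state on failure
 *			return NULL;
 *		}
 *		d->len = fill(d->buffer, size);	// cf. the *_data() helpers
 *		return d;			// cf. file->private_data
 *	}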
**/ static int lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer, LPFC_DUMPHOSTSLIM_SIZE); file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct dentry *dent = file->f_path.dentry; struct lpfc_hba *phba = file->private_data; char cbuf[32]; uint64_t tmp = 0; int cnt = 0; if (dent == phba->debug_writeGuard) cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); else if (dent == phba->debug_writeApp) cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); else if (dent == phba->debug_writeRef) cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); else if (dent == phba->debug_readGuard) cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); else if (dent == phba->debug_readApp) cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); else if (dent == phba->debug_readRef) cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); else if (dent == phba->debug_InjErrNPortID) cnt = scnprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid); else if (dent == phba->debug_InjErrWWPN) { memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); tmp = cpu_to_be64(tmp); cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp); } else if (dent == phba->debug_InjErrLBA) { if (phba->lpfc_injerr_lba == (sector_t)(-1)) cnt = scnprintf(cbuf, 32, "off\n"); else cnt = scnprintf(cbuf, 32, "0x%llx\n", (uint64_t) phba->lpfc_injerr_lba); } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0547 Unknown debugfs error injection entry\n"); return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt); } static ssize_t lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct dentry *dent = file->f_path.dentry; struct lpfc_hba *phba = file->private_data; char dstbuf[33]; uint64_t tmp = 0; int size; memset(dstbuf, 0, 33); size = (nbytes < 32) ? 
nbytes : 32; if (copy_from_user(dstbuf, buf, size)) return -EFAULT; if (dent == phba->debug_InjErrLBA) { if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && (dstbuf[2] == 'f')) tmp = (uint64_t)(-1); } if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) return -EINVAL; if (dent == phba->debug_writeGuard) phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; else if (dent == phba->debug_writeApp) phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp; else if (dent == phba->debug_writeRef) phba->lpfc_injerr_wref_cnt = (uint32_t)tmp; else if (dent == phba->debug_readGuard) phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp; else if (dent == phba->debug_readApp) phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp; else if (dent == phba->debug_readRef) phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; else if (dent == phba->debug_InjErrLBA) phba->lpfc_injerr_lba = (sector_t)tmp; else if (dent == phba->debug_InjErrNPortID) phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID); else if (dent == phba->debug_InjErrWWPN) { tmp = cpu_to_be64(tmp); memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name)); } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0548 Unknown debugfs error injection entry\n"); return nbytes; } static int lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file) { return 0; } /** * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * * Description: * This routine is the entry point for the debugfs open file operation. It gets * the vport from the i_private field in @inode, allocates the necessary buffer * for the log, fills the buffer from the in-memory log for this vport, and then * returns a pointer to that log in the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return a negative * error value. **/ static int lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer, LPFC_NODELIST_SIZE); file->private_data = debug; rc = 0; out: return rc; } /** * lpfc_debugfs_lseek - Seek through a debugfs file * @file: The file pointer to seek through. * @off: The offset to seek to or the amount to seek by. * @whence: Indicates how to seek. * * Description: * This routine is the entry point for the debugfs lseek file operation. The * @whence parameter indicates whether @off is the offset to directly seek to, * or if it is a value to seek forward or reverse by. This function figures out * what the new offset of the debugfs file will be and assigns that value to the * f_pos field of @file. * * Returns: * This function returns the new offset if successful and returns a negative * error if unable to process the seek. **/ static loff_t lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) { struct lpfc_debug *debug = file->private_data; return fixed_size_llseek(file, off, whence, debug->len); } /** * lpfc_debugfs_read - Read a debugfs file * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. 
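 *
 * A plain-C model of the positional copy this handler delegates to
 * simple_read_from_buffer(), with hypothetical names and no fault handling:
 *
 *	#include <string.h>
 *
 *	static long read_from(char *dst, size_t nbytes, long *ppos,
 *			      const char *src, long len)
 *	{
 *		long avail = len - *ppos;
 *
 *		if (avail <= 0)
 *			return 0;		// past the end: EOF
 *		if ((size_t)avail > nbytes)
 *			avail = nbytes;
 *		memcpy(dst, src + *ppos, avail);
 *		*ppos += avail;			// advance the file position
 *		return avail;
 *	}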
* * Description: * This routine reads data from from the buffer indicated in the private_data * field of @file. It will start reading at @ppos and copy up to @nbytes of * data to @buf. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_debugfs_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer, debug->len); } /** * lpfc_debugfs_release - Release the buffer used to store debugfs file data * @inode: The inode pointer that contains a vport pointer. (unused) * @file: The file pointer that contains the buffer to release. * * Description: * This routine frees the buffer that was allocated when the debugfs file was * opened. * * Returns: * This function returns zero. **/ static int lpfc_debugfs_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; kfree(debug->buffer); kfree(debug); return 0; } /** * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * Description: * This routine clears multi-XRI pools statistics when buf contains "clear". * * Return Value: * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. **/ static ssize_t lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; char mybuf[64]; char *pbuf; u32 i; u32 hwq_count; struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; if (nbytes > sizeof(mybuf) - 1) nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); if (copy_from_user(mybuf, buf, nbytes)) return -EFAULT; pbuf = &mybuf[0]; if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) { hwq_count = phba->cfg_hdw_queue; for (i = 0; i < hwq_count; i++) { qp = &phba->sli4_hba.hdwq[i]; multixri_pool = qp->p_multixri_pool; if (!multixri_pool) continue; qp->empty_io_bufs = 0; multixri_pool->pbl_empty_count = 0; #ifdef LPFC_MXP_STAT multixri_pool->above_limit_count = 0; multixri_pool->below_limit_count = 0; multixri_pool->stat_max_hwm = 0; multixri_pool->local_pbl_hit_count = 0; multixri_pool->other_pbl_hit_count = 0; multixri_pool->stat_pbl_count = 0; multixri_pool->stat_pvt_count = 0; multixri_pool->stat_busy_count = 0; multixri_pool->stat_snapshot_taken = 0; #endif } return strlen(pbuf); } return -EINVAL; } static int lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer, LPFC_NVMESTAT_SIZE); debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = 
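	/*
	 * Usage note (the debugfs entry names are illustrative assumptions;
	 * only the keywords come from the parsers in this file): the
	 * statistics write handlers here accept simple keywords, "clear"
	 * for the multi-XRI pool counters above and "reset"/"zero" for the
	 * NVMe target counters handled in this function.
	 */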
file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; struct lpfc_nvmet_tgtport *tgtp; char mybuf[64]; char *pbuf; if (!phba->targetport) return -ENXIO; if (nbytes > sizeof(mybuf) - 1) nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); if (copy_from_user(mybuf, buf, nbytes)) return -EFAULT; pbuf = &mybuf[0]; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || (strncmp(pbuf, "zero", strlen("zero")) == 0)) { atomic_set(&tgtp->rcv_ls_req_in, 0); atomic_set(&tgtp->rcv_ls_req_out, 0); atomic_set(&tgtp->rcv_ls_req_drop, 0); atomic_set(&tgtp->xmt_ls_abort, 0); atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); atomic_set(&tgtp->xmt_ls_rsp, 0); atomic_set(&tgtp->xmt_ls_drop, 0); atomic_set(&tgtp->xmt_ls_rsp_error, 0); atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); atomic_set(&tgtp->rcv_fcp_cmd_in, 0); atomic_set(&tgtp->rcv_fcp_cmd_out, 0); atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); atomic_set(&tgtp->xmt_fcp_drop, 0); atomic_set(&tgtp->xmt_fcp_read_rsp, 0); atomic_set(&tgtp->xmt_fcp_read, 0); atomic_set(&tgtp->xmt_fcp_write, 0); atomic_set(&tgtp->xmt_fcp_rsp, 0); atomic_set(&tgtp->xmt_fcp_release, 0); atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); atomic_set(&tgtp->xmt_fcp_rsp_error, 0); atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); atomic_set(&tgtp->xmt_fcp_abort, 0); atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); atomic_set(&tgtp->xmt_abort_sol, 0); atomic_set(&tgtp->xmt_abort_unsol, 0); atomic_set(&tgtp->xmt_abort_rsp, 0); atomic_set(&tgtp->xmt_abort_rsp_error, 0); } return nbytes; } static int lpfc_debugfs_scsistat_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kzalloc(LPFC_SCSISTAT_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_scsistat_data(vport, debug->buffer, LPFC_SCSISTAT_SIZE); debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; char mybuf[6] = {0}; int i; if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ? 
(sizeof(mybuf) - 1) : nbytes)) return -EFAULT; if ((strncmp(&mybuf[0], "reset", strlen("reset")) == 0) || (strncmp(&mybuf[0], "zero", strlen("zero")) == 0)) { for (i = 0; i < phba->cfg_hdw_queue; i++) { memset(&phba->sli4_hba.hdwq[i].scsi_cstat, 0, sizeof(phba->sli4_hba.hdwq[i].scsi_cstat)); } } return nbytes; } static int lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer, LPFC_IOKTIME_SIZE); debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; char mybuf[64]; char *pbuf; if (nbytes > sizeof(mybuf) - 1) nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); if (copy_from_user(mybuf, buf, nbytes)) return -EFAULT; pbuf = &mybuf[0]; if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { phba->ktime_data_samples = 0; phba->ktime_status_samples = 0; phba->ktime_seg1_total = 0; phba->ktime_seg1_max = 0; phba->ktime_seg1_min = 0xffffffff; phba->ktime_seg2_total = 0; phba->ktime_seg2_max = 0; phba->ktime_seg2_min = 0xffffffff; phba->ktime_seg3_total = 0; phba->ktime_seg3_max = 0; phba->ktime_seg3_min = 0xffffffff; phba->ktime_seg4_total = 0; phba->ktime_seg4_max = 0; phba->ktime_seg4_min = 0xffffffff; phba->ktime_seg5_total = 0; phba->ktime_seg5_max = 0; phba->ktime_seg5_min = 0xffffffff; phba->ktime_seg6_total = 0; phba->ktime_seg6_max = 0; phba->ktime_seg6_min = 0xffffffff; phba->ktime_seg7_total = 0; phba->ktime_seg7_max = 0; phba->ktime_seg7_min = 0xffffffff; phba->ktime_seg8_total = 0; phba->ktime_seg8_max = 0; phba->ktime_seg8_min = 0xffffffff; phba->ktime_seg9_total = 0; phba->ktime_seg9_max = 0; phba->ktime_seg9_min = 0xffffffff; phba->ktime_seg10_total = 0; phba->ktime_seg10_max = 0; phba->ktime_seg10_min = 0xffffffff; phba->ktime_on = 1; return strlen(pbuf); } else if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { phba->ktime_on = 0; return strlen(pbuf); } else if ((strncmp(pbuf, "zero", sizeof("zero") - 1) == 0)) { phba->ktime_data_samples = 0; phba->ktime_status_samples = 0; phba->ktime_seg1_total = 0; phba->ktime_seg1_max = 0; phba->ktime_seg1_min = 0xffffffff; phba->ktime_seg2_total = 0; phba->ktime_seg2_max = 0; phba->ktime_seg2_min = 0xffffffff; phba->ktime_seg3_total = 0; phba->ktime_seg3_max = 0; phba->ktime_seg3_min = 0xffffffff; phba->ktime_seg4_total = 0; phba->ktime_seg4_max = 0; phba->ktime_seg4_min = 0xffffffff; phba->ktime_seg5_total = 0; phba->ktime_seg5_max = 0; phba->ktime_seg5_min = 0xffffffff; phba->ktime_seg6_total = 0; phba->ktime_seg6_max = 0; phba->ktime_seg6_min = 0xffffffff; phba->ktime_seg7_total = 0; phba->ktime_seg7_max = 0; phba->ktime_seg7_min = 0xffffffff; phba->ktime_seg8_total = 0; phba->ktime_seg8_max = 0; phba->ktime_seg8_min = 0xffffffff; phba->ktime_seg9_total = 0; phba->ktime_seg9_max = 0; phba->ktime_seg9_min = 0xffffffff; phba->ktime_seg10_total = 0; phba->ktime_seg10_max = 0; phba->ktime_seg10_min = 0xffffffff; return strlen(pbuf); } return -EINVAL; } static int 
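/*
 * NVMe I/O trace control (see lpfc_debugfs_nvmeio_trc_write() below):
 * writing "on"/"off" starts or stops tracing, and any other value is taken
 * as the requested trace buffer size, which is rounded down to a power of
 * two before allocation. For example, a write of "1000" ends up as a
 * 512-entry buffer (1000 -> 2^9). The debugfs entry name for this file is
 * created elsewhere in the driver and is not assumed here.
 */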
lpfc_debugfs_nvmeio_trc_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kmalloc(LPFC_NVMEIO_TRC_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_nvmeio_trc_data(phba, debug->buffer, LPFC_NVMEIO_TRC_SIZE); debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; int i; unsigned long sz; char mybuf[64]; char *pbuf; if (nbytes > sizeof(mybuf) - 1) nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); if (copy_from_user(mybuf, buf, nbytes)) return -EFAULT; pbuf = &mybuf[0]; if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0570 nvmeio_trc_off\n"); phba->nvmeio_trc_output_idx = 0; phba->nvmeio_trc_on = 0; return strlen(pbuf); } else if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0571 nvmeio_trc_on\n"); phba->nvmeio_trc_output_idx = 0; phba->nvmeio_trc_on = 1; return strlen(pbuf); } /* We must be off to allocate the trace buffer */ if (phba->nvmeio_trc_on != 0) return -EINVAL; /* If not on or off, the parameter is the trace buffer size */ i = kstrtoul(pbuf, 0, &sz); if (i) return -EINVAL; phba->nvmeio_trc_size = (uint32_t)sz; /* It must be a power of 2 - round down */ i = 0; while (sz > 1) { sz = sz >> 1; i++; } sz = (1 << i); if (phba->nvmeio_trc_size != sz) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0572 nvmeio_trc_size changed to %ld\n", sz); phba->nvmeio_trc_size = (uint32_t)sz; /* If one previously exists, free it */ kfree(phba->nvmeio_trc); /* Allocate new trace buffer and initialize */ phba->nvmeio_trc = kzalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * sz), GFP_KERNEL); if (!phba->nvmeio_trc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0573 Cannot create debugfs " "nvmeio_trc buffer\n"); return -ENOMEM; } atomic_set(&phba->nvmeio_trc_cnt, 0); phba->nvmeio_trc_on = 0; phba->nvmeio_trc_output_idx = 0; return strlen(pbuf); } static int lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer, LPFC_SCSISTAT_SIZE); debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; struct lpfc_hdwq_stat *c_stat; char mybuf[64]; char *pbuf; int i; if (nbytes > sizeof(mybuf) - 1) nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); if (copy_from_user(mybuf, buf, nbytes)) return -EFAULT; pbuf = &mybuf[0]; if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { if (phba->nvmet_support) phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; else phba->hdwqstat_on |= 
(LPFC_CHECK_NVME_IO | LPFC_CHECK_SCSI_IO); return strlen(pbuf); } else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) { if (phba->nvmet_support) phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; else phba->hdwqstat_on |= LPFC_CHECK_NVME_IO; return strlen(pbuf); } else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) { if (!phba->nvmet_support) phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO; return strlen(pbuf); } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) { phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO); return strlen(pbuf); } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) { phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO; return strlen(pbuf); } else if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { phba->hdwqstat_on = LPFC_CHECK_OFF; return strlen(pbuf); } else if ((strncmp(pbuf, "zero", sizeof("zero") - 1) == 0)) { for_each_present_cpu(i) { c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i); c_stat->xmt_io = 0; c_stat->cmpl_io = 0; c_stat->rcv_io = 0; } return strlen(pbuf); } return -EINVAL; } /* * --------------------------------- * iDiag debugfs file access methods * --------------------------------- * * All access methods are through the proper SLI4 PCI function's debugfs * iDiag directory: * * /sys/kernel/debug/lpfc/fn<#>/iDiag */ /** * lpfc_idiag_cmd_get - Get and parse idiag debugfs comands from user space * @buf: The pointer to the user space buffer. * @nbytes: The number of bytes in the user space buffer. * @idiag_cmd: pointer to the idiag command struct. * * This routine reads data from debugfs user space buffer and parses the * buffer for getting the idiag command and arguments. The while space in * between the set of data is used as the parsing separator. * * This routine returns 0 when successful, it returns proper error code * back to the user space in error conditions. */ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, struct lpfc_idiag_cmd *idiag_cmd) { char mybuf[64]; char *pbuf, *step_str; int i; size_t bsize; memset(mybuf, 0, sizeof(mybuf)); memset(idiag_cmd, 0, sizeof(*idiag_cmd)); bsize = min(nbytes, (sizeof(mybuf)-1)); if (copy_from_user(mybuf, buf, bsize)) return -EFAULT; pbuf = &mybuf[0]; step_str = strsep(&pbuf, "\t "); /* The opcode must present */ if (!step_str) return -EINVAL; idiag_cmd->opcode = simple_strtol(step_str, NULL, 0); if (idiag_cmd->opcode == 0) return -EINVAL; for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { step_str = strsep(&pbuf, "\t "); if (!step_str) return i; idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); } return i; } /** * lpfc_idiag_open - idiag open debugfs * @inode: The inode pointer that contains a pointer to phba. * @file: The file pointer to attach the file operation. * * Description: * This routine is the entry point for the debugfs open file operation. It * gets the reference to phba from the i_private field in @inode, it then * allocates buffer for the file operation, performs the necessary PCI config * space read into the allocated buffer according to the idiag user command * setup, and then returns a pointer to buffer in the private_data field in * @file. * * Returns: * This function returns zero if successful. On error it will return an * negative error value. 
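 *
 * Illustrative usage sketch (the entry name "pciCfg" and the user-space
 * visibility of the opcode macro are assumptions; includes and error
 * handling omitted): an idiag file is exercised by first writing a
 * whitespace-separated command string, which lpfc_idiag_cmd_get() parses
 * with simple_strtol() base 0 (so hex arguments are accepted), and then
 * reading the same file back:
 *
 *	int fd = open("/sys/kernel/debug/lpfc/fn0/iDiag/pciCfg", O_RDWR);
 *	char out[256];
 *
 *	dprintf(fd, "%u 0x98 4", LPFC_IDIAG_CMD_PCICFG_RD);
 *	lseek(fd, 0, SEEK_SET);
 *	read(fd, out, sizeof(out));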
**/ static int lpfc_idiag_open(struct inode *inode, struct file *file) { struct lpfc_debug *debug; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) return -ENOMEM; debug->i_private = inode->i_private; debug->buffer = NULL; file->private_data = debug; return 0; } /** * lpfc_idiag_release - Release idiag access file operation * @inode: The inode pointer that contains a vport pointer. (unused) * @file: The file pointer that contains the buffer to release. * * Description: * This routine is the generic release routine for the idiag access file * operation, it frees the buffer that was allocated when the debugfs file * was opened. * * Returns: * This function returns zero. **/ static int lpfc_idiag_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; /* Free the buffers to the file operation */ kfree(debug->buffer); kfree(debug); return 0; } /** * lpfc_idiag_cmd_release - Release idiag cmd access file operation * @inode: The inode pointer that contains a vport pointer. (unused) * @file: The file pointer that contains the buffer to release. * * Description: * This routine frees the buffer that was allocated when the debugfs file * was opened. It also reset the fields in the idiag command struct in the * case of command for write operation. * * Returns: * This function returns zero. **/ static int lpfc_idiag_cmd_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; if (debug->op == LPFC_IDIAG_OP_WR) { switch (idiag.cmd.opcode) { case LPFC_IDIAG_CMD_PCICFG_WR: case LPFC_IDIAG_CMD_PCICFG_ST: case LPFC_IDIAG_CMD_PCICFG_CL: case LPFC_IDIAG_CMD_QUEACC_WR: case LPFC_IDIAG_CMD_QUEACC_ST: case LPFC_IDIAG_CMD_QUEACC_CL: memset(&idiag, 0, sizeof(idiag)); break; default: break; } } /* Free the buffers to the file operation */ kfree(debug->buffer); kfree(debug); return 0; } /** * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba pci config space according to the * idiag command, and copies to user @buf. Depending on the PCI config space * read command setup, it does either a single register read of a byte * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all * registers from the 4K extended PCI config space. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. 
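 *
 * Example output (illustrative): a single 32-bit read of offset 0x98 is
 * printed as "098: xxxxxxxx", while browse mode dumps the 4K extended
 * config space as rows of eight 32-bit words, each row prefixed with its
 * offset label.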
**/ static ssize_t lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE; int where, count; char *pbuffer; struct pci_dev *pdev; uint32_t u32val; uint16_t u16val; uint8_t u8val; pdev = phba->pcidev; if (!pdev) return 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; } else return 0; /* Read single PCI config space register */ switch (count) { case SIZE_U8: /* byte (8 bits) */ pci_read_config_byte(pdev, where, &u8val); len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %02x\n", where, u8val); break; case SIZE_U16: /* word (16 bits) */ pci_read_config_word(pdev, where, &u16val); len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %04x\n", where, u16val); break; case SIZE_U32: /* double word (32 bits) */ pci_read_config_dword(pdev, where, &u32val); len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %08x\n", where, u32val); break; case LPFC_PCI_CFG_BROWSE: /* browse all */ goto pcicfg_browse; default: /* illegal count */ len = 0; break; } return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); pcicfg_browse: /* Browse all PCI config space registers */ offset_label = idiag.offset.last_rd; offset = offset_label; /* Read PCI config space */ len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: ", offset_label); while (index > 0) { pci_read_config_dword(pdev, offset, &u32val); len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%08x ", u32val); offset += sizeof(uint32_t); if (offset >= LPFC_PCI_CFG_SIZE) { len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n"); break; } index -= sizeof(uint32_t); if (!index) len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n"); else if (!(index % (8 * sizeof(uint32_t)))) { offset_label += (8 * sizeof(uint32_t)); len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n%03x: ", offset_label); } } /* Set up the offset for next portion of pci cfg read */ if (index == 0) { idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) idiag.offset.last_rd = 0; } else idiag.offset.last_rd = 0; return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * This routine get the debugfs idiag command struct from user space and * then perform the syntax check for PCI config space read or write command * accordingly. In the case of PCI config space read command, it sets up * the command in the idiag command struct for the debugfs read operation. * In the case of PCI config space write operation, it executes the write * operation into the PCI config space accordingly. * * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. 
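 *
 * Command layout (taken from the checks below; opcode values are the
 * LPFC_IDIAG_CMD_PCICFG_* constants): a read setup is
 * "<opcode> <where> <count>" with <count> of 1, 2 or 4 bytes, or
 * LPFC_PCI_CFG_BROWSE to browse, and a write/set/clear is
 * "<opcode> <where> <count> <value>"; <where> must be naturally aligned
 * for the access size and must fit inside LPFC_PCI_CFG_SIZE.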
*/ static ssize_t lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t where, value, count; uint32_t u32val; uint16_t u16val; uint8_t u8val; struct pci_dev *pdev; int rc; pdev = phba->pcidev; if (!pdev) return -EFAULT; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { /* Sanity check on PCI config read command line arguments */ if (rc != LPFC_PCI_CFG_RD_CMD_ARG) goto error_out; /* Read command from PCI config space, set up command fields */ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; if (count == LPFC_PCI_CFG_BROWSE) { if (where % sizeof(uint32_t)) goto error_out; /* Starting offset to browse */ idiag.offset.last_rd = where; } else if ((count != sizeof(uint8_t)) && (count != sizeof(uint16_t)) && (count != sizeof(uint32_t))) goto error_out; if (count == sizeof(uint8_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) goto error_out; if (where % sizeof(uint8_t)) goto error_out; } if (count == sizeof(uint16_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) goto error_out; if (where % sizeof(uint16_t)) goto error_out; } if (count == sizeof(uint32_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) goto error_out; if (where % sizeof(uint32_t)) goto error_out; } } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { /* Sanity check on PCI config write command line arguments */ if (rc != LPFC_PCI_CFG_WR_CMD_ARG) goto error_out; /* Write command to PCI config space, read-modify-write */ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX]; count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX]; value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX]; /* Sanity checks */ if ((count != sizeof(uint8_t)) && (count != sizeof(uint16_t)) && (count != sizeof(uint32_t))) goto error_out; if (count == sizeof(uint8_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) goto error_out; if (where % sizeof(uint8_t)) goto error_out; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) pci_write_config_byte(pdev, where, (uint8_t)value); if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { rc = pci_read_config_byte(pdev, where, &u8val); if (!rc) { u8val |= (uint8_t)value; pci_write_config_byte(pdev, where, u8val); } } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { rc = pci_read_config_byte(pdev, where, &u8val); if (!rc) { u8val &= (uint8_t)(~value); pci_write_config_byte(pdev, where, u8val); } } } if (count == sizeof(uint16_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) goto error_out; if (where % sizeof(uint16_t)) goto error_out; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) pci_write_config_word(pdev, where, (uint16_t)value); if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { rc = pci_read_config_word(pdev, where, &u16val); if (!rc) { u16val |= (uint16_t)value; pci_write_config_word(pdev, where, u16val); } } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { rc = pci_read_config_word(pdev, where, &u16val); if (!rc) { u16val &= (uint16_t)(~value); pci_write_config_word(pdev, where, u16val); } } } if (count == sizeof(uint32_t)) { if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) goto error_out; if (where % sizeof(uint32_t)) goto error_out; if 
(idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) pci_write_config_dword(pdev, where, value); if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { rc = pci_read_config_dword(pdev, where, &u32val); if (!rc) { u32val |= value; pci_write_config_dword(pdev, where, u32val); } } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { rc = pci_read_config_dword(pdev, where, &u32val); if (!rc) { u32val &= ~value; pci_write_config_dword(pdev, where, u32val); } } } } else /* All other opecodes are illegal for now */ goto error_out; return nbytes; error_out: memset(&idiag, 0, sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_baracc_read - idiag debugfs pci bar access read * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba pci bar memory mapped space * according to the idiag command, and copies to user @buf. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; int offset_label, offset, offset_run, len = 0, index; int bar_num, acc_range, bar_size; char *pbuffer; void __iomem *mem_mapped_bar; uint32_t if_type; struct pci_dev *pdev; uint32_t u32val; pdev = phba->pcidev; if (!pdev) return 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) { bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX]; offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX]; acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX]; bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX]; } else return 0; if (acc_range == 0) return 0; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { if (bar_num == IDIAG_BARACC_BAR_0) mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; else if (bar_num == IDIAG_BARACC_BAR_1) mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p; else if (bar_num == IDIAG_BARACC_BAR_2) mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p; else return 0; } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { if (bar_num == IDIAG_BARACC_BAR_0) mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; else return 0; } else return 0; /* Read single PCI bar space register */ if (acc_range == SINGLE_WORD) { offset_run = offset; u32val = readl(mem_mapped_bar + offset_run); len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "%05x: %08x\n", offset_run, u32val); } else goto baracc_browse; return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); baracc_browse: /* Browse all PCI bar space registers */ offset_label = idiag.offset.last_rd; offset_run = offset_label; /* Read PCI bar memory mapped space */ len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "%05x: ", offset_label); index = LPFC_PCI_BAR_RD_SIZE; while (index > 0) { u32val = readl(mem_mapped_bar + offset_run); len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "%08x ", u32val); offset_run += sizeof(uint32_t); if (acc_range == LPFC_PCI_BAR_BROWSE) { if (offset_run >= bar_size) { len 
			+= scnprintf(pbuffer+len,
					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
				break;
			}
		} else {
			if (offset_run >= offset +
			    (acc_range * sizeof(uint32_t))) {
				len += scnprintf(pbuffer+len,
					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
				break;
			}
		}
		index -= sizeof(uint32_t);
		if (!index)
			len += scnprintf(pbuffer+len,
					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
		else if (!(index % (8 * sizeof(uint32_t)))) {
			offset_label += (8 * sizeof(uint32_t));
			len += scnprintf(pbuffer+len,
					LPFC_PCI_BAR_RD_BUF_SIZE-len,
					"\n%05x: ", offset_label);
		}
	}

	/* Set up the offset for next portion of pci bar read */
	if (index == 0) {
		idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
		if (acc_range == LPFC_PCI_BAR_BROWSE) {
			if (idiag.offset.last_rd >= bar_size)
				idiag.offset.last_rd = 0;
		} else {
			if (offset_run >= offset +
			    (acc_range * sizeof(uint32_t)))
				idiag.offset.last_rd = offset;
		}
	} else {
		if (acc_range == LPFC_PCI_BAR_BROWSE)
			idiag.offset.last_rd = 0;
		else
			idiag.offset.last_rd = offset;
	}

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}

/**
 * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine gets the debugfs idiag command struct from user space and
 * then performs the syntax check for PCI bar memory mapped space read or
 * write command accordingly. In the case of PCI bar memory mapped space
 * read command, it sets up the command in the idiag command struct for
 * the debugfs read operation. In the case of PCI bar memory mapped space
 * write operation, it executes the write operation into the PCI bar memory
 * mapped space accordingly.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
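 *
 * Command layout (derived from the checks below; opcode values are the
 * LPFC_IDIAG_CMD_BARACC_* constants): a read setup is
 * "<opcode> <bar> <offset> <access range>" where <access range> is
 * SINGLE_WORD, a word count, or LPFC_PCI_BAR_BROWSE, and a write/set/clear
 * is "<opcode> <bar> <offset> <value>"; <offset> must be 32-bit aligned and
 * the BAR number must be valid for the SLI_INTF if_type of the port.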
*/ static ssize_t lpfc_idiag_baracc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t bar_num, bar_size, offset, value, acc_range; struct pci_dev *pdev; void __iomem *mem_mapped_bar; uint32_t if_type; uint32_t u32val; int rc; pdev = phba->pcidev; if (!pdev) return -EFAULT; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX]; if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { if ((bar_num != IDIAG_BARACC_BAR_0) && (bar_num != IDIAG_BARACC_BAR_1) && (bar_num != IDIAG_BARACC_BAR_2)) goto error_out; } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { if (bar_num != IDIAG_BARACC_BAR_0) goto error_out; } else goto error_out; if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { if (bar_num == IDIAG_BARACC_BAR_0) { idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = LPFC_PCI_IF0_BAR0_SIZE; mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; } else if (bar_num == IDIAG_BARACC_BAR_1) { idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = LPFC_PCI_IF0_BAR1_SIZE; mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p; } else if (bar_num == IDIAG_BARACC_BAR_2) { idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = LPFC_PCI_IF0_BAR2_SIZE; mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p; } else goto error_out; } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { if (bar_num == IDIAG_BARACC_BAR_0) { idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] = LPFC_PCI_IF2_BAR0_SIZE; mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p; } else goto error_out; } else goto error_out; offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX]; if (offset % sizeof(uint32_t)) goto error_out; bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX]; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) { /* Sanity check on PCI config read command line arguments */ if (rc != LPFC_PCI_BAR_RD_CMD_ARG) goto error_out; acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX]; if (acc_range == LPFC_PCI_BAR_BROWSE) { if (offset > bar_size - sizeof(uint32_t)) goto error_out; /* Starting offset to browse */ idiag.offset.last_rd = offset; } else if (acc_range > SINGLE_WORD) { if (offset + acc_range * sizeof(uint32_t) > bar_size) goto error_out; /* Starting offset to browse */ idiag.offset.last_rd = offset; } else if (acc_range != SINGLE_WORD) goto error_out; } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) { /* Sanity check on PCI bar write command line arguments */ if (rc != LPFC_PCI_BAR_WR_CMD_ARG) goto error_out; /* Write command to PCI bar space, read-modify-write */ acc_range = SINGLE_WORD; value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX]; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) { writel(value, mem_mapped_bar + offset); readl(mem_mapped_bar + offset); } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) { u32val = readl(mem_mapped_bar + offset); u32val |= value; writel(u32val, mem_mapped_bar + offset); readl(mem_mapped_bar + offset); } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) { u32val = readl(mem_mapped_bar + offset); u32val &= ~value; writel(u32val, mem_mapped_bar + offset); readl(mem_mapped_bar + offset); } } else /* All other opecodes are illegal for now */ goto error_out; return nbytes; error_out: memset(&idiag, 0, sizeof(idiag)); 
return -EINVAL; } static int __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, char *pbuffer, int len) { if (!qp) return len; len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\t%s WQ info: ", wqtype); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->notify_interval); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } static int lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, int *len, int max_cnt, int cq_id) { struct lpfc_queue *qp; int qidx; for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { qp = phba->sli4_hba.hdwq[qidx].io_wq; if (qp->assoc_qid != cq_id) continue; *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); if (*len >= max_cnt) return 1; } return 0; } static int __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, char *pbuffer, int len) { if (!qp) return len; len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t%s CQ info: ", cqtype); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x " "xabt:x%x wq:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], NTFI[%03d], PLMT[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->notify_interval, qp->max_proc_limit); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } static int __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, char *rqtype, char *pbuffer, int len) { if (!qp || !datqp) return len; len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\t%s RQ info: ", rqtype); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " "posted:x%x rcv:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->notify_interval); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n", datqp->queue_id, datqp->entry_count, datqp->entry_size, datqp->host_index, datqp->hba_index, datqp->notify_interval); return len; } static int lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, int *len, int max_cnt, int eqidx, int eq_id) { struct lpfc_queue *qp; int rc; qp = phba->sli4_hba.hdwq[eqidx].io_cq; *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len); /* Reset max counter */ qp->CQ_max_cqe = 0; if (*len >= max_cnt) return 1; rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len, max_cnt, qp->queue_id); if (rc) return 1; if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) { /* NVMET CQset */ qp = phba->sli4_hba.nvmet_cqset[eqidx]; *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len); /* Reset max 
counter */ qp->CQ_max_cqe = 0; if (*len >= max_cnt) return 1; /* RQ header */ qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx]; *len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.nvmet_mrq_data[eqidx], "NVMET MRQ", pbuffer, *len); if (*len >= max_cnt) return 1; } return 0; } static int __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, char *pbuffer, int len) { if (!qp) return len; len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n", eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4, qp->q_mode); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], NTFI[%03d], PLMT[%03d], AFFIN[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->notify_interval, qp->max_proc_limit, qp->chann); len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } /** * lpfc_idiag_queinfo_read - idiag debugfs read queue information * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba SLI4 PCI function queue information, * and copies to user @buf. * This routine only returns 1 EQs worth of information. It remembers the last * EQ read and jumps to the next EQ. Thus subsequent calls to queInfo will * retrieve all EQs allocated for the phba. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; char *pbuffer; int max_cnt, rc, x, len = 0; struct lpfc_queue *qp = NULL; if (!debug->buffer) debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 256; if (*ppos) return 0; spin_lock_irq(&phba->hbalock); /* Fast-path event queue */ if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) { x = phba->lpfc_idiag_last_eq; phba->lpfc_idiag_last_eq++; if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue) phba->lpfc_idiag_last_eq = 0; len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "HDWQ %d out of %d HBA HDWQs\n", x, phba->cfg_hdw_queue); /* Fast-path EQ */ qp = phba->sli4_hba.hdwq[x].hba_eq; if (!qp) goto out; len = __lpfc_idiag_print_eq(qp, "HBA", pbuffer, len); /* Reset max counter */ qp->EQ_max_eqe = 0; if (len >= max_cnt) goto too_big; /* will dump both fcp and nvme cqs/wqs for the eq */ rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len, max_cnt, x, qp->queue_id); if (rc) goto too_big; /* Only EQ 0 has slow path CQs configured */ if (x) goto out; /* Slow-path mailbox CQ */ qp = phba->sli4_hba.mbx_cq; len = __lpfc_idiag_print_cq(qp, "MBX", pbuffer, len); if (len >= max_cnt) goto too_big; /* Slow-path MBOX MQ */ qp = phba->sli4_hba.mbx_wq; len = __lpfc_idiag_print_wq(qp, "MBX", pbuffer, len); if (len >= max_cnt) goto too_big; /* Slow-path ELS response CQ */ qp = phba->sli4_hba.els_cq; len = __lpfc_idiag_print_cq(qp, "ELS", pbuffer, len); /* Reset max counter */ if (qp) qp->CQ_max_cqe = 0; if (len >= max_cnt) goto too_big; /* Slow-path ELS WQ */ qp = phba->sli4_hba.els_wq; len = 
__lpfc_idiag_print_wq(qp, "ELS", pbuffer, len); if (len >= max_cnt) goto too_big; qp = phba->sli4_hba.hdr_rq; len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq, "ELS RQpair", pbuffer, len); if (len >= max_cnt) goto too_big; /* Slow-path NVME LS response CQ */ qp = phba->sli4_hba.nvmels_cq; len = __lpfc_idiag_print_cq(qp, "NVME LS", pbuffer, len); /* Reset max counter */ if (qp) qp->CQ_max_cqe = 0; if (len >= max_cnt) goto too_big; /* Slow-path NVME LS WQ */ qp = phba->sli4_hba.nvmels_wq; len = __lpfc_idiag_print_wq(qp, "NVME LS", pbuffer, len); if (len >= max_cnt) goto too_big; goto out; } spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); too_big: len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n"); out: spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_que_param_check - queue access command parameter sanity check * @q: The pointer to queue structure. * @index: The index into a queue entry. * @count: The number of queue entries to access. * * Description: * The routine performs sanity check on device queue access method commands. * * Returns: * This function returns -EINVAL when fails the sanity check, otherwise, it * returns 0. **/ static int lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count) { /* Only support single entry read or browsing */ if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE)) return -EINVAL; if (index > q->entry_count - 1) return -EINVAL; return 0; } /** * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index * @pbuffer: The pointer to buffer to copy the read data into. * @len: Length of the buffer. * @pque: The pointer to the queue to be read. * @index: The index into the queue entry. * * Description: * This routine reads out a single entry from the given queue's index location * and copies it into the buffer provided. * * Returns: * This function returns 0 when it fails, otherwise, it returns the length of * the data read into the buffer provided. **/ static int lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque, uint32_t index) { int offset, esize; uint32_t *pentry; if (!pbuffer || !pque) return 0; esize = pque->entry_size; len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "QE-INDEX[%04d]:\n", index); offset = 0; pentry = lpfc_sli4_qe(pque, index); while (esize > 0) { len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "%08x ", *pentry); pentry++; offset += sizeof(uint32_t); esize -= sizeof(uint32_t); if (esize > 0 && !(offset % (4 * sizeof(uint32_t)))) len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); } len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); return len; } /** * lpfc_idiag_queacc_read - idiag debugfs read port queue * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba device queue memory according to the * idiag command, and copies to user @buf. Depending on the queue dump read * command setup, it does either a single queue entry read or browing through * all entries of the queue. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. 
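 *
 * Example output (illustrative): each entry is dumped as "QE-INDEX[nnnn]:"
 * followed by the entry contents as 32-bit words, four words per line, so
 * a browse of, say, a 64-entry queue simply repeats that pattern for
 * successive indexes across reads.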
**/ static ssize_t lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; uint32_t last_index, index, count; struct lpfc_queue *pque = NULL; char *pbuffer; int len = 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX]; count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX]; pque = (struct lpfc_queue *)idiag.ptr_private; } else return 0; /* Browse the queue starting from index */ if (count == LPFC_QUE_ACC_BROWSE) goto que_browse; /* Read a single entry from the queue */ len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); que_browse: /* Browse all entries from the queue */ last_index = idiag.offset.last_rd; index = last_index; while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) { len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index); index++; if (index > pque->entry_count - 1) break; } /* Set up the offset for next portion of pci cfg read */ if (index > pque->entry_count - 1) index = 0; idiag.offset.last_rd = index; return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * This routine get the debugfs idiag command struct from user space and then * perform the syntax check for port queue read (dump) or write (set) command * accordingly. In the case of port queue read command, it sets up the command * in the idiag command struct for the following debugfs read operation. In * the case of port queue write operation, it executes the write operation * into the port queue entry accordingly. * * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. 
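 *
 * Command layout (taken from the field indexes used below; opcode values
 * are the LPFC_IDIAG_CMD_QUEACC_* constants):
 * "<opcode> <queue type> <queue id> <entry index> <count> <offset> <value>",
 * where <queue type> selects EQ/CQ/MQ/WQ/RQ, <count> is 1 for a single
 * entry or LPFC_QUE_ACC_BROWSE for a read, and <offset>/<value> are only
 * used by the write/set/clear opcodes to modify one 32-bit word of the
 * selected entry.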
**/ static ssize_t lpfc_idiag_queacc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t qidx, quetp, queid, index, count, offset, value; uint32_t *pentry; struct lpfc_queue *pque, *qp; int rc; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; /* Get and sanity check on command feilds */ quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX]; queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX]; index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX]; count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX]; offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX]; value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX]; /* Sanity check on command line arguments */ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) { if (rc != LPFC_QUE_ACC_WR_CMD_ARG) goto error_out; if (count != 1) goto error_out; } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { if (rc != LPFC_QUE_ACC_RD_CMD_ARG) goto error_out; } else goto error_out; switch (quetp) { case LPFC_IDIAG_EQ: /* HBA event queue */ if (phba->sli4_hba.hdwq) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { qp = phba->sli4_hba.hdwq[qidx].hba_eq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check(qp, index, count); if (rc) goto error_out; idiag.ptr_private = qp; goto pass_check; } } } goto error_out; case LPFC_IDIAG_CQ: /* MBX complete queue */ if (phba->sli4_hba.mbx_cq && phba->sli4_hba.mbx_cq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.mbx_cq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.mbx_cq; goto pass_check; } /* ELS complete queue */ if (phba->sli4_hba.els_cq && phba->sli4_hba.els_cq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.els_cq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.els_cq; goto pass_check; } /* NVME LS complete queue */ if (phba->sli4_hba.nvmels_cq && phba->sli4_hba.nvmels_cq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.nvmels_cq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.nvmels_cq; goto pass_check; } /* FCP complete queue */ if (phba->sli4_hba.hdwq) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { qp = phba->sli4_hba.hdwq[qidx].io_cq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( qp, index, count); if (rc) goto error_out; idiag.ptr_private = qp; goto pass_check; } } } goto error_out; case LPFC_IDIAG_MQ: /* MBX work queue */ if (phba->sli4_hba.mbx_wq && phba->sli4_hba.mbx_wq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.mbx_wq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.mbx_wq; goto pass_check; } goto error_out; case LPFC_IDIAG_WQ: /* ELS work queue */ if (phba->sli4_hba.els_wq && phba->sli4_hba.els_wq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.els_wq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.els_wq; goto pass_check; } /* NVME LS work queue */ if (phba->sli4_hba.nvmels_wq && phba->sli4_hba.nvmels_wq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( 
phba->sli4_hba.nvmels_wq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.nvmels_wq; goto pass_check; } if (phba->sli4_hba.hdwq) { /* FCP/SCSI work queue */ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { qp = phba->sli4_hba.hdwq[qidx].io_wq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( qp, index, count); if (rc) goto error_out; idiag.ptr_private = qp; goto pass_check; } } } goto error_out; case LPFC_IDIAG_RQ: /* HDR queue */ if (phba->sli4_hba.hdr_rq && phba->sli4_hba.hdr_rq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.hdr_rq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.hdr_rq; goto pass_check; } /* DAT queue */ if (phba->sli4_hba.dat_rq && phba->sli4_hba.dat_rq->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( phba->sli4_hba.dat_rq, index, count); if (rc) goto error_out; idiag.ptr_private = phba->sli4_hba.dat_rq; goto pass_check; } goto error_out; default: goto error_out; } pass_check: if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { if (count == LPFC_QUE_ACC_BROWSE) idiag.offset.last_rd = index; } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) { /* Additional sanity checks on write operation */ pque = (struct lpfc_queue *)idiag.ptr_private; if (offset > pque->entry_size/sizeof(uint32_t) - 1) goto error_out; pentry = lpfc_sli4_qe(pque, index); pentry += offset; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR) *pentry = value; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST) *pentry |= value; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) *pentry &= ~value; } return nbytes; error_out: /* Clean out command structure on command error out */ memset(&idiag, 0, sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register * @phba: The pointer to hba structure. * @pbuffer: The pointer to the buffer to copy the data to. * @len: The length of bytes to copied. * @drbregid: The id to doorbell registers. * * Description: * This routine reads a doorbell register and copies its content to the * user buffer pointed to by @pbuffer. * * Returns: * This function returns the amount of data that was copied into @pbuffer. **/ static int lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer, int len, uint32_t drbregid) { if (!pbuffer) return 0; switch (drbregid) { case LPFC_DRB_EQ: len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len, "EQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.EQDBregaddr)); break; case LPFC_DRB_CQ: len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, "CQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.CQDBregaddr)); break; case LPFC_DRB_MQ: len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, "MQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.MQDBregaddr)); break; case LPFC_DRB_WQ: len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, "WQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.WQDBregaddr)); break; case LPFC_DRB_RQ: len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, "RQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.RQDBregaddr)); break; default: break; } return len; } /** * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. 
 *
 * Description:
 * This routine reads data from the @phba device doorbell register according
 * to the idiag command, and copies to user @buf. Depending on the doorbell
 * register read command setup, it does either a single doorbell register
 * read or dump all doorbell registers.
 *
 * Returns:
 * This function returns the amount of data that was read (this could be less
 * than @nbytes if the end of the file was reached) or a negative error value.
 **/
static ssize_t
lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
		       loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	uint32_t drb_reg_id, i;
	char *pbuffer;
	int len = 0;

	/* This is a user read operation */
	debug->op = LPFC_IDIAG_OP_RD;

	if (!debug->buffer)
		debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
	if (!debug->buffer)
		return 0;
	pbuffer = debug->buffer;

	if (*ppos)
		return 0;

	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
		drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
	else
		return 0;

	if (drb_reg_id == LPFC_DRB_ACC_ALL)
		for (i = 1; i <= LPFC_DRB_MAX; i++)
			len = lpfc_idiag_drbacc_read_reg(phba,
							 pbuffer, len, i);
	else
		len = lpfc_idiag_drbacc_read_reg(phba,
						 pbuffer, len, drb_reg_id);

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}

/**
 * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine gets the debugfs idiag command struct from user space and then
 * performs the syntax check for port doorbell register read (dump) or write
 * (set) command accordingly. In the case of port doorbell register read
 * command, it sets up the command in the idiag command struct for the
 * following debugfs read operation. In the case of port doorbell register
 * write operation, it executes the write operation into the port doorbell
 * register accordingly.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
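 *
 * Command layout (derived from the checks below; opcode values are the
 * LPFC_IDIAG_CMD_DRBACC_* constants): a read setup supplies just the
 * doorbell register id, selecting the EQ/CQ/MQ/WQ/RQ doorbell or
 * LPFC_DRB_ACC_ALL to dump them all, and a write/set/clear is
 * "<opcode> <doorbell id> <value>", applied read-modify-write with a
 * trailing readl() to flush the posted write.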
**/ static ssize_t lpfc_idiag_drbacc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t drb_reg_id, value, reg_val = 0; void __iomem *drb_reg; int rc; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; /* Sanity check on command line arguments */ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX]; value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX]; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { if (rc != LPFC_DRB_ACC_WR_CMD_ARG) goto error_out; if (drb_reg_id > LPFC_DRB_MAX) goto error_out; } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) { if (rc != LPFC_DRB_ACC_RD_CMD_ARG) goto error_out; if ((drb_reg_id > LPFC_DRB_MAX) && (drb_reg_id != LPFC_DRB_ACC_ALL)) goto error_out; } else goto error_out; /* Perform the write access operation */ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { switch (drb_reg_id) { case LPFC_DRB_EQ: drb_reg = phba->sli4_hba.EQDBregaddr; break; case LPFC_DRB_CQ: drb_reg = phba->sli4_hba.CQDBregaddr; break; case LPFC_DRB_MQ: drb_reg = phba->sli4_hba.MQDBregaddr; break; case LPFC_DRB_WQ: drb_reg = phba->sli4_hba.WQDBregaddr; break; case LPFC_DRB_RQ: drb_reg = phba->sli4_hba.RQDBregaddr; break; default: goto error_out; } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR) reg_val = value; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) { reg_val = readl(drb_reg); reg_val |= value; } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { reg_val = readl(drb_reg); reg_val &= ~value; } writel(reg_val, drb_reg); readl(drb_reg); /* flush */ } return nbytes; error_out: /* Clean out command structure on command error out */ memset(&idiag, 0, sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers * @phba: The pointer to hba structure. * @pbuffer: The pointer to the buffer to copy the data to. * @len: The length of bytes to copied. * @ctlregid: The id to doorbell registers. * * Description: * This routine reads a control register and copies its content to the * user buffer pointed to by @pbuffer. * * Returns: * This function returns the amount of data that was copied into @pbuffer. 
**/ static int lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer, int len, uint32_t ctlregid) { if (!pbuffer) return 0; switch (ctlregid) { case LPFC_CTL_PORT_SEM: len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port SemReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET)); break; case LPFC_CTL_PORT_STA: len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port StaReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_STA_OFFSET)); break; case LPFC_CTL_PORT_CTL: len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port CtlReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_CTL_OFFSET)); break; case LPFC_CTL_PORT_ER1: len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port Er1Reg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET)); break; case LPFC_CTL_PORT_ER2: len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port Er2Reg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER2_OFFSET)); break; case LPFC_CTL_PDEV_CTL: len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "PDev CtlReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET)); break; default: break; } return len; } /** * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba port and device registers according * to the idiag command, and copies to user @buf. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t ctl_reg_id, i; char *pbuffer; int len = 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX]; else return 0; if (ctl_reg_id == LPFC_CTL_ACC_ALL) for (i = 1; i <= LPFC_CTL_MAX; i++) len = lpfc_idiag_ctlacc_read_reg(phba, pbuffer, len, i); else len = lpfc_idiag_ctlacc_read_reg(phba, pbuffer, len, ctl_reg_id); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * This routine get the debugfs idiag command struct from user space and then * perform the syntax check for port and device control register read (dump) * or write (set) command accordingly. * * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. 
**/ static ssize_t lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; uint32_t ctl_reg_id, value, reg_val = 0; void __iomem *ctl_reg; int rc; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; /* Sanity check on command line arguments */ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX]; value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX]; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { if (rc != LPFC_CTL_ACC_WR_CMD_ARG) goto error_out; if (ctl_reg_id > LPFC_CTL_MAX) goto error_out; } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) { if (rc != LPFC_CTL_ACC_RD_CMD_ARG) goto error_out; if ((ctl_reg_id > LPFC_CTL_MAX) && (ctl_reg_id != LPFC_CTL_ACC_ALL)) goto error_out; } else goto error_out; /* Perform the write access operation */ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { switch (ctl_reg_id) { case LPFC_CTL_PORT_SEM: ctl_reg = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET; break; case LPFC_CTL_PORT_STA: ctl_reg = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_STA_OFFSET; break; case LPFC_CTL_PORT_CTL: ctl_reg = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_CTL_OFFSET; break; case LPFC_CTL_PORT_ER1: ctl_reg = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET; break; case LPFC_CTL_PORT_ER2: ctl_reg = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER2_OFFSET; break; case LPFC_CTL_PDEV_CTL: ctl_reg = phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET; break; default: goto error_out; } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR) reg_val = value; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) { reg_val = readl(ctl_reg); reg_val |= value; } if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) { reg_val = readl(ctl_reg); reg_val &= ~value; } writel(reg_val, ctl_reg); readl(ctl_reg); /* flush */ } return nbytes; error_out: /* Clean out command structure on command error out */ memset(&idiag, 0, sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup * @phba: Pointer to HBA context object. * @pbuffer: Pointer to data buffer. * * Description: * This routine gets the driver mailbox access debugfs setup information. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. 
**/ static int lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer) { uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd; int len = 0; mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_dump_map: 0x%08x\n", mbx_dump_map); len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_dump_cnt: %04d\n", mbx_dump_cnt); len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_word_cnt: %04d\n", mbx_word_cnt); len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd); return len; } /** * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the @phba driver mailbox access debugfs setup * information. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. **/ static ssize_t lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; char *pbuffer; int len = 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) && (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)) return 0; len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } /** * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * This routine get the debugfs idiag command struct from user space and then * perform the syntax check for driver mailbox command (dump) and sets up the * necessary states in the idiag command struct accordingly. * * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. 
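* Note that a dump count of zero is treated as a stop request: the idiag command state is cleared and the write still returns @nbytes.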
**/ static ssize_t lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd; int rc; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; /* Sanity check on command line arguments */ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) { if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL)) goto error_out; if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) && (mbx_dump_map != LPFC_MBX_DMP_ALL)) goto error_out; if (mbx_word_cnt > sizeof(MAILBOX_t)) goto error_out; } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) { if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL)) goto error_out; if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) && (mbx_dump_map != LPFC_MBX_DMP_ALL)) goto error_out; if (mbx_word_cnt > (BSG_MBOX_SIZE)/4) goto error_out; if (mbx_mbox_cmd != 0x9b) goto error_out; } else goto error_out; if (mbx_word_cnt == 0) goto error_out; if (rc != LPFC_MBX_DMP_ARG) goto error_out; if (mbx_mbox_cmd & ~0xff) goto error_out; /* condition for stop mailbox dump */ if (mbx_dump_cnt == 0) goto reset_out; return nbytes; reset_out: /* Clean out command structure on command error out */ memset(&idiag, 0, sizeof(idiag)); return nbytes; error_out: /* Clean out command structure on command error out */ memset(&idiag, 0, sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_extacc_avail_get - get the available extents information * @phba: pointer to lpfc hba data structure. * @pbuffer: pointer to internal buffer. * @len: length into the internal buffer data has been copied. * * Description: * This routine is to get the available extent information. * * Returns: * overall length of the data read into the internal buffer. **/ static int lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len) { uint16_t ext_cnt = 0, ext_size = 0; len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\nAvailable Extents Information:\n"); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available VPI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI, &ext_cnt, &ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available VFI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI, &ext_cnt, &ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available RPI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI, &ext_cnt, &ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available XRI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI, &ext_cnt, &ext_size); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); return len; } /** * lpfc_idiag_extacc_alloc_get - get the allocated extents information * @phba: pointer to lpfc hba data structure. 
* @pbuffer: pointer to internal buffer. * @len: length into the internal buffer data has been copied. * * Description: * This routine is to get the allocated extent information. * * Returns: * overall length of the data read into the internal buffer. **/ static int lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len) { uint16_t ext_cnt, ext_size; int rc; len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\nAllocated Extents Information:\n"); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated VPI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI, &ext_cnt, &ext_size); if (!rc) len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated VFI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI, &ext_cnt, &ext_size); if (!rc) len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated RPI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI, &ext_cnt, &ext_size); if (!rc) len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated XRI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI, &ext_cnt, &ext_size); if (!rc) len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); return len; } /** * lpfc_idiag_extacc_drivr_get - get driver extent information * @phba: pointer to lpfc hba data structure. * @pbuffer: pointer to internal buffer. * @len: length into the internal buffer data has been copied. * * Description: * This routine is to get the driver extent information. * * Returns: * overall length of the data read into the internal buffer. 
**/ static int lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len) { struct lpfc_rsrc_blks *rsrc_blks; int index; len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\nDriver Extents Information:\n"); len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tVPI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) { len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tVFI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list, list) { len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tRPI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list, list) { len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tXRI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list, list) { len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } return len; } /** * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands * @file: The file pointer to read from. * @buf: The buffer to copy the user data from. * @nbytes: The number of bytes to get. * @ppos: The position in the file to start reading from. * * This routine get the debugfs idiag command struct from user space and then * perform the syntax check for extent information access commands and sets * up the necessary states in the idiag command struct accordingly. * * It returns the @nbytges passing in from debugfs user space when successful. * In case of error conditions, it returns proper error code back to the user * space. **/ static ssize_t lpfc_idiag_extacc_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; uint32_t ext_map; int rc; /* This is a user write operation */ debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); if (rc < 0) return rc; ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX]; if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD) goto error_out; if (rc != LPFC_EXT_ACC_CMD_ARG) goto error_out; if (!(ext_map & LPFC_EXT_ACC_ALL)) goto error_out; return nbytes; error_out: /* Clean out command structure on command error out */ memset(&idiag, 0, sizeof(idiag)); return -EINVAL; } /** * lpfc_idiag_extacc_read - idiag debugfs read access to extent information * @file: The file pointer to read from. * @buf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads data from the proper extent information according to * the idiag command, and copies to user @buf. * * Returns: * This function returns the amount of data that was read (this could be less * than @nbytes if the end of the file was reached) or a negative error value. 
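* The extent map may select any combination of the available, allocated, and driver views; the matching sections are appended to the output buffer in that order.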
**/ static ssize_t lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; char *pbuffer; uint32_t ext_map; int len = 0; /* This is a user read operation */ debug->op = LPFC_IDIAG_OP_RD; if (!debug->buffer) debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL); if (!debug->buffer) return 0; pbuffer = debug->buffer; if (*ppos) return 0; if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD) return 0; ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX]; if (ext_map & LPFC_EXT_ACC_AVAIL) len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len); if (ext_map & LPFC_EXT_ACC_ALLOC) len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len); if (ext_map & LPFC_EXT_ACC_DRIVR) len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } static int lpfc_cgn_buffer_open(struct inode *inode, struct file *file) { struct lpfc_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE); if (!debug->buffer) { kfree(debug); goto out; } debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; char *buffer = debug->buffer; uint32_t *ptr; int cnt, len = 0; if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) { len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Congestion Mgmt is not supported\n"); goto out; } ptr = (uint32_t *)phba->cgn_i->virt; len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Congestion Buffer Header\n"); /* Dump the first 32 bytes */ cnt = 32; len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "000: %08x %08x %08x %08x %08x %08x %08x %08x\n", *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); ptr += 8; len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Congestion Buffer Data\n"); while (cnt < sizeof(struct lpfc_cgn_info)) { if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Truncated . . .\n"); goto out; } len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "%03x: %08x %08x %08x %08x " "%08x %08x %08x %08x\n", cnt, *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); cnt += 32; ptr += 8; } if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Truncated . . 
.\n"); goto out; } len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "Parameter Data\n"); ptr = (uint32_t *)&phba->cgn_p; len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, "%08x %08x %08x %08x\n", *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3)); out: return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); } static int lpfc_cgn_buffer_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; vfree(debug->buffer); kfree(debug); return 0; } static int lpfc_rx_monitor_open(struct inode *inode, struct file *file) { struct lpfc_rx_monitor_debug *debug; int rc = -ENOMEM; debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE); if (!debug->buffer) { kfree(debug); goto out; } debug->i_private = inode->i_private; file->private_data = debug; rc = 0; out: return rc; } static ssize_t lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_rx_monitor_debug *debug = file->private_data; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; char *buffer = debug->buffer; if (!phba->rx_monitor) { scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE, "Rx Monitor Info is empty.\n"); } else { lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer, MAX_DEBUGFS_RX_INFO_SIZE, LPFC_MAX_RXMONITOR_ENTRY); } return simple_read_from_buffer(buf, nbytes, ppos, buffer, strlen(buffer)); } static int lpfc_rx_monitor_release(struct inode *inode, struct file *file) { struct lpfc_rx_monitor_debug *debug = file->private_data; vfree(debug->buffer); kfree(debug); return 0; } #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, .open = lpfc_debugfs_disc_trc_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_nodelist static const struct file_operations lpfc_debugfs_op_nodelist = { .owner = THIS_MODULE, .open = lpfc_debugfs_nodelist_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_multixripools static const struct file_operations lpfc_debugfs_op_multixripools = { .owner = THIS_MODULE, .open = lpfc_debugfs_multixripools_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_multixripools_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_hbqinfo static const struct file_operations lpfc_debugfs_op_hbqinfo = { .owner = THIS_MODULE, .open = lpfc_debugfs_hbqinfo_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, }; #ifdef LPFC_HDWQ_LOCK_STAT #undef lpfc_debugfs_op_lockstat static const struct file_operations lpfc_debugfs_op_lockstat = { .owner = THIS_MODULE, .open = lpfc_debugfs_lockstat_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_lockstat_write, .release = lpfc_debugfs_release, }; #endif #undef lpfc_debugfs_ras_log static const struct file_operations lpfc_debugfs_ras_log = { .owner = THIS_MODULE, .open = lpfc_debugfs_ras_log_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_ras_log_release, }; #undef lpfc_debugfs_op_dumpHBASlim static const struct file_operations lpfc_debugfs_op_dumpHBASlim = { .owner = THIS_MODULE, .open = lpfc_debugfs_dumpHBASlim_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_dumpHostSlim static const struct 
file_operations lpfc_debugfs_op_dumpHostSlim = { .owner = THIS_MODULE, .open = lpfc_debugfs_dumpHostSlim_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_nvmestat static const struct file_operations lpfc_debugfs_op_nvmestat = { .owner = THIS_MODULE, .open = lpfc_debugfs_nvmestat_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_nvmestat_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_scsistat static const struct file_operations lpfc_debugfs_op_scsistat = { .owner = THIS_MODULE, .open = lpfc_debugfs_scsistat_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_scsistat_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_ioktime static const struct file_operations lpfc_debugfs_op_ioktime = { .owner = THIS_MODULE, .open = lpfc_debugfs_ioktime_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_ioktime_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_nvmeio_trc static const struct file_operations lpfc_debugfs_op_nvmeio_trc = { .owner = THIS_MODULE, .open = lpfc_debugfs_nvmeio_trc_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_nvmeio_trc_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_hdwqstat static const struct file_operations lpfc_debugfs_op_hdwqstat = { .owner = THIS_MODULE, .open = lpfc_debugfs_hdwqstat_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .write = lpfc_debugfs_hdwqstat_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_dif_err static const struct file_operations lpfc_debugfs_op_dif_err = { .owner = THIS_MODULE, .open = simple_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_dif_err_read, .write = lpfc_debugfs_dif_err_write, .release = lpfc_debugfs_dif_err_release, }; #undef lpfc_debugfs_op_slow_ring_trc static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { .owner = THIS_MODULE, .open = lpfc_debugfs_slow_ring_trc_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, .release = lpfc_debugfs_release, }; static struct dentry *lpfc_debugfs_root = NULL; static atomic_t lpfc_debugfs_hba_count; /* * File operations for the iDiag debugfs */ #undef lpfc_idiag_op_pciCfg static const struct file_operations lpfc_idiag_op_pciCfg = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_pcicfg_read, .write = lpfc_idiag_pcicfg_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_idiag_op_barAcc static const struct file_operations lpfc_idiag_op_barAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_baracc_read, .write = lpfc_idiag_baracc_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_idiag_op_queInfo static const struct file_operations lpfc_idiag_op_queInfo = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .read = lpfc_idiag_queinfo_read, .release = lpfc_idiag_release, }; #undef lpfc_idiag_op_queAcc static const struct file_operations lpfc_idiag_op_queAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_queacc_read, .write = lpfc_idiag_queacc_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_idiag_op_drbAcc static const struct file_operations lpfc_idiag_op_drbAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_drbacc_read, .write = 
lpfc_idiag_drbacc_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_idiag_op_ctlAcc static const struct file_operations lpfc_idiag_op_ctlAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_ctlacc_read, .write = lpfc_idiag_ctlacc_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_idiag_op_mbxAcc static const struct file_operations lpfc_idiag_op_mbxAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_mbxacc_read, .write = lpfc_idiag_mbxacc_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_idiag_op_extAcc static const struct file_operations lpfc_idiag_op_extAcc = { .owner = THIS_MODULE, .open = lpfc_idiag_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_idiag_extacc_read, .write = lpfc_idiag_extacc_write, .release = lpfc_idiag_cmd_release, }; #undef lpfc_cgn_buffer_op static const struct file_operations lpfc_cgn_buffer_op = { .owner = THIS_MODULE, .open = lpfc_cgn_buffer_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_cgn_buffer_read, .release = lpfc_cgn_buffer_release, }; #undef lpfc_rx_monitor_op static const struct file_operations lpfc_rx_monitor_op = { .owner = THIS_MODULE, .open = lpfc_rx_monitor_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_rx_monitor_read, .release = lpfc_rx_monitor_release, }; #endif /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command * @phba: Pointer to HBA context object. * @dmabuf: Pointer to a DMA buffer descriptor. * * Description: * This routine dump a bsg pass-through non-embedded mailbox command with * external buffer. **/ void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, enum mbox_type mbox_tp, enum dma_type dma_tp, enum sta_type sta_tp, struct lpfc_dmabuf *dmabuf, uint32_t ext_buf) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt; char line_buf[LPFC_MBX_ACC_LBUF_SZ]; int len = 0; uint32_t do_dump = 0; uint32_t *pword; uint32_t i; if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP) return; mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) || (*mbx_dump_cnt == 0) || (*mbx_word_cnt == 0)) return; if (*mbx_mbox_cmd != 0x9B) return; if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) { do_dump |= LPFC_BSG_DMP_MBX_RD_MBX; pr_err("\nRead mbox command (x%x), " "nemb:0x%x, extbuf_cnt:%d:\n", sta_tp, nemb_tp, ext_buf); } } if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) { do_dump |= LPFC_BSG_DMP_MBX_RD_BUF; pr_err("\nRead mbox buffer (x%x), " "nemb:0x%x, extbuf_seq:%d:\n", sta_tp, nemb_tp, ext_buf); } } if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) { do_dump |= LPFC_BSG_DMP_MBX_WR_MBX; pr_err("\nWrite mbox command (x%x), " "nemb:0x%x, extbuf_cnt:%d:\n", sta_tp, nemb_tp, ext_buf); } } if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) { if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) { do_dump |= LPFC_BSG_DMP_MBX_WR_BUF; pr_err("\nWrite mbox buffer (x%x), " "nemb:0x%x, extbuf_seq:%d:\n", sta_tp, nemb_tp, ext_buf); } } /* dump buffer content */ if (do_dump) { pword = (uint32_t *)dmabuf->virt; for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) pr_err("%s\n", line_buf); 
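/* Start a new output line: reset the local buffer and print the word-index prefix. */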
len = 0; len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%03d: ", i); } len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%08x ", (uint32_t)*pword); pword++; } if ((i - 1) % 8) pr_err("%s\n", line_buf); (*mbx_dump_cnt)--; } /* Clean out command structure on reaching dump count */ if (*mbx_dump_cnt == 0) memset(&idiag, 0, sizeof(idiag)); return; #endif } /* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command * @phba: Pointer to HBA context object. * @dmabuf: Pointer to a DMA buffer descriptor. * * Description: * This routine dump a pass-through non-embedded mailbox command from issue * mailbox command. **/ void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd; char line_buf[LPFC_MBX_ACC_LBUF_SZ]; int len = 0; uint32_t *pword; uint8_t *pbyte; uint32_t i, j; if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) return; mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX]; mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX]; mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) || (*mbx_dump_cnt == 0) || (*mbx_word_cnt == 0)) return; if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) && (*mbx_mbox_cmd != pmbox->mbxCommand)) return; /* dump buffer content */ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) { pr_err("Mailbox command:0x%x dump by word:\n", pmbox->mbxCommand); pword = (uint32_t *)pmbox; for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%03d: ", i); } len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%08x ", ((uint32_t)*pword) & 0xffffffff); pword++; } if ((i - 1) % 8) pr_err("%s\n", line_buf); pr_err("\n"); } if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) { pr_err("Mailbox command:0x%x dump by byte:\n", pmbox->mbxCommand); pbyte = (uint8_t *)pmbox; for (i = 0; i < *mbx_word_cnt; i++) { if (!(i % 8)) { if (i != 0) pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%03d: ", i); } for (j = 0; j < 4; j++) { len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%02x", ((uint8_t)*pbyte) & 0xff); pbyte++; } len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, " "); } if ((i - 1) % 8) pr_err("%s\n", line_buf); pr_err("\n"); } (*mbx_dump_cnt)--; /* Clean out command structure on reaching dump count */ if (*mbx_dump_cnt == 0) memset(&idiag, 0, sizeof(idiag)); return; #endif } /** * lpfc_debugfs_initialize - Initialize debugfs for a vport * @vport: The vport pointer to initialize. * * Description: * When Debugfs is configured this routine sets up the lpfc debugfs file system. * If not already created, this routine will create the lpfc directory, and * lpfcX directory (for this HBA), and vportX directory for this vport. It will * also create each file used to access lpfc specific debugfs information. 
**/ inline void lpfc_debugfs_initialize(struct lpfc_vport *vport) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_hba *phba = vport->phba; char name[64]; uint32_t num, i; bool pport_setup = false; if (!lpfc_debugfs_enable) return; /* Setup lpfc root directory */ if (!lpfc_debugfs_root) { lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL); atomic_set(&lpfc_debugfs_hba_count, 0); } if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; /* Setup funcX directory for specific HBA PCI function */ snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { pport_setup = true; phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); atomic_inc(&lpfc_debugfs_hba_count); atomic_set(&phba->debugfs_vport_count, 0); /* Multi-XRI pools */ snprintf(name, sizeof(name), "multixripools"); phba->debug_multixri_pools = debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_multixripools); if (IS_ERR(phba->debug_multixri_pools)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0527 Cannot create debugfs multixripools\n"); goto debug_failed; } /* Congestion Info Buffer */ scnprintf(name, sizeof(name), "cgn_buffer"); phba->debug_cgn_buffer = debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_cgn_buffer_op); if (IS_ERR(phba->debug_cgn_buffer)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6527 Cannot create debugfs " "cgn_buffer\n"); goto debug_failed; } /* RX Monitor */ scnprintf(name, sizeof(name), "rx_monitor"); phba->debug_rx_monitor = debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_rx_monitor_op); if (IS_ERR(phba->debug_rx_monitor)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6528 Cannot create debugfs " "rx_monitor\n"); goto debug_failed; } /* RAS log */ snprintf(name, sizeof(name), "ras_log"); phba->debug_ras_log = debugfs_create_file(name, 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_ras_log); if (IS_ERR(phba->debug_ras_log)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6148 Cannot create debugfs" " ras_log\n"); goto debug_failed; } /* Setup hbqinfo */ snprintf(name, sizeof(name), "hbqinfo"); phba->debug_hbqinfo = debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_hbqinfo); #ifdef LPFC_HDWQ_LOCK_STAT /* Setup lockstat */ snprintf(name, sizeof(name), "lockstat"); phba->debug_lockstat = debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_lockstat); if (IS_ERR(phba->debug_lockstat)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "4610 Can't create debugfs lockstat\n"); goto debug_failed; } #endif /* Setup dumpHBASlim */ if (phba->sli_rev < LPFC_SLI_REV4) { snprintf(name, sizeof(name), "dumpHBASlim"); phba->debug_dumpHBASlim = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHBASlim); } else phba->debug_dumpHBASlim = NULL; /* Setup dumpHostSlim */ if (phba->sli_rev < LPFC_SLI_REV4) { snprintf(name, sizeof(name), "dumpHostSlim"); phba->debug_dumpHostSlim = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHostSlim); } else phba->debug_dumpHostSlim = NULL; /* Setup DIF Error Injections */ snprintf(name, sizeof(name), "InjErrLBA"); phba->debug_InjErrLBA = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; snprintf(name, sizeof(name), "InjErrNPortID"); phba->debug_InjErrNPortID = 
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "InjErrWWPN"); phba->debug_InjErrWWPN = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "writeGuardInjErr"); phba->debug_writeGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "writeAppInjErr"); phba->debug_writeApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "writeRefInjErr"); phba->debug_writeRef = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "readGuardInjErr"); phba->debug_readGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "readAppInjErr"); phba->debug_readApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); snprintf(name, sizeof(name), "readRefInjErr"); phba->debug_readRef = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { num = lpfc_debugfs_max_slow_ring_trc - 1; if (num & lpfc_debugfs_max_slow_ring_trc) { /* Change to be a power of 2 */ num = lpfc_debugfs_max_slow_ring_trc; i = 0; while (num > 1) { num = num >> 1; i++; } lpfc_debugfs_max_slow_ring_trc = (1 << i); pr_err("lpfc_debugfs_max_disc_trc changed to " "%d\n", lpfc_debugfs_max_disc_trc); } } snprintf(name, sizeof(name), "slow_ring_trace"); phba->debug_slow_ring_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_slow_ring_trc); if (!phba->slow_ring_trc) { phba->slow_ring_trc = kcalloc( lpfc_debugfs_max_slow_ring_trc, sizeof(struct lpfc_debugfs_trc), GFP_KERNEL); if (!phba->slow_ring_trc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0416 Cannot create debugfs " "slow_ring buffer\n"); goto debug_failed; } atomic_set(&phba->slow_ring_trc_cnt, 0); } snprintf(name, sizeof(name), "nvmeio_trc"); phba->debug_nvmeio_trc = debugfs_create_file(name, 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_nvmeio_trc); atomic_set(&phba->nvmeio_trc_cnt, 0); if (lpfc_debugfs_max_nvmeio_trc) { num = lpfc_debugfs_max_nvmeio_trc - 1; if (num & lpfc_debugfs_max_disc_trc) { /* Change to be a power of 2 */ num = lpfc_debugfs_max_nvmeio_trc; i = 0; while (num > 1) { num = num >> 1; i++; } lpfc_debugfs_max_nvmeio_trc = (1 << i); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0575 lpfc_debugfs_max_nvmeio_trc " "changed to %d\n", lpfc_debugfs_max_nvmeio_trc); } phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; /* Allocate trace buffer and initialize */ phba->nvmeio_trc = kzalloc( (sizeof(struct lpfc_debugfs_nvmeio_trc) * phba->nvmeio_trc_size), GFP_KERNEL); if (!phba->nvmeio_trc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0576 Cannot create debugfs " "nvmeio_trc buffer\n"); goto nvmeio_off; } phba->nvmeio_trc_on = 1; phba->nvmeio_trc_output_idx = 0; phba->nvmeio_trc = NULL; } else { nvmeio_off: phba->nvmeio_trc_size = 0; phba->nvmeio_trc_on = 0; phba->nvmeio_trc_output_idx = 0; phba->nvmeio_trc = NULL; } } snprintf(name, sizeof(name), "vport%d", vport->vpi); if (!vport->vport_debugfs_root) { vport->vport_debugfs_root = 
debugfs_create_dir(name, phba->hba_debugfs_root); atomic_inc(&phba->debugfs_vport_count); } if (lpfc_debugfs_max_disc_trc) { num = lpfc_debugfs_max_disc_trc - 1; if (num & lpfc_debugfs_max_disc_trc) { /* Change to be a power of 2 */ num = lpfc_debugfs_max_disc_trc; i = 0; while (num > 1) { num = num >> 1; i++; } lpfc_debugfs_max_disc_trc = (1 << i); pr_err("lpfc_debugfs_max_disc_trc changed to %d\n", lpfc_debugfs_max_disc_trc); } } vport->disc_trc = kzalloc( (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc), GFP_KERNEL); if (!vport->disc_trc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0418 Cannot create debugfs disc trace " "buffer\n"); goto debug_failed; } atomic_set(&vport->disc_trc_cnt, 0); snprintf(name, sizeof(name), "discovery_trace"); vport->debug_disc_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_disc_trc); snprintf(name, sizeof(name), "nodelist"); vport->debug_nodelist = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nodelist); snprintf(name, sizeof(name), "nvmestat"); vport->debug_nvmestat = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nvmestat); snprintf(name, sizeof(name), "scsistat"); vport->debug_scsistat = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_scsistat); if (IS_ERR(vport->debug_scsistat)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "4611 Cannot create debugfs scsistat\n"); goto debug_failed; } snprintf(name, sizeof(name), "ioktime"); vport->debug_ioktime = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_ioktime); if (IS_ERR(vport->debug_ioktime)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0815 Cannot create debugfs ioktime\n"); goto debug_failed; } snprintf(name, sizeof(name), "hdwqstat"); vport->debug_hdwqstat = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_hdwqstat); /* * The following section is for additional directories/files for the * physical port. 
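* Only the call that created the fnX directory above (pport_setup) continues past the check below; vport-only initializations return here.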
*/ if (!pport_setup) goto debug_failed; /* * iDiag debugfs root entry points for SLI4 device only */ if (phba->sli_rev < LPFC_SLI_REV4) goto debug_failed; snprintf(name, sizeof(name), "iDiag"); if (!phba->idiag_root) { phba->idiag_root = debugfs_create_dir(name, phba->hba_debugfs_root); /* Initialize iDiag data structure */ memset(&idiag, 0, sizeof(idiag)); } /* iDiag read PCI config space */ snprintf(name, sizeof(name), "pciCfg"); if (!phba->idiag_pci_cfg) { phba->idiag_pci_cfg = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); idiag.offset.last_rd = 0; } /* iDiag PCI BAR access */ snprintf(name, sizeof(name), "barAcc"); if (!phba->idiag_bar_acc) { phba->idiag_bar_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_barAcc); idiag.offset.last_rd = 0; } /* iDiag get PCI function queue information */ snprintf(name, sizeof(name), "queInfo"); if (!phba->idiag_que_info) { phba->idiag_que_info = debugfs_create_file(name, S_IFREG|S_IRUGO, phba->idiag_root, phba, &lpfc_idiag_op_queInfo); } /* iDiag access PCI function queue */ snprintf(name, sizeof(name), "queAcc"); if (!phba->idiag_que_acc) { phba->idiag_que_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_queAcc); } /* iDiag access PCI function doorbell registers */ snprintf(name, sizeof(name), "drbAcc"); if (!phba->idiag_drb_acc) { phba->idiag_drb_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_drbAcc); } /* iDiag access PCI function control registers */ snprintf(name, sizeof(name), "ctlAcc"); if (!phba->idiag_ctl_acc) { phba->idiag_ctl_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc); } /* iDiag access mbox commands */ snprintf(name, sizeof(name), "mbxAcc"); if (!phba->idiag_mbx_acc) { phba->idiag_mbx_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc); } /* iDiag extents access commands */ if (phba->sli4_hba.extents_in_use) { snprintf(name, sizeof(name), "extAcc"); if (!phba->idiag_ext_acc) { phba->idiag_ext_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_extAcc); } } debug_failed: return; #endif } /** * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport * @vport: The vport pointer to remove from debugfs. * * Description: * When Debugfs is configured this routine removes debugfs file system elements * that are specific to this vport. It also checks to see if there are any * users left for the debugfs directories associated with the HBA and driver. If * this is the last user of the HBA directory or driver directory then it will * remove those from the debugfs infrastructure as well. 
**/ inline void lpfc_debugfs_terminate(struct lpfc_vport *vport) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_hba *phba = vport->phba; kfree(vport->disc_trc); vport->disc_trc = NULL; debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ vport->debug_disc_trc = NULL; debugfs_remove(vport->debug_nodelist); /* nodelist */ vport->debug_nodelist = NULL; debugfs_remove(vport->debug_nvmestat); /* nvmestat */ vport->debug_nvmestat = NULL; debugfs_remove(vport->debug_scsistat); /* scsistat */ vport->debug_scsistat = NULL; debugfs_remove(vport->debug_ioktime); /* ioktime */ vport->debug_ioktime = NULL; debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */ vport->debug_hdwqstat = NULL; if (vport->vport_debugfs_root) { debugfs_remove(vport->vport_debugfs_root); /* vportX */ vport->vport_debugfs_root = NULL; atomic_dec(&phba->debugfs_vport_count); } if (atomic_read(&phba->debugfs_vport_count) == 0) { debugfs_remove(phba->debug_multixri_pools); /* multixripools*/ phba->debug_multixri_pools = NULL; debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; debugfs_remove(phba->debug_cgn_buffer); phba->debug_cgn_buffer = NULL; debugfs_remove(phba->debug_rx_monitor); phba->debug_rx_monitor = NULL; debugfs_remove(phba->debug_ras_log); phba->debug_ras_log = NULL; #ifdef LPFC_HDWQ_LOCK_STAT debugfs_remove(phba->debug_lockstat); /* lockstat */ phba->debug_lockstat = NULL; #endif debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ phba->debug_dumpHBASlim = NULL; debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ phba->debug_dumpHostSlim = NULL; debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ phba->debug_InjErrLBA = NULL; debugfs_remove(phba->debug_InjErrNPortID); phba->debug_InjErrNPortID = NULL; debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ phba->debug_InjErrWWPN = NULL; debugfs_remove(phba->debug_writeGuard); /* writeGuard */ phba->debug_writeGuard = NULL; debugfs_remove(phba->debug_writeApp); /* writeApp */ phba->debug_writeApp = NULL; debugfs_remove(phba->debug_writeRef); /* writeRef */ phba->debug_writeRef = NULL; debugfs_remove(phba->debug_readGuard); /* readGuard */ phba->debug_readGuard = NULL; debugfs_remove(phba->debug_readApp); /* readApp */ phba->debug_readApp = NULL; debugfs_remove(phba->debug_readRef); /* readRef */ phba->debug_readRef = NULL; kfree(phba->slow_ring_trc); phba->slow_ring_trc = NULL; /* slow_ring_trace */ debugfs_remove(phba->debug_slow_ring_trc); phba->debug_slow_ring_trc = NULL; debugfs_remove(phba->debug_nvmeio_trc); phba->debug_nvmeio_trc = NULL; kfree(phba->nvmeio_trc); phba->nvmeio_trc = NULL; /* * iDiag release */ if (phba->sli_rev == LPFC_SLI_REV4) { /* iDiag extAcc */ debugfs_remove(phba->idiag_ext_acc); phba->idiag_ext_acc = NULL; /* iDiag mbxAcc */ debugfs_remove(phba->idiag_mbx_acc); phba->idiag_mbx_acc = NULL; /* iDiag ctlAcc */ debugfs_remove(phba->idiag_ctl_acc); phba->idiag_ctl_acc = NULL; /* iDiag drbAcc */ debugfs_remove(phba->idiag_drb_acc); phba->idiag_drb_acc = NULL; /* iDiag queAcc */ debugfs_remove(phba->idiag_que_acc); phba->idiag_que_acc = NULL; /* iDiag queInfo */ debugfs_remove(phba->idiag_que_info); phba->idiag_que_info = NULL; /* iDiag barAcc */ debugfs_remove(phba->idiag_bar_acc); phba->idiag_bar_acc = NULL; /* iDiag pciCfg */ debugfs_remove(phba->idiag_pci_cfg); phba->idiag_pci_cfg = NULL; /* Finally remove the iDiag debugfs root */ debugfs_remove(phba->idiag_root); phba->idiag_root = NULL; } if (phba->hba_debugfs_root) { debugfs_remove(phba->hba_debugfs_root); /* fnX */ phba->hba_debugfs_root = 
NULL; atomic_dec(&lpfc_debugfs_hba_count); } if (atomic_read(&lpfc_debugfs_hba_count) == 0) { debugfs_remove(lpfc_debugfs_root); /* lpfc */ lpfc_debugfs_root = NULL; } } #endif return; } /* * Driver debug utility routines outside of debugfs. The debug utility * routines implemented here is intended to be used in the instrumented * debug driver for debugging host or port issues. */ /** * lpfc_debug_dump_all_queues - dump all the queues with a hba * @phba: Pointer to HBA context object. * * This function dumps entries of all the queues asociated with the @phba. **/ void lpfc_debug_dump_all_queues(struct lpfc_hba *phba) { int idx; /* * Dump Work Queues (WQs) */ lpfc_debug_dump_wq(phba, DUMP_MBX, 0); lpfc_debug_dump_wq(phba, DUMP_ELS, 0); lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) lpfc_debug_dump_wq(phba, DUMP_IO, idx); lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_dat_rq(phba); /* * Dump Complete Queues (CQs) */ lpfc_debug_dump_cq(phba, DUMP_MBX, 0); lpfc_debug_dump_cq(phba, DUMP_ELS, 0); lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) lpfc_debug_dump_cq(phba, DUMP_IO, idx); /* * Dump Event Queues (EQs) */ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) lpfc_debug_dump_hba_eq(phba, idx); }
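The debugfs entries above all follow the same read-handler shape: allocate a scratch buffer on first use, format it with scnprintf(), and hand the copy-to-user bookkeeping to simple_read_from_buffer(). The following is a minimal, self-contained sketch of that pattern outside the lpfc driver; the ex_* names and the dummy counter state are hypothetical, not part of lpfc.
/* Hedged sketch of the lpfc-style debugfs read pattern; ex_* names are made up. */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

#define EX_BUF_SIZE 256

struct ex_debug {
	char *buffer;		/* scratch buffer, allocated on first read */
	u32 counter;		/* stand-in for the state being dumped */
};

static ssize_t ex_debug_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *ppos)
{
	struct ex_debug *dbg = file->private_data;
	int len;

	if (!dbg->buffer)
		dbg->buffer = kmalloc(EX_BUF_SIZE, GFP_KERNEL);
	if (!dbg->buffer)
		return 0;

	/* Format the whole report; simple_read_from_buffer() honors *ppos. */
	len = scnprintf(dbg->buffer, EX_BUF_SIZE, "counter: %u\n", dbg->counter);

	return simple_read_from_buffer(buf, nbytes, ppos, dbg->buffer, len);
}

static int ex_debug_open(struct inode *inode, struct file *file)
{
	struct ex_debug *dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);

	if (!dbg)
		return -ENOMEM;
	dbg->counter = 42;	/* arbitrary example state */
	file->private_data = dbg;
	return 0;
}

static int ex_debug_release(struct inode *inode, struct file *file)
{
	struct ex_debug *dbg = file->private_data;

	kfree(dbg->buffer);
	kfree(dbg);
	return 0;
}

static const struct file_operations ex_debug_fops = {
	.owner = THIS_MODULE,
	.open = ex_debug_open,
	.read = ex_debug_read,
	.release = ex_debug_release,
};

static struct dentry *ex_debug_dentry;

static int __init ex_debug_init(void)
{
	/* Creates <debugfs>/ex_state; no private data is attached here. */
	ex_debug_dentry = debugfs_create_file("ex_state", 0444, NULL, NULL,
					      &ex_debug_fops);
	return 0;
}

static void __exit ex_debug_exit(void)
{
	debugfs_remove(ex_debug_dentry);
}

module_init(ex_debug_init);
module_exit(ex_debug_exit);
MODULE_LICENSE("GPL");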
linux-master
drivers/scsi/lpfc/lpfc_debugfs.c
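For the drbAcc and ctlAcc write handlers in the file above, the set (ST) and clear (CL) opcodes boil down to a read-modify-write on a memory-mapped register followed by a read-back to flush the posted write. Below is a condensed sketch of that pattern with a hypothetical ex_reg_update() helper that is not part of lpfc.
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper illustrating the WR/ST/CL register update pattern. */
enum ex_reg_op { EX_REG_WR, EX_REG_ST, EX_REG_CL };

static void ex_reg_update(void __iomem *reg, enum ex_reg_op op, u32 value)
{
	u32 reg_val = 0;

	switch (op) {
	case EX_REG_WR:			/* overwrite the register */
		reg_val = value;
		break;
	case EX_REG_ST:			/* set the bits in value */
		reg_val = readl(reg) | value;
		break;
	case EX_REG_CL:			/* clear the bits in value */
		reg_val = readl(reg) & ~value;
		break;
	}

	writel(reg_val, reg);
	readl(reg);			/* flush the posted write */
}
In lpfc_idiag_drbacc_write() the __iomem pointer comes from the sli4_hba doorbell register addresses and the value from the parsed idiag command.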
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ /* * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS */ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* FDMI Port Speed definitions - FC-GS-7 */ #define HBA_PORTSPEED_1GFC 0x00000001 /* 1G FC */ #define HBA_PORTSPEED_2GFC 0x00000002 /* 2G FC */ #define HBA_PORTSPEED_4GFC 0x00000008 /* 4G FC */ #define HBA_PORTSPEED_10GFC 0x00000004 /* 10G FC */ #define HBA_PORTSPEED_8GFC 0x00000010 /* 8G FC */ #define HBA_PORTSPEED_16GFC 0x00000020 /* 16G FC */ #define HBA_PORTSPEED_32GFC 0x00000040 /* 32G FC */ #define HBA_PORTSPEED_20GFC 0x00000080 /* 20G FC */ #define HBA_PORTSPEED_40GFC 0x00000100 /* 40G FC */ #define HBA_PORTSPEED_128GFC 0x00000200 /* 128G FC */ #define HBA_PORTSPEED_64GFC 0x00000400 /* 64G FC */ #define HBA_PORTSPEED_256GFC 0x00000800 /* 256G FC */ #define HBA_PORTSPEED_UNKNOWN 0x00008000 /* Unknown */ #define HBA_PORTSPEED_10GE 0x00010000 /* 10G E */ #define HBA_PORTSPEED_40GE 0x00020000 /* 40G E */ #define HBA_PORTSPEED_100GE 0x00040000 /* 100G E */ #define HBA_PORTSPEED_25GE 0x00080000 /* 25G E */ #define HBA_PORTSPEED_50GE 0x00100000 /* 50G E */ #define HBA_PORTSPEED_400GE 0x00200000 /* 400G E */ #define FOURBYTES 4 static char *lpfc_release_version = LPFC_DRIVER_VERSION; static void lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); static void lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, struct lpfc_dmabuf *mp, uint32_t size) { if (!mp) { lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0146 Ignoring unsolicited CT No HBQ " "status = x%x\n", get_job_ulpstatus(phba, piocbq)); } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0145 Ignoring unsolicited CT HBQ Size:%d " "status = x%x\n", size, get_job_ulpstatus(phba, piocbq)); } static void lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, struct 
lpfc_dmabuf *mp, uint32_t size) { lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size); } /** * lpfc_ct_unsol_cmpl : Completion callback function for unsol ct commands * @phba : pointer to lpfc hba data structure. * @cmdiocb : pointer to lpfc command iocb data structure. * @rspiocb : pointer to lpfc response iocb data structure. * * This routine is the callback function for issuing unsol ct reject command. * The memory allocated in the reject command path is freed up here. **/ static void lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *mp, *bmp; ndlp = cmdiocb->ndlp; if (ndlp) lpfc_nlp_put(ndlp); mp = cmdiocb->rsp_dmabuf; bmp = cmdiocb->bpl_dmabuf; if (mp) { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); cmdiocb->rsp_dmabuf = NULL; } if (bmp) { lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); cmdiocb->bpl_dmabuf = NULL; } lpfc_sli_release_iocbq(phba, cmdiocb); } /** * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands * @ndlp: pointer to a node-list data structure. * @ct_req: pointer to the CT request data structure. * @ulp_context: context of received UNSOL CT command * @ox_id: ox_id of the UNSOL CT command * * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending * a reject response. Reject response is sent for the unhandled commands. **/ static void lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, struct lpfc_sli_ct_request *ct_req, u16 ulp_context, u16 ox_id) { struct lpfc_vport *vport = ndlp->vport; struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ct_request *ct_rsp; struct lpfc_iocbq *cmdiocbq = NULL; struct lpfc_dmabuf *bmp = NULL; struct lpfc_dmabuf *mp = NULL; struct ulp_bde64 *bpl; u8 rc = 0; u32 tmo; /* fill in BDEs for command */ mp = kmalloc(sizeof(*mp), GFP_KERNEL); if (!mp) { rc = 1; goto ct_exit; } mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys); if (!mp->virt) { rc = 2; goto ct_free_mp; } /* Allocate buffer for Buffer ptr list */ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); if (!bmp) { rc = 3; goto ct_free_mpvirt; } bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys); if (!bmp->virt) { rc = 4; goto ct_free_bmp; } INIT_LIST_HEAD(&mp->list); INIT_LIST_HEAD(&bmp->list); bpl = (struct ulp_bde64 *)bmp->virt; memset(bpl, 0, sizeof(struct ulp_bde64)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); bpl->tus.w = le32_to_cpu(bpl->tus.w); ct_rsp = (struct lpfc_sli_ct_request *)mp->virt; memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request)); ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION; ct_rsp->RevisionId.bits.InId = 0; ct_rsp->FsType = ct_req->FsType; ct_rsp->FsSubType = ct_req->FsSubType; ct_rsp->CommandResponse.bits.Size = 0; ct_rsp->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CT_RESPONSE_FS_RJT); ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED; ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL; cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { rc = 5; goto ct_free_bmpvirt; } if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], ox_id, 1, FC_RCTL_DD_SOL_CTL, 1, CMD_XMIT_SEQUENCE64_WQE); } else { lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1, FC_RCTL_DD_SOL_CTL, 1, CMD_XMIT_SEQUENCE64_CX); } /* Save for completion so we can release these resources */ cmdiocbq->rsp_dmabuf = mp; cmdiocbq->bpl_dmabuf = 
bmp; cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl; tmo = (3 * phba->fc_ratov); cmdiocbq->retry = 0; cmdiocbq->vport = vport; cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; cmdiocbq->ndlp = lpfc_nlp_get(ndlp); if (!cmdiocbq->ndlp) goto ct_no_ndlp; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); if (rc) { lpfc_nlp_put(ndlp); goto ct_no_ndlp; } return; ct_no_ndlp: rc = 6; lpfc_sli_release_iocbq(phba, cmdiocbq); ct_free_bmpvirt: lpfc_mbuf_free(phba, bmp->virt, bmp->phys); ct_free_bmp: kfree(bmp); ct_free_mpvirt: lpfc_mbuf_free(phba, mp->virt, mp->phys); ct_free_mp: kfree(mp); ct_exit: lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "6440 Unsol CT: Rsp err %d Data: x%x\n", rc, vport->fc_flag); } /** * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer * @phba: pointer to lpfc hba data structure. * @ctiocbq: pointer to lpfc CT command iocb data structure. * * This routine is used for processing the IOCB associated with a unsolicited * CT MIB request. It first determines whether there is an existing ndlp that * matches the DID from the unsolicited IOCB. If not, it will return. **/ static void lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) { struct lpfc_sli_ct_request *ct_req; struct lpfc_nodelist *ndlp = NULL; struct lpfc_vport *vport = ctiocbq->vport; u32 ulp_status = get_job_ulpstatus(phba, ctiocbq); u32 ulp_word4 = get_job_word4(phba, ctiocbq); u32 did; u16 mi_cmd; did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); if (ulp_status) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6438 Unsol CT: status:x%x/x%x did : x%x\n", ulp_status, ulp_word4, did); return; } /* Ignore traffic received during vport shutdown */ if (vport->fc_flag & FC_UNLOADING) return; ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6439 Unsol CT: NDLP Not Found for DID : x%x", did); return; } ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6442 : MI Cmd : x%x Not Supported\n", mi_cmd); lpfc_ct_reject_event(ndlp, ct_req, bf_get(wqe_ctxt_tag, &ctiocbq->wqe.xmit_els_rsp.wqe_com), bf_get(wqe_rcvoxid, &ctiocbq->wqe.xmit_els_rsp.wqe_com)); } /** * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring * @phba: pointer to lpfc hba data structure. * @pring: pointer to a SLI ring. * @ctiocbq: pointer to lpfc ct iocb data structure. * * This routine is used to process an unsolicited event received from a SLI * (Service Level Interface) ring. The actual processing of the data buffer * associated with the unsolicited event is done by invoking appropriate routine * after properly set up the iocb buffer from the SLI ring on which the * unsolicited event was received. 
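 *
 * As an illustration of the dispatch decision made on the received CT
 * preamble (a restatement of the checks below, not additional driver
 * logic; the helper name is illustrative only):
 *
 *	static bool ct_req_is_mib(struct lpfc_sli_ct_request *ct_req)
 *	{
 *		return ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
 *		       ct_req->FsSubType == SLI_CT_MIB_Subtypes;
 *	}
 *
 * Requests matching this predicate are handed to lpfc_ct_handle_mibreq();
 * all other unsolicited CT traffic is offered to the BSG passthrough path
 * via lpfc_bsg_ct_unsol_event().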
**/ void lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *ctiocbq) { struct lpfc_dmabuf *mp = NULL; IOCB_t *icmd = &ctiocbq->iocb; int i; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocb; dma_addr_t dma_addr; uint32_t size; struct list_head head; struct lpfc_sli_ct_request *ct_req; struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf; struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf; u32 status, parameter, bde_count = 0; struct lpfc_wcqe_complete *wcqe_cmpl = NULL; ctiocbq->cmd_dmabuf = NULL; ctiocbq->rsp_dmabuf = NULL; ctiocbq->bpl_dmabuf = NULL; wcqe_cmpl = &ctiocbq->wcqe_cmpl; status = get_job_ulpstatus(phba, ctiocbq); parameter = get_job_word4(phba, ctiocbq); if (phba->sli_rev == LPFC_SLI_REV4) bde_count = wcqe_cmpl->word3; else bde_count = icmd->ulpBdeCount; if (unlikely(status == IOSTAT_NEED_BUFFER)) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); } else if ((status == IOSTAT_LOCAL_REJECT) && ((parameter & IOERR_PARAM_MASK) == IOERR_RCV_BUFFER_WAITING)) { /* Not enough posted buffers; Try posting more buffers */ phba->fc_stat.NoRcvBuf++; if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) lpfc_sli3_post_buffer(phba, pring, 2); return; } /* If there are no BDEs associated * with this IOCB, there is nothing to do. */ if (bde_count == 0) return; ctiocbq->cmd_dmabuf = bdeBuf1; if (bde_count == 2) ctiocbq->bpl_dmabuf = bdeBuf2; ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE && ct_req->FsSubType == SLI_CT_MIB_Subtypes) { lpfc_ct_handle_mibreq(phba, ctiocbq); } else { if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq)) return; } if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { INIT_LIST_HEAD(&head); list_add_tail(&head, &ctiocbq->list); list_for_each_entry(iocb, &head, list) { if (phba->sli_rev == LPFC_SLI_REV4) bde_count = iocb->wcqe_cmpl.word3; else bde_count = iocb->iocb.ulpBdeCount; if (!bde_count) continue; bdeBuf1 = iocb->cmd_dmabuf; iocb->cmd_dmabuf = NULL; if (phba->sli_rev == LPFC_SLI_REV4) size = iocb->wqe.gen_req.bde.tus.f.bdeSize; else size = iocb->iocb.un.cont64[0].tus.f.bdeSize; lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size); lpfc_in_buf_free(phba, bdeBuf1); if (bde_count == 2) { bdeBuf2 = iocb->bpl_dmabuf; iocb->bpl_dmabuf = NULL; if (phba->sli_rev == LPFC_SLI_REV4) size = iocb->unsol_rcv_len; else size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize; lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2, size); lpfc_in_buf_free(phba, bdeBuf2); } } list_del(&head); } else { INIT_LIST_HEAD(&head); list_add_tail(&head, &ctiocbq->list); list_for_each_entry(iocbq, &head, list) { icmd = &iocbq->iocb; if (icmd->ulpBdeCount == 0) lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0); for (i = 0; i < icmd->ulpBdeCount; i++) { dma_addr = getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow); mp = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); size = icmd->un.cont64[i].tus.f.bdeSize; lpfc_ct_unsol_buffer(phba, iocbq, mp, size); lpfc_in_buf_free(phba, mp); } lpfc_sli3_post_buffer(phba, pring, i); } list_del(&head); } } /** * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler * @phba: Pointer to HBA context object. * @dmabuf: pointer to a dmabuf that describes the FC sequence * * This function serves as the upper level protocol abort handler for CT * protocol. * * Return 1 if abort has been handled, 0 otherwise. 
 **/
int
lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	int handled;

	/* CT upper level goes through BSG */
	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);

	return handled;
}

static void
lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
		list_del(&mlast->list);
		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
		kfree(mlast);
	}
	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
	kfree(mlist);
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl,
		  uint32_t size, int *entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	int cnt, i = 0;

	/* We get chunks of FCELSSIZE */
	cnt = size > FCELSSIZE ? FCELSSIZE : size;

	while (size) {
		/* Allocate buffer for rsp payload */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);

		if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT ||
		    be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID)
			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
		else
			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		size -= cnt;
	}

	*entries = i;
	return mlist;
}

int
lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
	struct lpfc_dmabuf *buf_ptr;

	/* IOCBQ job structure gets cleaned during release. Just release
	 * the dma buffers here.
	 */
	if (ctiocb->cmd_dmabuf) {
		buf_ptr = ctiocb->cmd_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->cmd_dmabuf = NULL;
	}
	if (ctiocb->rsp_dmabuf) {
		lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
		ctiocb->rsp_dmabuf = NULL;
	}

	if (ctiocb->bpl_dmabuf) {
		buf_ptr = ctiocb->bpl_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->bpl_dmabuf = NULL;
	}
	lpfc_sli_release_iocbq(phba, ctiocb);
	return 0;
}

/*
 * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer
 * @vport: pointer to a host virtual N_Port data structure.
 * @bmp: Pointer to BPL for SLI command
 * @inp: Pointer to data buffer that holds the CT command.
 * @outp: Pointer to data buffer(s) that receive the CT response data.
 * @cmpl: completion routine to call when command completes
 * @ndlp: Destination NPort nodelist entry
 *
 * This function is the final part of issuing a CT command.
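 *
 * A brief sketch of how the completion timeout is chosen when the caller
 * passes tmo == 0 (the 10 second R_A_TOV value is illustrative;
 * phba->fc_ratov is negotiated at fabric login):
 *
 *	tmo = 3 * phba->fc_ratov;               e.g. 3 * 10s = 30s
 *	drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;  driver-level watchdog pad
 *
 * The 3 * R_A_TOV figure follows the FC spec requirement quoted in the
 * code below.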
*/ static int lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry, uint32_t tmo, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *geniocb; int rc; u16 ulp_context; /* Allocate buffer for command iocb */ geniocb = lpfc_sli_get_iocbq(phba); if (geniocb == NULL) return 1; /* Update the num_entry bde count */ geniocb->num_bdes = num_entry; geniocb->bpl_dmabuf = bmp; /* Save for completion so we can release these resources */ geniocb->cmd_dmabuf = inp; geniocb->rsp_dmabuf = outp; geniocb->event_tag = event_tag; if (!tmo) { /* FC spec states we need 3 * ratov for CT requests */ tmo = (3 * phba->fc_ratov); } if (phba->sli_rev == LPFC_SLI_REV4) ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; else ulp_context = ndlp->nlp_rpi; lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo); /* Issue GEN REQ IOCB for NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0119 Issue GEN REQ IOCB to NPORT x%x " "Data: x%x x%x\n", ndlp->nlp_DID, geniocb->iotag, vport->port_state); geniocb->cmd_cmpl = cmpl; geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; geniocb->vport = vport; geniocb->retry = retry; geniocb->ndlp = lpfc_nlp_get(ndlp); if (!geniocb->ndlp) goto out; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0); if (rc == IOCB_ERROR) { lpfc_nlp_put(ndlp); goto out; } return 0; out: lpfc_sli_release_iocbq(phba, geniocb); return 1; } /* * lpfc_ct_cmd - Build and issue a CT command * @vport: pointer to a host virtual N_Port data structure. * @inmp: Pointer to data buffer for response data. * @bmp: Pointer to BPL for SLI command * @ndlp: Destination NPort nodelist entry * @cmpl: completion routine to call when command completes * * This function is called for issuing a CT command. */ static int lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), uint32_t rsp_size, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; struct lpfc_dmabuf *outmp; int cnt = 0, status; __be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)-> CommandResponse.bits.CmdRsp; bpl++; /* Skip past ct request */ /* Put buffer(s) for ct rsp in bpl */ outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); if (!outmp) return -ENOMEM; /* * Form the CT IOCB. The total number of BDEs in this IOCB * is the single command plus response count from * lpfc_alloc_ct_rsp. 
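 *
 * Worked example (sizes illustrative): a query asking for rsp_size bytes
 * of response gets roughly rsp_size / FCELSSIZE response buffers back
 * from lpfc_alloc_ct_rsp(), each described by one BDE placed after the
 * command BDE in the BPL. The "cnt += 1" below adds that single command
 * BDE, so the entry count handed to lpfc_gen_req() is:
 *
 *	num_entry = 1 + response_bde_count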
*/ cnt += 1; status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, phba->fc_eventTag, cnt, 0, retry); if (status) { lpfc_free_ct_rsp(phba, outmp); return -ENOMEM; } return 0; } struct lpfc_vport * lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) { struct lpfc_vport *vport_curr; unsigned long flags; spin_lock_irqsave(&phba->port_list_lock, flags); list_for_each_entry(vport_curr, &phba->port_list, listentry) { if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) { spin_unlock_irqrestore(&phba->port_list_lock, flags); return vport_curr; } } spin_unlock_irqrestore(&phba->port_list_lock, flags); return NULL; } static void lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) { struct lpfc_nodelist *ndlp; if ((vport->port_type != LPFC_NPIV_PORT) || !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { ndlp = lpfc_setup_disc_node(vport, Did); if (ndlp) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Parse GID_FTrsp: did:x%x flg:x%x x%x", Did, ndlp->nlp_flag, vport->fc_flag); /* By default, the driver expects to support FCP FC4 */ if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (fc4_type == FC_TYPE_NVME) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " "Data: x%x x%x x%x x%x x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); /* if ndlp needs to be discovered and prior * state of ndlp hit devloss, change state to * allow rediscovery. */ if (ndlp->nlp_flag & NLP_NPR_2B_DISC && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } } else { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d", Did, vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0239 Skip x%06x NameServer Rsp " "Data: x%x x%x x%px\n", Did, vport->fc_flag, vport->fc_rscn_id_cnt, ndlp); } } else { if (!(vport->fc_flag & FC_RSCN_MODE) || lpfc_rscn_payload_check(vport, Did)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Query GID_FTrsp: did:x%x flg:x%x cnt:%d", Did, vport->fc_flag, vport->fc_rscn_id_cnt); /* * This NPortID was previously a FCP/NVMe target, * Don't even bother to send GFF_ID. */ ndlp = lpfc_findnode_did(vport, Did); if (ndlp && (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))) { if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (fc4_type == FC_TYPE_NVME) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_setup_disc_node(vport, Did); } else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, 0, Did) == 0) vport->num_disc_nodes++; else lpfc_setup_disc_node(vport, Did); } else { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d", Did, vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0245 Skip x%06x NameServer Rsp " "Data: x%x x%x\n", Did, vport->fc_flag, vport->fc_rscn_id_cnt); } } } static void lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; char *str; if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) str = "GID_FT"; else str = "GID_PT"; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6430 Process %s rsp for %08x type %x %s %s\n", str, Did, fc4_type, (fc4_type == FC_TYPE_FCP) ? "FCP" : " ", (fc4_type == FC_TYPE_NVME) ? 
"NVME" : " "); /* * To conserve rpi's, filter out addresses for other * vports on the same physical HBAs. */ if (Did != vport->fc_myDID && (!lpfc_find_vport_by_did(phba, Did) || vport->cfg_peer_port_login)) { if (!phba->nvmet_support) { /* FCPI/NVMEI path. Process Did */ lpfc_prep_node_fc4type(vport, Did, fc4_type); return; } /* NVMET path. NVMET only cares about NVMEI nodes. */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; spin_lock_irq(&ndlp->lock); if (ndlp->nlp_DID == Did) ndlp->nlp_flag &= ~NLP_NVMET_RECOV; else ndlp->nlp_flag |= NLP_NVMET_RECOV; spin_unlock_irq(&ndlp->lock); } } } static int lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, uint32_t Size) { struct lpfc_sli_ct_request *Response = (struct lpfc_sli_ct_request *) mp->virt; struct lpfc_dmabuf *mlast, *next_mp; uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; uint32_t Did, CTentry; int Cnt; struct list_head head; struct lpfc_nodelist *ndlp = NULL; lpfc_set_disctmo(vport); vport->num_disc_nodes = 0; vport->fc_ns_retry = 0; list_add_tail(&head, &mp->list); list_for_each_entry_safe(mp, next_mp, &head, list) { mlast = mp; Cnt = Size > FCELSSIZE ? FCELSSIZE : Size; Size -= Cnt; if (!ctptr) { ctptr = (uint32_t *) mlast->virt; } else Cnt -= 16; /* subtract length of CT header */ /* Loop through entire NameServer list of DIDs */ while (Cnt >= sizeof(uint32_t)) { /* Get next DID from NameServer List */ CTentry = *ctptr++; Did = ((be32_to_cpu(CTentry)) & Mask_DID); lpfc_ns_rsp_audit_did(vport, Did, fc4_type); if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) goto nsout1; Cnt -= sizeof(uint32_t); } ctptr = NULL; } /* All GID_FT entries processed. If the driver is running in * in target mode, put impacted nodes into recovery and drop * the RPI to flush outstanding IO. */ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NVMET_RECOV; spin_unlock_irq(&ndlp->lock); } } nsout1: list_del(&head); return 0; } static void lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_dmabuf *outp; struct lpfc_dmabuf *inp; struct lpfc_sli_ct_request *CTrsp; struct lpfc_sli_ct_request *CTreq; struct lpfc_nodelist *ndlp; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); int rc, type; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; inp = cmdiocb->cmd_dmabuf; outp = cmdiocb->rsp_dmabuf; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT cmpl: status:x%x/x%x rtry:%d", ulp_status, ulp_word4, vport->fc_ns_retry); /* Ignore response if link flipped after this request was made */ if (cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9043 Event tag mismatch. Ignoring NS rsp\n"); goto out; } /* Don't bother processing response if vport is being torn down. 
*/ if (vport->load_flag & FC_UNLOADING) { if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); goto out; } if (lpfc_els_chk_latt(vport)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0216 Link event during NS query\n"); if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out; } if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0226 NS query failed due to link event: " "ulp_status x%x ulp_word4 x%x fc_flag x%x " "port_state x%x gidft_inp x%x\n", ulp_status, ulp_word4, vport->fc_flag, vport->port_state, vport->gidft_inp); if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); if (vport->gidft_inp) vport->gidft_inp--; goto out; } spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_RSCN_DEFERRED) { vport->fc_flag &= ~FC_RSCN_DEFERRED; spin_unlock_irq(shost->host_lock); /* This is a GID_FT completing so the gidft_inp counter was * incremented before the GID_FT was issued to the wire. */ if (vport->gidft_inp) vport->gidft_inp--; /* * Skip processing the NS response * Re-issue the NS cmd */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0151 Process Deferred RSCN Data: x%x x%x\n", vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_els_handle_rscn(vport); goto out; } spin_unlock_irq(shost->host_lock); if (ulp_status) { /* Check for retry */ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { if (ulp_status != IOSTAT_LOCAL_REJECT || (ulp_word4 & IOERR_PARAM_MASK) != IOERR_NO_RESOURCES) vport->fc_ns_retry++; type = lpfc_get_gidft_type(vport, cmdiocb); if (type == 0) goto out; /* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, type); if (rc == 0) goto out; else { /* Unable to send NS cmd */ if (vport->gidft_inp) vport->gidft_inp--; } } if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0257 GID_FT Query error: 0x%x 0x%x\n", ulp_status, vport->fc_ns_retry); } else { /* Good status, continue checking */ CTreq = (struct lpfc_sli_ct_request *) inp->virt; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0208 NameServer Rsp Data: x%x x%x " "x%x x%x sz x%x\n", vport->fc_flag, CTreq->un.gid.Fc4Type, vport->num_disc_nodes, vport->gidft_inp, get_job_data_placed(phba, rspiocb)); lpfc_ns_rsp(vport, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0269 No NameServer Entries " "Data: x%x x%x x%x x%x\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT no entry cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0240 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, 
"GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } } else { /* NameServer Rsp Error */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0241 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } if (vport->gidft_inp) vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4216 GID_FT cmpl inp %d disc %d\n", vport->gidft_inp, vport->num_disc_nodes); /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { /* * The driver has cycled through all Nports in the RSCN payload. * Complete the handling by cleaning up and marking the * current driver state. */ if (vport->port_state >= LPFC_DISC_AUTH) { if (vport->fc_flag & FC_RSCN_MODE) { lpfc_els_flush_rscn(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ spin_unlock_irq(shost->host_lock); } else lpfc_els_flush_rscn(vport); } lpfc_disc_start(vport); } out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } static void lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_dmabuf *outp; struct lpfc_dmabuf *inp; struct lpfc_sli_ct_request *CTrsp; struct lpfc_sli_ct_request *CTreq; struct lpfc_nodelist *ndlp; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); int rc; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; inp = cmdiocb->cmd_dmabuf; outp = cmdiocb->rsp_dmabuf; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_PT cmpl: status:x%x/x%x rtry:%d", ulp_status, ulp_word4, vport->fc_ns_retry); /* Ignore response if link flipped after this request was made */ if (cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9044 Event tag mismatch. Ignoring NS rsp\n"); goto out; } /* Don't bother processing response if vport is being torn down. */ if (vport->load_flag & FC_UNLOADING) { if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); goto out; } if (lpfc_els_chk_latt(vport)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4108 Link event during NS query\n"); if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out; } if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4166 NS query failed due to link event: " "ulp_status x%x ulp_word4 x%x fc_flag x%x " "port_state x%x gidft_inp x%x\n", ulp_status, ulp_word4, vport->fc_flag, vport->port_state, vport->gidft_inp); if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); if (vport->gidft_inp) vport->gidft_inp--; goto out; } spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_RSCN_DEFERRED) { vport->fc_flag &= ~FC_RSCN_DEFERRED; spin_unlock_irq(shost->host_lock); /* This is a GID_PT completing so the gidft_inp counter was * incremented before the GID_PT was issued to the wire. 
*/ if (vport->gidft_inp) vport->gidft_inp--; /* * Skip processing the NS response * Re-issue the NS cmd */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "4167 Process Deferred RSCN Data: x%x x%x\n", vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_els_handle_rscn(vport); goto out; } spin_unlock_irq(shost->host_lock); if (ulp_status) { /* Check for retry */ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { if (ulp_status != IOSTAT_LOCAL_REJECT || (ulp_word4 & IOERR_PARAM_MASK) != IOERR_NO_RESOURCES) vport->fc_ns_retry++; /* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, vport->fc_ns_retry, GID_PT_N_PORT); if (rc == 0) goto out; else { /* Unable to send NS cmd */ if (vport->gidft_inp) vport->gidft_inp--; } } if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4103 GID_FT Query error: 0x%x 0x%x\n", ulp_status, vport->fc_ns_retry); } else { /* Good status, continue checking */ CTreq = (struct lpfc_sli_ct_request *)inp->virt; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4105 NameServer Rsp Data: x%x x%x " "x%x x%x sz x%x\n", vport->fc_flag, CTreq->un.gid.Fc4Type, vport->num_disc_nodes, vport->gidft_inp, get_job_data_placed(phba, rspiocb)); lpfc_ns_rsp(vport, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { lpfc_printf_vlog( vport, KERN_INFO, LOG_DISCOVERY, "4106 No NameServer Entries " "Data: x%x x%x x%x x%x\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT no entry cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } else { lpfc_printf_vlog( vport, KERN_INFO, LOG_DISCOVERY, "4107 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } } else { /* NameServer Rsp Error */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4109 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } if (vport->gidft_inp) vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6450 GID_PT cmpl inp %d disc %d\n", vport->gidft_inp, vport->num_disc_nodes); /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { /* * The driver has cycled through all Nports in the RSCN payload. * Complete the handling by cleaning up and marking the * current driver state. 
*/ if (vport->port_state >= LPFC_DISC_AUTH) { if (vport->fc_flag & FC_RSCN_MODE) { lpfc_els_flush_rscn(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ spin_unlock_irq(shost->host_lock); } else { lpfc_els_flush_rscn(vport); } } lpfc_disc_start(vport); } out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } static void lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTrsp; int did, rc, retry; uint8_t fbits; struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId; did = be32_to_cpu(did); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GFF_ID cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* Ignore response if link flipped after this request was made */ if (cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9045 Event tag mismatch. Ignoring NS rsp\n"); goto iocb_free; } if (ulp_status == IOSTAT_SUCCESS) { /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET]; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6431 Process GFF_ID rsp for %08x " "fbits %02x %s %s\n", did, fbits, (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ", (fbits & FC4_FEATURE_TARGET) ? "Target" : " "); if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) { if ((fbits & FC4_FEATURE_INIT) && !(fbits & FC4_FEATURE_TARGET)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0270 Skip x%x GFF " "NameServer Rsp Data: (init) " "x%x x%x\n", did, fbits, vport->fc_rscn_id_cnt); goto out; } } } else { /* Check for retry */ if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { retry = 1; if (ulp_status == IOSTAT_LOCAL_REJECT) { switch ((ulp_word4 & IOERR_PARAM_MASK)) { case IOERR_NO_RESOURCES: /* We don't increment the retry * count for this case. 
*/ break; case IOERR_LINK_DOWN: case IOERR_SLI_ABORTED: case IOERR_SLI_DOWN: retry = 0; break; default: cmdiocb->retry++; } } else cmdiocb->retry++; if (retry) { /* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, cmdiocb->retry, did); if (rc == 0) { /* success */ free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); return; } } } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0267 NameServer GFF Rsp " "x%x Error (%d %d) Data: x%x x%x\n", did, ulp_status, ulp_word4, vport->fc_flag, vport->fc_rscn_id_cnt); } /* This is a target port, unregistered port, or the GFF_ID failed */ ndlp = lpfc_setup_disc_node(vport, did); if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " "NameServer Rsp Data: x%x x%x x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0243 Skip x%x GFF " "NameServer Rsp Data: x%x x%x\n", did, vport->fc_flag, vport->fc_rscn_id_cnt); } out: /* Link up / RSCN discovery */ if (vport->num_disc_nodes) vport->num_disc_nodes--; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6451 GFF_ID cmpl inp %d disc %d\n", vport->gidft_inp, vport->num_disc_nodes); if (vport->num_disc_nodes == 0) { /* * The driver has cycled through all Nports in the RSCN payload. * Complete the handling by cleaning up and marking the * current driver state. */ if (vport->port_state >= LPFC_DISC_AUTH) { if (vport->fc_flag & FC_RSCN_MODE) { lpfc_els_flush_rscn(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ spin_unlock_irq(shost->host_lock); } else lpfc_els_flush_rscn(vport); } lpfc_disc_start(vport); } iocb_free: free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); return; } static void lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTrsp; int did; struct lpfc_nodelist *ndlp = NULL; struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp; uint32_t fc4_data_0, fc4_data_1; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId; did = be32_to_cpu(did); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GFT_ID cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* Ignore response if link flipped after this request was made */ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9046 Event tag mismatch. Ignoring NS rsp\n"); goto out; } if (ulp_status == IOSTAT_SUCCESS) { /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *)outp->virt; fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6432 Process GFT_ID rsp for %08x " "Data %08x %08x %s %s\n", did, fc4_data_0, fc4_data_1, (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ? "FCP" : " ", (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ? "NVME" : " "); /* Lookup the NPort_ID queried in the GFT_ID and find the * driver's local node. It's an error if the driver * doesn't have one. 
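 *
 * For reference, the GFT_ID ACC payload carries a 256-bit FC4-type bitmap
 * split across eight 32-bit words; FCP (type 0x08) lands in word 0 and
 * NVME (type 0x28) in word 1, 32 bits away, which is why the same
 * LPFC_FC4_TYPE_BITMASK is tested against both fc4_data_0 and fc4_data_1
 * in the code that follows. An illustrative decode:
 *
 *	fc4_data_0 & LPFC_FC4_TYPE_BITMASK  ->  FCP registered
 *	fc4_data_1 & LPFC_FC4_TYPE_BITMASK  ->  NVME registered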
*/ ndlp = lpfc_findnode_did(vport, did); if (ndlp) { /* The bitmask value for FCP and NVME FCP types is * the same because they are 32 bits distant from * each other in word0 and word0. */ if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "3064 Setting ndlp x%px, DID x%06x " "with FC4 x%08x, Data: x%08x x%08x " "%d\n", ndlp, did, ndlp->nlp_fc4_type, FC_TYPE_FCP, FC_TYPE_NVME, ndlp->nlp_state); if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE && ndlp->nlp_fc4_type) { ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; /* This is a fabric topology so if discovery * started with an unsolicited PLOGI, don't * send a PRLI. Targets don't issue PLOGI or * PRLI when acting as a target. Likely this is * an initiator function. */ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); lpfc_issue_els_prli(vport, ndlp, 0); } } else if (!ndlp->nlp_fc4_type) { /* If fc4 type is still unknown, then LOGO */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "6443 Sending LOGO ndlp x%px," "DID x%06x with fc4_type: " "x%08x, state: %d\n", ndlp, did, ndlp->nlp_fc4_type, ndlp->nlp_state); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } } } else lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3065 GFT_ID failed x%08x\n", ulp_status); out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ns_ndlp); } static void lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp; struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; struct lpfc_nodelist *ndlp; int cmdcode, rc; uint8_t retry; uint32_t latt; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; inp = cmdiocb->cmd_dmabuf; outp = cmdiocb->rsp_dmabuf; cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)-> CommandResponse.bits.CmdRsp); CTrsp = (struct lpfc_sli_ct_request *) outp->virt; latt = lpfc_els_chk_latt(vport); /* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0209 CT Request completes, latt %d, " "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n", latt, ulp_status, be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "CT cmd cmpl: status:x%x/x%x cmd:x%x", ulp_status, ulp_word4, cmdcode); if (ulp_status) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0268 NS cmd x%x Error (x%x x%x)\n", cmdcode, ulp_status, ulp_word4); if (ulp_status == IOSTAT_LOCAL_REJECT && (((ulp_word4 & IOERR_PARAM_MASK) == IOERR_SLI_DOWN) || ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_SLI_ABORTED))) goto out; retry = cmdiocb->retry; if (retry >= LPFC_MAX_NS_RETRY) goto out; retry++; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0250 Retrying NS cmd %x\n", cmdcode); rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); if (rc == 0) goto out; } out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } static void lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq 
*rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status == IOSTAT_SUCCESS) { struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RFT_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } static void lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status == IOSTAT_SUCCESS) { struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RNN_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } static void lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status == IOSTAT_SUCCESS) { struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RSPN_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } static void lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status == IOSTAT_SUCCESS) { struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RSNN_NN; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } static void lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; /* even if it fails we will act as though it succeeded. */ vport->ct_flags = 0; lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } static void lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); if (ulp_status == IOSTAT_SUCCESS) { struct lpfc_dmabuf *outp; struct lpfc_sli_ct_request *CTrsp; outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RFF_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); return; } /* * Although the symbolic port name is thought to be an integer * as of January 18, 2016, leave it as a string until more of * the record state becomes defined. */ int lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, size_t size) { int n; /* * Use the lpfc board number as the Symbolic Port * Name object. NPIV is not in play so this integer * value is sufficient and unique per FC-ID. 
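 *
 * For example, board 0 simply registers "0" as its symbolic port name,
 * while the symbolic node name built in the next routine concatenates
 * model, firmware, driver, hostname and OS, e.g. (values illustrative):
 *
 *	"Emulex LPe36002 FV14.2.539.16 DV14.2.0.5 HN:host1 OS:Linux"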
*/ n = scnprintf(symbol, size, "%d", vport->phba->brd_no); return n; } int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) { char fwrev[FW_REV_STR_SIZE] = {0}; char tmp[MAXHOSTNAMELEN] = {0}; memset(symbol, 0, size); scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName); if (strlcat(symbol, tmp, size) >= size) goto buffer_done; lpfc_decode_firmware_rev(vport->phba, fwrev, 0); scnprintf(tmp, sizeof(tmp), " FV%s", fwrev); if (strlcat(symbol, tmp, size) >= size) goto buffer_done; scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version); if (strlcat(symbol, tmp, size) >= size) goto buffer_done; scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name); if (strlcat(symbol, tmp, size) >= size) goto buffer_done; /* Note :- OS name is "Linux" */ scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname); strlcat(symbol, tmp, size); buffer_done: return strnlen(symbol, size); } static uint32_t lpfc_find_map_node(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; struct Scsi_Host *shost; uint32_t cnt = 0; shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_type & NLP_FABRIC) continue; if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) || (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)) cnt++; } spin_unlock_irq(shost->host_lock); return cnt; } /* * This routine will return the FC4 Type associated with the CT * GID_FT command. */ int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb) { struct lpfc_sli_ct_request *CtReq; struct lpfc_dmabuf *mp; uint32_t type; mp = cmdiocb->cmd_dmabuf; if (mp == NULL) return 0; CtReq = (struct lpfc_sli_ct_request *)mp->virt; type = (uint32_t)CtReq->un.gid.Fc4Type; if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME)) return 0; return type; } /* * lpfc_ns_cmd * Description: * Issue Cmd to NameServer * SLI_CTNS_GID_FT * LI_CTNS_RFT_ID */ int lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, uint8_t retry, uint32_t context) { struct lpfc_nodelist * ndlp; struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *mp, *bmp; struct lpfc_sli_ct_request *CtReq; struct ulp_bde64 *bpl; void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *) = NULL; uint32_t *ptr; uint32_t rsp_size = 1024; size_t size; int rc = 0; ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { rc=1; goto ns_cmd_exit; } /* fill in BDEs for command */ /* Allocate buffer for command payload */ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { rc=2; goto ns_cmd_exit; } INIT_LIST_HEAD(&mp->list); mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); if (!mp->virt) { rc=3; goto ns_cmd_free_mp; } /* Allocate buffer for Buffer ptr list */ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) { rc=4; goto ns_cmd_free_mpvirt; } INIT_LIST_HEAD(&bmp->list); bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys)); if (!bmp->virt) { rc=5; goto ns_cmd_free_bmp; } /* NameServer Req */ lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY, "0236 NameServer Req Data: x%x x%x x%x x%x\n", cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt, context); bpl = (struct ulp_bde64 *) bmp->virt; memset(bpl, 0, sizeof(struct ulp_bde64)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); bpl->tus.f.bdeFlags = 0; if (cmdcode == SLI_CTNS_GID_FT) bpl->tus.f.bdeSize = GID_REQUEST_SZ; else if (cmdcode == 
SLI_CTNS_GID_PT) bpl->tus.f.bdeSize = GID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GFF_ID) bpl->tus.f.bdeSize = GFF_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GFT_ID) bpl->tus.f.bdeSize = GFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFT_ID) bpl->tus.f.bdeSize = RFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RNN_ID) bpl->tus.f.bdeSize = RNN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSPN_ID) bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSNN_NN) bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_DA_ID) bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFF_ID) bpl->tus.f.bdeSize = RFF_REQUEST_SZ; else bpl->tus.f.bdeSize = 0; bpl->tus.w = le32_to_cpu(bpl->tus.w); CtReq = (struct lpfc_sli_ct_request *) mp->virt; memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request)); CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; CtReq->RevisionId.bits.InId = 0; CtReq->FsType = SLI_CT_DIRECTORY_SERVICE; CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER; CtReq->CommandResponse.bits.Size = 0; switch (cmdcode) { case SLI_CTNS_GID_FT: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GID_FT); CtReq->un.gid.Fc4Type = context; if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; lpfc_set_disctmo(vport); cmpl = lpfc_cmpl_ct_cmd_gid_ft; rsp_size = FC_MAX_NS_RSP; break; case SLI_CTNS_GID_PT: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GID_PT); CtReq->un.gid.PortType = context; if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; lpfc_set_disctmo(vport); cmpl = lpfc_cmpl_ct_cmd_gid_pt; rsp_size = FC_MAX_NS_RSP; break; case SLI_CTNS_GFF_ID: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GFF_ID); CtReq->un.gff.PortId = cpu_to_be32(context); cmpl = lpfc_cmpl_ct_cmd_gff_id; break; case SLI_CTNS_GFT_ID: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GFT_ID); CtReq->un.gft.PortId = cpu_to_be32(context); cmpl = lpfc_cmpl_ct_cmd_gft_id; break; case SLI_CTNS_RFT_ID: vport->ct_flags &= ~FC_CT_RFT_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RFT_ID); CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID); /* Register Application Services type if vmid enabled. */ if (phba->cfg_vmid_app_header) CtReq->un.rft.app_serv_reg = cpu_to_be32(RFT_APP_SERV_REG); /* Register FC4 FCP type if enabled. */ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP) CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG); /* Register NVME type if enabled. */ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG); ptr = (uint32_t *)CtReq; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6433 Issue RFT (%s %s %s): %08x %08x %08x " "%08x %08x %08x %08x %08x\n", CtReq->un.rft.fcp_reg ? "FCP" : " ", CtReq->un.rft.nvme_reg ? "NVME" : " ", CtReq->un.rft.app_serv_reg ? 
"APPS" : " ", *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); cmpl = lpfc_cmpl_ct_cmd_rft_id; break; case SLI_CTNS_RNN_ID: vport->ct_flags &= ~FC_CT_RNN_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RNN_ID); CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); cmpl = lpfc_cmpl_ct_cmd_rnn_id; break; case SLI_CTNS_RSPN_ID: vport->ct_flags &= ~FC_CT_RSPN_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RSPN_ID); CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); size = sizeof(CtReq->un.rspn.symbname); CtReq->un.rspn.len = lpfc_vport_symbolic_port_name(vport, CtReq->un.rspn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rspn_id; break; case SLI_CTNS_RSNN_NN: vport->ct_flags &= ~FC_CT_RSNN_NN; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RSNN_NN); memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); size = sizeof(CtReq->un.rsnn.symbname); CtReq->un.rsnn.len = lpfc_vport_symbolic_node_name(vport, CtReq->un.rsnn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; break; case SLI_CTNS_DA_ID: /* Implement DA_ID Nameserver request */ CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_DA_ID); CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); cmpl = lpfc_cmpl_ct_cmd_da_id; break; case SLI_CTNS_RFF_ID: vport->ct_flags &= ~FC_CT_RFF_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RFF_ID); CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; /* The driver always supports FC_TYPE_FCP. However, the * caller can specify NVME (type x28) as well. But only * these that FC4 type is supported. */ if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && (context == FC_TYPE_NVME)) { if ((vport == phba->pport) && phba->nvmet_support) { CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | FC4_FEATURE_NVME_DISC); lpfc_nvmet_update_targetport(phba); } else { lpfc_nvme_update_localport(vport); } CtReq->un.rff.type_code = context; } else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && (context == FC_TYPE_FCP)) CtReq->un.rff.type_code = context; else goto ns_cmd_free_bmpvirt; ptr = (uint32_t *)CtReq; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6434 Issue RFF (%s): %08x %08x %08x %08x " "%08x %08x %08x %08x\n", (context == FC_TYPE_NVME) ? "NVME" : "FCP", *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); cmpl = lpfc_cmpl_ct_cmd_rff_id; break; } /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count * to hold ndlp reference for the corresponding callback function. */ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { /* On success, The cmpl function will free the buffers */ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Issue CT cmd: cmd:x%x did:x%x", cmdcode, ndlp->nlp_DID, 0); return 0; } rc=6; ns_cmd_free_bmpvirt: lpfc_mbuf_free(phba, bmp->virt, bmp->phys); ns_cmd_free_bmp: kfree(bmp); ns_cmd_free_mpvirt: lpfc_mbuf_free(phba, mp->virt, mp->phys); ns_cmd_free_mp: kfree(mp); ns_cmd_exit: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n", cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt); return 1; } /** * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands * @phba: Pointer to HBA context object. 
* @mask: Initial port attributes mask * * This function checks to see if any vports have deferred their FDMI RPRT. * A vports RPRT may be deferred if it is issued before the primary ports * RHBA completes. */ static void lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask) { struct lpfc_vport **vports; struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; int i; phba->hba_flag |= HBA_RHBA_CMPL; vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { vport = vports[i]; ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) continue; if (vport->ct_flags & FC_CT_RPRT_DEFER) { vport->ct_flags &= ~FC_CT_RPRT_DEFER; vport->fdmi_port_mask = mask; lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); } } } lpfc_destroy_vport_work_array(phba, vports); } /** * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion * @phba: Pointer to HBA context object. * @cmdiocb: Pointer to the command IOCBQ. * @rspiocb: Pointer to the response IOCBQ. * * This function to handle the completion of a driver initiated FDMI * CT command issued during discovery. */ static void lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTcmd = inp->virt; struct lpfc_sli_ct_request *CTrsp = outp->virt; __be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; __be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; struct lpfc_nodelist *ndlp, *free_ndlp = NULL; uint32_t latt, cmd, err; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); latt = lpfc_els_chk_latt(vport); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "FDMI cmpl: status:x%x/x%x latt:%d", ulp_status, ulp_word4, latt); if (latt || ulp_status) { /* Look for a retryable error */ if (ulp_status == IOSTAT_LOCAL_REJECT) { switch ((ulp_word4 & IOERR_PARAM_MASK)) { case IOERR_SLI_ABORTED: case IOERR_SLI_DOWN: /* Driver aborted this IO. No retry as error * is likely Offline->Online or some adapter * error. Recovery will try again. */ break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: case IOERR_ILLEGAL_FRAME: case IOERR_NO_RESOURCES: case IOERR_ILLEGAL_COMMAND: cmdiocb->retry++; if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY) break; /* Retry the same FDMI command */ err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocb, 0); if (err == IOCB_ERROR) break; return; default: break; } } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0229 FDMI cmd %04x failed, latt = %d " "ulp_status: x%x, rid x%x\n", be16_to_cpu(fdmi_cmd), latt, ulp_status, ulp_word4); } free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp) return; /* Check for a CT LS_RJT response */ cmd = be16_to_cpu(fdmi_cmd); if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { /* FDMI rsp failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); /* Should we fallback to FDMI-2 / FDMI-1 ? */ switch (cmd) { case SLI_MGMT_RHBA: if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) { /* Fallback to FDMI-1 for HBA attributes */ vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; /* If HBA attributes are FDMI1, so should * port attributes be for consistency. 
*/ vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; /* Start over */ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); } return; case SLI_MGMT_RPRT: if (vport->port_type != LPFC_PHYSICAL_PORT) { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return; } if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { /* Fallback to FDMI-1 */ vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; /* Start over */ lpfc_fdmi_cmd(vport, ndlp, cmd, 0); return; } if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; /* Retry the same command */ lpfc_fdmi_cmd(vport, ndlp, cmd, 0); } return; case SLI_MGMT_RPA: /* No retry on Vendor, RPA only done on physical port */ if (phba->link_flag & LS_CT_VEN_RPA) { phba->link_flag &= ~LS_CT_VEN_RPA; if (phba->cmf_active_mode == LPFC_CFG_OFF) return; lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY | LOG_ELS, "6460 VEN FDMI RPA RJT\n"); return; } if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { /* Fallback to FDMI-1 */ vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; /* Start over */ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); return; } if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; /* Retry the same command */ lpfc_fdmi_cmd(vport, ndlp, cmd, 0); } return; } } /* * On success, need to cycle thru FDMI registration for discovery * DHBA -> DPRT -> RHBA -> RPA (physical port) * DPRT -> RPRT (vports) */ switch (cmd) { case SLI_MGMT_RHBA: /* Check for any RPRTs deferred till after RHBA completes */ lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask); lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0); break; case SLI_MGMT_DHBA: lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); break; case SLI_MGMT_DPRT: if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0); } else { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return; /* Only issue a RPRT for the vport if the RHBA * for the physical port completes successfully. * We may have to defer the RPRT accordingly. */ if (phba->hba_flag & HBA_RHBA_CMPL) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6078 RPRT deferred\n"); vport->ct_flags |= FC_CT_RPRT_DEFER; } } break; case SLI_MGMT_RPA: if (vport->port_type == LPFC_PHYSICAL_PORT && phba->sli4_hba.pc_sli4_params.mi_ver) { /* mi is only for the phyical port, no vports */ if (phba->link_flag & LS_CT_VEN_RPA) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS | LOG_CGN_MGMT, "6449 VEN RPA FDMI Success\n"); phba->link_flag &= ~LS_CT_VEN_RPA; break; } lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_CGN_MGMT, "6210 Issue Vendor MI FDMI %x\n", phba->sli4_hba.pc_sli4_params.mi_ver); /* CGN is only for the physical port, no vports */ if (lpfc_fdmi_cmd(vport, ndlp, cmd, LPFC_FDMI_VENDOR_ATTR_mi) == 0) phba->link_flag |= LS_CT_VEN_RPA; lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "6458 Send MI FDMI:%x Flag x%x\n", phba->sli4_hba.pc_sli4_params.mi_ver, phba->link_flag); } else { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "6459 No FDMI VEN MI support - " "RPA Success\n"); } break; } return; } /** * lpfc_fdmi_change_check - Check for changed FDMI parameters * @vport: pointer to a host virtual N_Port data structure. * * Check how many mapped NPorts we are connected to * Check if our hostname changed * Called from hbeat timeout routine to check if any FDMI parameters * changed. If so, re-register those Attributes. 
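 * A hostname change also refreshes the name server symbolic node name
 * (SLI_CTNS_RSNN_NN) and restarts the full FDMI registration cycle; a change
 * in the mapped NPort count only re-registers the num_disc port attribute.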
*/ void lpfc_fdmi_change_check(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; uint16_t cnt; if (!lpfc_is_link_up(phba)) return; /* Must be connected to a Fabric */ if (!(vport->fc_flag & FC_FABRIC)) return; ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp) return; /* Check if system hostname changed */ if (strcmp(phba->os_host_name, init_utsname()->nodename)) { memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", init_utsname()->nodename); lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); /* Since this effects multiple HBA and PORT attributes, we need * de-register and go thru the whole FDMI registration cycle. * DHBA -> DPRT -> RHBA -> RPA (physical port) * DPRT -> RPRT (vports) */ if (vport->port_type == LPFC_PHYSICAL_PORT) { /* For extra Vendor RPA */ phba->link_flag &= ~LS_CT_VEN_RPA; lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); } else { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return; lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); } /* Since this code path registers all the port attributes * we can just return without further checking. */ return; } if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) return; /* Check if the number of mapped NPorts changed */ cnt = lpfc_find_map_node(vport); if (cnt == vport->fdmi_num_disc) return; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, LPFC_FDMI_PORT_ATTR_num_disc); } else { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return; lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, LPFC_FDMI_PORT_ATTR_num_disc); } } static inline int lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval) { struct lpfc_fdmi_attr_u32 *ae = attr; int size = sizeof(*ae); ae->type = cpu_to_be16(attrtype); ae->len = cpu_to_be16(size); ae->value_u32 = cpu_to_be32(attrval); return size; } static inline int lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn) { struct lpfc_fdmi_attr_wwn *ae = attr; int size = sizeof(*ae); ae->type = cpu_to_be16(attrtype); ae->len = cpu_to_be16(size); /* WWN's assumed to be bytestreams - Big Endian presentation */ memcpy(ae->name, wwn, min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64))); return size; } static inline int lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype, struct lpfc_name *wwnn, struct lpfc_name *wwpn) { struct lpfc_fdmi_attr_fullwwn *ae = attr; u8 *nname = ae->nname; u8 *pname = ae->pname; int size = sizeof(*ae); ae->type = cpu_to_be16(attrtype); ae->len = cpu_to_be16(size); /* WWN's assumed to be bytestreams - Big Endian presentation */ memcpy(nname, wwnn, min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64))); memcpy(pname, wwpn, min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64))); return size; } static inline int lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring) { struct lpfc_fdmi_attr_string *ae = attr; int len, size; /* * We are trusting the caller that if a fdmi string field * is capped at 64 bytes, the caller passes in a string of * 64 bytes or less. */ strncpy(ae->value_string, attrstring, sizeof(ae->value_string)); len = strnlen(ae->value_string, sizeof(ae->value_string)); /* round string length to a 32bit boundary. Ensure there's a NULL */ len += (len & 3) ? 
(4 - (len & 3)) : 4; /* size is Type/Len (4 bytes) plus string length */ size = FOURBYTES + len; ae->type = cpu_to_be16(attrtype); ae->len = cpu_to_be16(size); return size; } /* Bitfields for FC4 Types that can be reported */ #define ATTR_FC4_CT 0x00000001 #define ATTR_FC4_FCP 0x00000002 #define ATTR_FC4_NVME 0x00000004 static inline int lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask) { struct lpfc_fdmi_attr_fc4types *ae = attr; int size = sizeof(*ae); ae->type = cpu_to_be16(attrtype); ae->len = cpu_to_be16(size); if (typemask & ATTR_FC4_FCP) ae->value_types[2] = 0x01; /* Type 0x8 - FCP */ if (typemask & ATTR_FC4_CT) ae->value_types[7] = 0x01; /* Type 0x20 - CT */ if (typemask & ATTR_FC4_NVME) ae->value_types[6] = 0x01; /* Type 0x28 - NVME */ return size; } /* Routines for all individual HBA attributes */ static int lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME, &vport->fc_sparam.nodeName); } static int lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr) { /* This string MUST be consistent with other FC platforms * supported by Broadcom. */ return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER, "Emulex Corporation"); } static int lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER, phba->SerialNumber); } static int lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL, phba->ModelName); } static int lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION, phba->ModelDesc); } static int lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; lpfc_vpd_t *vp = &phba->vpd; char buf[16] = { 0 }; snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev); return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf); } static int lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION, lpfc_release_version); } static int lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; char buf[64] = { 0 }; if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_decode_firmware_rev(phba, buf, 1); return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION, buf); } return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION, phba->OptionROMVersion); } static int lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; char buf[64] = { 0 }; lpfc_decode_firmware_rev(phba, buf, 1); return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf); } static int lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr) { char buf[256] = { 0 }; snprintf(buf, sizeof(buf), "%s %s %s", init_utsname()->sysname, init_utsname()->release, init_utsname()->version); return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf); } static int lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN, LPFC_MAX_CT_SIZE); } static int lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr) { char buf[256] = { 0 }; lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf)); return 
lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf); } static int lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0); } static int lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr) { /* Each driver instance corresponds to a single port */ return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1); } static int lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN, &vport->fabric_nodename); } static int lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION, phba->BIOSVersion); } static int lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr) { /* Driver doesn't have access to this information */ return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0); } static int lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX"); } /* * Routines for all individual PORT attributes */ static int lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; u32 fc4types; fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP); /* Check to see if Firmware supports NVME and on physical port */ if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) && phba->sli4_hba.pc_sli4_params.nvme) fc4types |= ATTR_FC4_NVME; return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES, fc4types); } static int lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; u32 speeds = 0; u32 tcfg; u8 i, cnt; if (!(phba->hba_flag & HBA_FCOE_MODE)) { cnt = 0; if (phba->sli_rev == LPFC_SLI_REV4) { tcfg = phba->sli4_hba.conf_trunk; for (i = 0; i < 4; i++, tcfg >>= 1) if (tcfg & 1) cnt++; } if (cnt > 2) { /* 4 lane trunk group */ if (phba->lmt & LMT_64Gb) speeds |= HBA_PORTSPEED_256GFC; if (phba->lmt & LMT_32Gb) speeds |= HBA_PORTSPEED_128GFC; if (phba->lmt & LMT_16Gb) speeds |= HBA_PORTSPEED_64GFC; } else if (cnt) { /* 2 lane trunk group */ if (phba->lmt & LMT_128Gb) speeds |= HBA_PORTSPEED_256GFC; if (phba->lmt & LMT_64Gb) speeds |= HBA_PORTSPEED_128GFC; if (phba->lmt & LMT_32Gb) speeds |= HBA_PORTSPEED_64GFC; if (phba->lmt & LMT_16Gb) speeds |= HBA_PORTSPEED_32GFC; } else { if (phba->lmt & LMT_256Gb) speeds |= HBA_PORTSPEED_256GFC; if (phba->lmt & LMT_128Gb) speeds |= HBA_PORTSPEED_128GFC; if (phba->lmt & LMT_64Gb) speeds |= HBA_PORTSPEED_64GFC; if (phba->lmt & LMT_32Gb) speeds |= HBA_PORTSPEED_32GFC; if (phba->lmt & LMT_16Gb) speeds |= HBA_PORTSPEED_16GFC; if (phba->lmt & LMT_10Gb) speeds |= HBA_PORTSPEED_10GFC; if (phba->lmt & LMT_8Gb) speeds |= HBA_PORTSPEED_8GFC; if (phba->lmt & LMT_4Gb) speeds |= HBA_PORTSPEED_4GFC; if (phba->lmt & LMT_2Gb) speeds |= HBA_PORTSPEED_2GFC; if (phba->lmt & LMT_1Gb) speeds |= HBA_PORTSPEED_1GFC; } } else { /* FCoE links support only one speed */ switch (phba->fc_linkspeed) { case LPFC_ASYNC_LINK_SPEED_10GBPS: speeds = HBA_PORTSPEED_10GE; break; case LPFC_ASYNC_LINK_SPEED_25GBPS: speeds = HBA_PORTSPEED_25GE; break; case LPFC_ASYNC_LINK_SPEED_40GBPS: speeds = HBA_PORTSPEED_40GE; break; case LPFC_ASYNC_LINK_SPEED_100GBPS: speeds = HBA_PORTSPEED_100GE; break; } } return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds); } static int lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; u32 
speeds = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: speeds = HBA_PORTSPEED_1GFC; break; case LPFC_LINK_SPEED_2GHZ: speeds = HBA_PORTSPEED_2GFC; break; case LPFC_LINK_SPEED_4GHZ: speeds = HBA_PORTSPEED_4GFC; break; case LPFC_LINK_SPEED_8GHZ: speeds = HBA_PORTSPEED_8GFC; break; case LPFC_LINK_SPEED_10GHZ: speeds = HBA_PORTSPEED_10GFC; break; case LPFC_LINK_SPEED_16GHZ: speeds = HBA_PORTSPEED_16GFC; break; case LPFC_LINK_SPEED_32GHZ: speeds = HBA_PORTSPEED_32GFC; break; case LPFC_LINK_SPEED_64GHZ: speeds = HBA_PORTSPEED_64GFC; break; case LPFC_LINK_SPEED_128GHZ: speeds = HBA_PORTSPEED_128GFC; break; case LPFC_LINK_SPEED_256GHZ: speeds = HBA_PORTSPEED_256GFC; break; default: speeds = HBA_PORTSPEED_UNKNOWN; break; } } else { switch (phba->fc_linkspeed) { case LPFC_ASYNC_LINK_SPEED_10GBPS: speeds = HBA_PORTSPEED_10GE; break; case LPFC_ASYNC_LINK_SPEED_25GBPS: speeds = HBA_PORTSPEED_25GE; break; case LPFC_ASYNC_LINK_SPEED_40GBPS: speeds = HBA_PORTSPEED_40GE; break; case LPFC_ASYNC_LINK_SPEED_100GBPS: speeds = HBA_PORTSPEED_100GE; break; default: speeds = HBA_PORTSPEED_UNKNOWN; break; } } return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds); } static int lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr) { struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam; return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE, (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) | (uint32_t)hsp->cmn.bbRcvSizeLsb); } static int lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); char buf[64] = { 0 }; snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d", shost->host_no); return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf); } static int lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr) { char buf[64] = { 0 }; scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name); return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf); } static int lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME, &vport->fc_sparam.nodeName); } static int lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME, &vport->fc_sparam.portName); } static int lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr) { char buf[256] = { 0 }; lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf)); return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf); } static int lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE, (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ? 
LPFC_FDMI_PORTTYPE_NLPORT : LPFC_FDMI_PORTTYPE_NPORT); } static int lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS, FC_COS_CLASS2 | FC_COS_CLASS3); } static int lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME, &vport->fabric_portname); } static int lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; u32 fc4types; fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP); /* Check to see if NVME is configured or not */ if (vport == phba->pport && phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) fc4types |= ATTR_FC4_NVME; return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES, fc4types); } static int lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE, LPFC_FDMI_PORTSTATE_ONLINE); } static int lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr) { vport->fdmi_num_disc = lpfc_find_map_node(vport); return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT, vport->fdmi_num_disc); } static int lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID); } static int lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE, "Smart SAN Initiator"); } static int lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID, &vport->fc_sparam.nodeName, &vport->fc_sparam.portName); } static int lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION, "Smart SAN Version 2.0"); } static int lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL, phba->ModelName); } static int lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr) { /* SRIOV (type 3) is not supported */ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO, (vport->vpi) ? 
2 /* NPIV */ : 1 /* Physical */); } static int lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0); } static int lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr) { return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1); } static int lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr) { struct lpfc_hba *phba = vport->phba; char buf[32] = { 0 }; sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver); return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf); } /* RHBA attribute jump table */ static int (*lpfc_fdmi_hba_action[]) (struct lpfc_vport *vport, void *attrbuf) = { /* Action routine Mask bit Attribute type */ lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */ lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */ lpfc_fdmi_hba_attr_sn, /* bit2 RHBA_SERIAL_NUMBER */ lpfc_fdmi_hba_attr_model, /* bit3 RHBA_MODEL */ lpfc_fdmi_hba_attr_description, /* bit4 RHBA_MODEL_DESCRIPTION */ lpfc_fdmi_hba_attr_hdw_ver, /* bit5 RHBA_HARDWARE_VERSION */ lpfc_fdmi_hba_attr_drvr_ver, /* bit6 RHBA_DRIVER_VERSION */ lpfc_fdmi_hba_attr_rom_ver, /* bit7 RHBA_OPTION_ROM_VERSION */ lpfc_fdmi_hba_attr_fmw_ver, /* bit8 RHBA_FIRMWARE_VERSION */ lpfc_fdmi_hba_attr_os_ver, /* bit9 RHBA_OS_NAME_VERSION */ lpfc_fdmi_hba_attr_ct_len, /* bit10 RHBA_MAX_CT_PAYLOAD_LEN */ lpfc_fdmi_hba_attr_symbolic_name, /* bit11 RHBA_SYM_NODENAME */ lpfc_fdmi_hba_attr_vendor_info, /* bit12 RHBA_VENDOR_INFO */ lpfc_fdmi_hba_attr_num_ports, /* bit13 RHBA_NUM_PORTS */ lpfc_fdmi_hba_attr_fabric_wwnn, /* bit14 RHBA_FABRIC_WWNN */ lpfc_fdmi_hba_attr_bios_ver, /* bit15 RHBA_BIOS_VERSION */ lpfc_fdmi_hba_attr_bios_state, /* bit16 RHBA_BIOS_STATE */ lpfc_fdmi_hba_attr_vendor_id, /* bit17 RHBA_VENDOR_ID */ }; /* RPA / RPRT attribute jump table */ static int (*lpfc_fdmi_port_action[]) (struct lpfc_vport *vport, void *attrbuf) = { /* Action routine Mask bit Attribute type */ lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */ lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */ lpfc_fdmi_port_attr_speed, /* bit2 RPRT_PORT_SPEED */ lpfc_fdmi_port_attr_max_frame, /* bit3 RPRT_MAX_FRAME_SIZE */ lpfc_fdmi_port_attr_os_devname, /* bit4 RPRT_OS_DEVICE_NAME */ lpfc_fdmi_port_attr_host_name, /* bit5 RPRT_HOST_NAME */ lpfc_fdmi_port_attr_wwnn, /* bit6 RPRT_NODENAME */ lpfc_fdmi_port_attr_wwpn, /* bit7 RPRT_PORTNAME */ lpfc_fdmi_port_attr_symbolic_name, /* bit8 RPRT_SYM_PORTNAME */ lpfc_fdmi_port_attr_port_type, /* bit9 RPRT_PORT_TYPE */ lpfc_fdmi_port_attr_class, /* bit10 RPRT_SUPPORTED_CLASS */ lpfc_fdmi_port_attr_fabric_wwpn, /* bit11 RPRT_FABRICNAME */ lpfc_fdmi_port_attr_active_fc4type, /* bit12 RPRT_ACTIVE_FC4_TYPES */ lpfc_fdmi_port_attr_port_state, /* bit13 RPRT_PORT_STATE */ lpfc_fdmi_port_attr_num_disc, /* bit14 RPRT_DISC_PORT */ lpfc_fdmi_port_attr_nportid, /* bit15 RPRT_PORT_ID */ lpfc_fdmi_smart_attr_service, /* bit16 RPRT_SMART_SERVICE */ lpfc_fdmi_smart_attr_guid, /* bit17 RPRT_SMART_GUID */ lpfc_fdmi_smart_attr_version, /* bit18 RPRT_SMART_VERSION */ lpfc_fdmi_smart_attr_model, /* bit19 RPRT_SMART_MODEL */ lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */ lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */ lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */ lpfc_fdmi_vendor_attr_mi, /* bit23 RPRT_VENDOR_MI */ }; /** * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort * @vport: pointer to a host virtual N_Port data structure. 
* @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) * @cmdcode: FDMI command to send * @new_mask: Mask of HBA or PORT Attributes to send * * Builds and sends a FDMI command using the CT subsystem. */ int lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode, uint32_t new_mask) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *rq, *rsp; struct lpfc_sli_ct_request *CtReq; struct ulp_bde64_le *bde; uint32_t bit_pos; uint32_t size, addsz; uint32_t rsp_size; uint32_t mask; struct lpfc_fdmi_reg_hba *rh; struct lpfc_fdmi_port_entry *pe; struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL; struct lpfc_fdmi_attr_block *ab = NULL; int (*func)(struct lpfc_vport *vport, void *attrbuf); void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); if (!ndlp) return 0; cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ /* fill in BDEs for command */ /* Allocate buffer for command payload */ rq = kmalloc(sizeof(*rq), GFP_KERNEL); if (!rq) goto fdmi_cmd_exit; rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys); if (!rq->virt) goto fdmi_cmd_free_rq; /* Allocate buffer for Buffer ptr list */ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); if (!rsp) goto fdmi_cmd_free_rqvirt; rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys); if (!rsp->virt) goto fdmi_cmd_free_rsp; INIT_LIST_HEAD(&rq->list); INIT_LIST_HEAD(&rsp->list); /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */ memset(rq->virt, 0, LPFC_BPL_SIZE); rsp_size = LPFC_BPL_SIZE; /* FDMI request */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n", cmdcode, new_mask, vport->fdmi_port_mask, vport->fc_flag, vport->port_state); CtReq = (struct lpfc_sli_ct_request *)rq->virt; /* First populate the CT_IU preamble */ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; CtReq->RevisionId.bits.InId = 0; CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE; CtReq->FsSubType = SLI_CT_FDMI_Subtypes; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); size = 0; /* Next fill in the specific FDMI cmd information */ switch (cmdcode) { case SLI_MGMT_RHAT: case SLI_MGMT_RHBA: rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un; /* HBA Identifier */ memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); size += sizeof(struct lpfc_fdmi_hba_ident); if (cmdcode == SLI_MGMT_RHBA) { /* Registered Port List */ /* One entry (port) per adapter */ rh->rpl.EntryCnt = cpu_to_be32(1); memcpy(&rh->rpl.pe.PortName, &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); size += sizeof(struct lpfc_fdmi_reg_port_list); } ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size); ab->EntryCnt = 0; size += FOURBYTES; /* add length of EntryCnt field */ bit_pos = 0; if (new_mask) mask = new_mask; else mask = vport->fdmi_hba_mask; /* Mask will dictate what attributes to build in the request */ while (mask) { if (mask & 0x1) { func = lpfc_fdmi_hba_action[bit_pos]; addsz = func(vport, ((uint8_t *)rh + size)); if (addsz) { ab->EntryCnt++; size += addsz; } /* check if another attribute fits */ if ((size + FDMI_MAX_ATTRLEN) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) goto hba_out; } mask = mask >> 1; bit_pos++; } hba_out: ab->EntryCnt = cpu_to_be32(ab->EntryCnt); /* Total size */ size += GID_REQUEST_SZ - 4; break; case SLI_MGMT_RPRT: if (vport->port_type != LPFC_PHYSICAL_PORT) { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return 0; } fallthrough; case SLI_MGMT_RPA: /* Store base ptr right after preamble */ base = (struct lpfc_fdmi_reg_portattr 
*)&CtReq->un; if (cmdcode == SLI_MGMT_RPRT) { rh = (struct lpfc_fdmi_reg_hba *)base; /* HBA Identifier */ memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); pab = (struct lpfc_fdmi_reg_portattr *) ((uint8_t *)base + sizeof(struct lpfc_name)); size += sizeof(struct lpfc_name); } else { pab = base; } memcpy((uint8_t *)&pab->PortName, (uint8_t *)&vport->fc_sparam.portName, sizeof(struct lpfc_name)); pab->ab.EntryCnt = 0; /* add length of name and EntryCnt field */ size += sizeof(struct lpfc_name) + FOURBYTES; bit_pos = 0; if (new_mask) mask = new_mask; else mask = vport->fdmi_port_mask; /* Mask will dictate what attributes to build in the request */ while (mask) { if (mask & 0x1) { func = lpfc_fdmi_port_action[bit_pos]; addsz = func(vport, ((uint8_t *)base + size)); if (addsz) { pab->ab.EntryCnt++; size += addsz; } /* check if another attribute fits */ if ((size + FDMI_MAX_ATTRLEN) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) goto port_out; } mask = mask >> 1; bit_pos++; } port_out: pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt); size += GID_REQUEST_SZ - 4; break; case SLI_MGMT_GHAT: case SLI_MGMT_GRPL: rsp_size = FC_MAX_NS_RSP; fallthrough; case SLI_MGMT_DHBA: case SLI_MGMT_DHAT: pe = (struct lpfc_fdmi_port_entry *)&CtReq->un; memcpy((uint8_t *)&pe->PortName, (uint8_t *)&vport->fc_sparam.portName, sizeof(struct lpfc_name)); size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); break; case SLI_MGMT_GPAT: case SLI_MGMT_GPAS: rsp_size = FC_MAX_NS_RSP; fallthrough; case SLI_MGMT_DPRT: if (vport->port_type != LPFC_PHYSICAL_PORT) { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return 0; } fallthrough; case SLI_MGMT_DPA: pe = (struct lpfc_fdmi_port_entry *)&CtReq->un; memcpy((uint8_t *)&pe->PortName, (uint8_t *)&vport->fc_sparam.portName, sizeof(struct lpfc_name)); size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); break; case SLI_MGMT_GRHL: size = GID_REQUEST_SZ - 4; break; default: lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "0298 FDMI cmdcode x%x not supported\n", cmdcode); goto fdmi_cmd_free_rspvirt; } CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); bde = (struct ulp_bde64_le *)rsp->virt; bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys)); bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys)); bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 << ULP_BDE64_TYPE_SHIFT); bde->type_size |= cpu_to_le32(size); /* * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count * to hold ndlp reference for the corresponding callback function. */ if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0)) return 0; fdmi_cmd_free_rspvirt: lpfc_mbuf_free(phba, rsp->virt, rsp->phys); fdmi_cmd_free_rsp: kfree(rsp); fdmi_cmd_free_rqvirt: lpfc_mbuf_free(phba, rq->virt, rq->phys); fdmi_cmd_free_rq: kfree(rq); fdmi_cmd_exit: /* Issue FDMI request failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0244 Issue FDMI request failed Data: x%x\n", cmdcode); return 1; } /** * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer. * @t: Context object of the timer. * * This function set the WORKER_DELAYED_DISC_TMO flag and wake up * the worker thread. 
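 * It runs in timer context, so the actual discovery work is deferred to
 * lpfc_delayed_disc_timeout_handler() on the worker thread.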
**/ void lpfc_delayed_disc_tmo(struct timer_list *t) { struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo); struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long iflag; spin_lock_irqsave(&vport->work_port_lock, iflag); tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO; if (!tmo_posted) vport->work_port_events |= WORKER_DELAYED_DISC_TMO; spin_unlock_irqrestore(&vport->work_port_lock, iflag); if (!tmo_posted) lpfc_worker_wake_up(phba); return; } /** * lpfc_delayed_disc_timeout_handler - Function called by worker thread to * handle delayed discovery. * @vport: pointer to a host virtual N_Port data structure. * * This function start nport discovery of the vport. **/ void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); if (!(vport->fc_flag & FC_DISC_DELAYED)) { spin_unlock_irq(shost->host_lock); return; } vport->fc_flag &= ~FC_DISC_DELAYED; spin_unlock_irq(shost->host_lock); lpfc_do_scr_ns_plogi(vport->phba, vport); } void lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) { struct lpfc_sli *psli = &phba->sli; lpfc_vpd_t *vp = &phba->vpd; uint32_t b1, b2, b3, b4, i, rev; char c; uint32_t *ptr, str[4]; uint8_t *fwname; if (phba->sli_rev == LPFC_SLI_REV4) snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName); else if (vp->rev.rBit) { if (psli->sli_flag & LPFC_SLI_ACTIVE) rev = vp->rev.sli2FwRev; else rev = vp->rev.sli1FwRev; b1 = (rev & 0x0000f000) >> 12; b2 = (rev & 0x00000f00) >> 8; b3 = (rev & 0x000000c0) >> 6; b4 = (rev & 0x00000030) >> 4; switch (b4) { case 0: c = 'N'; break; case 1: c = 'A'; break; case 2: c = 'B'; break; case 3: c = 'X'; break; default: c = 0; break; } b4 = (rev & 0x0000000f); if (psli->sli_flag & LPFC_SLI_ACTIVE) fwname = vp->rev.sli2FwName; else fwname = vp->rev.sli1FwName; for (i = 0; i < 16; i++) if (fwname[i] == 0x20) fwname[i] = 0; ptr = (uint32_t*)fwname; for (i = 0; i < 3; i++) str[i] = be32_to_cpu(*ptr++); if (c == 0) { if (flag) sprintf(fwrevision, "%d.%d%d (%s)", b1, b2, b3, (char *)str); else sprintf(fwrevision, "%d.%d%d", b1, b2, b3); } else { if (flag) sprintf(fwrevision, "%d.%d%d%c%d (%s)", b1, b2, b3, c, b4, (char *)str); else sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4); } } else { rev = vp->rev.smFwRev; b1 = (rev & 0xff000000) >> 24; b2 = (rev & 0x00f00000) >> 20; b3 = (rev & 0x000f0000) >> 16; c = (rev & 0x0000ff00) >> 8; b4 = (rev & 0x000000ff); sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4); } return; } static void lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *ctcmd = inp->virt; struct lpfc_sli_ct_request *ctrsp = outp->virt; __be16 rsp = ctrsp->CommandResponse.bits.CmdRsp; struct app_id_object *app; struct lpfc_nodelist *ndlp = cmdiocb->ndlp; u32 cmd, hash, bucket; struct lpfc_vmid *vmp, *cur; u8 *data = outp->virt; int i; cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp); if (cmd == SLI_CTAS_DALLAPP_ID) lpfc_ct_free_iocb(phba, cmdiocb); if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) { if (cmd != SLI_CTAS_DALLAPP_ID) goto free_res; } /* Check for a CT LS_RJT response */ if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) { if (cmd != SLI_CTAS_DALLAPP_ID) lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "3306 VMID 
FS_RJT Data: x%x x%x x%x\n", cmd, ctrsp->ReasonCode, ctrsp->Explanation); if ((cmd != SLI_CTAS_DALLAPP_ID) || (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) || (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) { /* If DALLAPP_ID failed retry later */ if (cmd == SLI_CTAS_DALLAPP_ID) vport->load_flag |= FC_DEREGISTER_ALL_APP_ID; goto free_res; } } switch (cmd) { case SLI_CTAS_RAPP_IDENT: app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data); lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "6712 RAPP_IDENT app id %d port id x%x id " "len %d\n", be32_to_cpu(app->app_id), be32_to_cpu(app->port_id), app->obj.entity_id_len); if (app->obj.entity_id_len == 0 || app->port_id == 0) goto free_res; hash = lpfc_vmid_hash_fn(app->obj.entity_id, app->obj.entity_id_len); vmp = lpfc_get_vmid_from_hashtable(vport, hash, app->obj.entity_id); if (vmp) { write_lock(&vport->vmid_lock); vmp->un.app_id = be32_to_cpu(app->app_id); vmp->flag |= LPFC_VMID_REGISTERED; vmp->flag &= ~LPFC_VMID_REQ_REGISTER; write_unlock(&vport->vmid_lock); /* Set IN USE flag */ vport->vmid_flag |= LPFC_VMID_IN_USE; } else { lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "6901 No entry found %s hash %d\n", app->obj.entity_id, hash); } break; case SLI_CTAS_DAPP_IDENT: app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data); lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "6713 DAPP_IDENT app id %d port id x%x\n", be32_to_cpu(app->app_id), be32_to_cpu(app->port_id)); break; case SLI_CTAS_DALLAPP_ID: lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "8856 Deregistered all app ids\n"); read_lock(&vport->vmid_lock); for (i = 0; i < phba->cfg_max_vmid; i++) { vmp = &vport->vmid[i]; if (vmp->flag != LPFC_VMID_SLOT_FREE) memset(vmp, 0, sizeof(struct lpfc_vmid)); } read_unlock(&vport->vmid_lock); /* for all elements in the hash table */ if (!hash_empty(vport->hash_table)) hash_for_each(vport->hash_table, bucket, cur, hnode) hash_del(&cur->hnode); vport->load_flag |= FC_ALLOW_VMID; break; default: lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "8857 Invalid command code\n"); } free_res: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } /** * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort * @vport: pointer to a host virtual N_Port data structure. * @cmdcode: application server command code to send * @vmid: pointer to vmid info structure * * Builds and sends a FDMI command using the CT subsystem. 
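 * Despite the FDMI wording, the request is issued to the management service
 * with the application-services subtype used for VMID registration. Returns 0
 * on success (or when no usable ndlp exists) and -EIO on failure.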
*/ int lpfc_vmid_cmd(struct lpfc_vport *vport, int cmdcode, struct lpfc_vmid *vmid) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *mp, *bmp; struct lpfc_sli_ct_request *ctreq; struct ulp_bde64 *bpl; u32 size; u32 rsp_size; u8 *data; struct lpfc_vmid_rapp_ident_list *rap; struct lpfc_vmid_dapp_ident_list *dap; u8 retry = 0; struct lpfc_nodelist *ndlp; void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) return 0; cmpl = lpfc_cmpl_ct_cmd_vmid; /* fill in BDEs for command */ /* Allocate buffer for command payload */ mp = kmalloc(sizeof(*mp), GFP_KERNEL); if (!mp) goto vmid_free_mp_exit; mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp->virt) goto vmid_free_mp_virt_exit; /* Allocate buffer for Buffer ptr list */ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); if (!bmp) goto vmid_free_bmp_exit; bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); if (!bmp->virt) goto vmid_free_bmp_virt_exit; INIT_LIST_HEAD(&mp->list); INIT_LIST_HEAD(&bmp->list); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "3275 VMID Request Data: x%x x%x x%x\n", vport->fc_flag, vport->port_state, cmdcode); ctreq = (struct lpfc_sli_ct_request *)mp->virt; data = mp->virt; /* First populate the CT_IU preamble */ memset(data, 0, LPFC_BPL_SIZE); ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; ctreq->RevisionId.bits.InId = 0; ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE; ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes; ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); rsp_size = LPFC_BPL_SIZE; size = 0; switch (cmdcode) { case SLI_CTAS_RAPP_IDENT: lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "1329 RAPP_IDENT for %s\n", vmid->host_vmid); ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); rap = (struct lpfc_vmid_rapp_ident_list *) (DAPP_IDENT_OFFSET + data); rap->no_of_objects = cpu_to_be32(1); rap->obj[0].entity_id_len = vmid->vmid_len; memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); size = RAPP_IDENT_OFFSET + struct_size(rap, obj, be32_to_cpu(rap->no_of_objects)); retry = 1; break; case SLI_CTAS_GALLAPPIA_ID: ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); size = GALLAPPIA_ID_SIZE; break; case SLI_CTAS_DAPP_IDENT: lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "1469 DAPP_IDENT for %s\n", vmid->host_vmid); ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); dap = (struct lpfc_vmid_dapp_ident_list *) (DAPP_IDENT_OFFSET + data); dap->no_of_objects = cpu_to_be32(1); dap->obj[0].entity_id_len = vmid->vmid_len; memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); size = DAPP_IDENT_OFFSET + struct_size(dap, obj, be32_to_cpu(dap->no_of_objects)); write_lock(&vport->vmid_lock); vmid->flag &= ~LPFC_VMID_REGISTERED; write_unlock(&vport->vmid_lock); retry = 1; break; case SLI_CTAS_DALLAPP_ID: ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); size = DALLAPP_ID_SIZE; break; default: lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "7062 VMID cmdcode x%x not supported\n", cmdcode); goto vmid_free_all_mem; } ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); bpl = (struct ulp_bde64 *)bmp->virt; bpl->addrHigh = putPaddrHigh(mp->phys); bpl->addrLow = putPaddrLow(mp->phys); bpl->tus.f.bdeFlags = 0; bpl->tus.f.bdeSize = size; /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count * to hold ndlp reference for the corresponding callback function. 
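 * The reference is dropped again in lpfc_cmpl_ct_cmd_vmid() via lpfc_nlp_put()
 * once the response (or failure) has been processed.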
*/ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) return 0; vmid_free_all_mem: lpfc_mbuf_free(phba, bmp->virt, bmp->phys); vmid_free_bmp_virt_exit: kfree(bmp); vmid_free_bmp_exit: lpfc_mbuf_free(phba, mp->virt, mp->phys); vmid_free_mp_virt_exit: kfree(mp); vmid_free_mp_exit: /* Issue CT request failed */ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "3276 VMID CT request failed Data: x%x\n", cmdcode); return -EIO; }
linux-master
drivers/scsi/lpfc/lpfc_ct.c
/* ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_hba.c ** BY : Nick Cheng, C.L. Huang ** Description: SCSI RAID Device Driver for Areca RAID Controller ******************************************************************************* ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved ** ** Web site: www.areca.com.tw ** E-mail: [email protected] ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License version 2 as ** published by the Free Software Foundation. ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ******************************************************************************* ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************* ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst ******************************************************************************* */ #include <linux/module.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/pci_ids.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/circ_buf.h> #include <asm/dma.h> #include <asm/io.h> #include <linux/uaccess.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsicam.h> #include "arcmsr.h" MODULE_AUTHOR("Nick Cheng, C.L. 
Huang <[email protected]>"); MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); static int msix_enable = 1; module_param(msix_enable, int, S_IRUGO); MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)"); static int msi_enable = 1; module_param(msi_enable, int, S_IRUGO); MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)"); static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; module_param(host_can_queue, int, S_IRUGO); MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128"); static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; module_param(cmd_per_lun, int, S_IRUGO); MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32"); static int dma_mask_64 = 0; module_param(dma_mask_64, int, S_IRUGO); MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)"); static int set_date_time = 0; module_param(set_date_time, int, S_IRUGO); MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable"); static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT; module_param(cmd_timeout, int, S_IRUGO); MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90"); #define ARCMSR_SLEEPTIME 10 #define ARCMSR_RETRYCOUNT 12 static wait_queue_head_t wait_q; static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); static int arcmsr_abort(struct scsi_cmnd *); static int arcmsr_bus_reset(struct scsi_cmnd *); static int arcmsr_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info); static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id); static int __maybe_unused arcmsr_suspend(struct device *dev); static int __maybe_unused arcmsr_resume(struct device *dev); static void arcmsr_remove(struct pci_dev *pdev); static void arcmsr_shutdown(struct pci_dev *pdev); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb); static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, u32 intmask_org); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb); static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb); static void arcmsr_request_device_map(struct timer_list *t); static void arcmsr_message_isr_bh_fn(struct work_struct *work); static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb); static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb); static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb); static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock 
*acb); static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb); static void arcmsr_set_iop_datetime(struct timer_list *); static int arcmsr_slave_config(struct scsi_device *sdev); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) { if (queue_depth > ARCMSR_MAX_CMD_PERLUN) queue_depth = ARCMSR_MAX_CMD_PERLUN; return scsi_change_queue_depth(sdev, queue_depth); } static const struct scsi_host_template arcmsr_scsi_host_template = { .module = THIS_MODULE, .proc_name = ARCMSR_NAME, .name = "Areca SAS/SATA RAID driver", .info = arcmsr_info, .queuecommand = arcmsr_queue_command, .eh_abort_handler = arcmsr_abort, .eh_bus_reset_handler = arcmsr_bus_reset, .bios_param = arcmsr_bios_param, .slave_configure = arcmsr_slave_config, .change_queue_depth = arcmsr_adjust_disk_queue_depth, .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD, .this_id = ARCMSR_SCSI_INITIATOR_ID, .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN, .shost_groups = arcmsr_host_groups, .no_write_same = 1, }; static struct pci_device_id arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200), .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201), .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202), .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203), .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214), .driver_data = ACB_ADAPTER_TYPE_D}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880), .driver_data = ACB_ADAPTER_TYPE_C}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884), .driver_data = ACB_ADAPTER_TYPE_E}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886), .driver_data = ACB_ADAPTER_TYPE_F}, {0, 0}, /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); static 
SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume); static struct pci_driver arcmsr_pci_driver = { .name = "arcmsr", .id_table = arcmsr_device_id_table, .probe = arcmsr_probe, .remove = arcmsr_remove, .driver.pm = &arcmsr_pm_ops, .shutdown = arcmsr_shutdown, }; /* **************************************************************************** **************************************************************************** */ static void arcmsr_free_io_queue(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: case ACB_ADAPTER_TYPE_D: case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size, acb->dma_coherent2, acb->dma_coherent_handle2); break; } } static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) { struct pci_dev *pdev = acb->pdev; switch (acb->adapter_type){ case ACB_ADAPTER_TYPE_A:{ acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0)); if (!acb->pmuA) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } break; } case ACB_ADAPTER_TYPE_B:{ void __iomem *mem_base0, *mem_base1; mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!mem_base0) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); if (!mem_base1) { iounmap(mem_base0); printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } acb->mem_base0 = mem_base0; acb->mem_base1 = mem_base1; break; } case ACB_ADAPTER_TYPE_C:{ acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!acb->pmuC) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/ return true; } break; } case ACB_ADAPTER_TYPE_D: { void __iomem *mem_base0; unsigned long addr, range; addr = (unsigned long)pci_resource_start(pdev, 0); range = pci_resource_len(pdev, 0); mem_base0 = ioremap(addr, range); if (!mem_base0) { pr_notice("arcmsr%d: memory mapping region fail\n", acb->host->host_no); return false; } acb->mem_base0 = mem_base0; break; } case ACB_ADAPTER_TYPE_E: { acb->pmuE = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!acb->pmuE) { pr_notice("arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; } writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */ acb->in_doorbell = 0; acb->out_doorbell = 0; break; } case ACB_ADAPTER_TYPE_F: { acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!acb->pmuF) { pr_notice("arcmsr%d: memory mapping region fail\n", acb->host->host_no); return false; } writel(0, &acb->pmuF->host_int_status); /* clear interrupt */ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); acb->in_doorbell = 0; acb->out_doorbell = 0; break; } } return true; } static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: iounmap(acb->pmuA); break; case ACB_ADAPTER_TYPE_B: iounmap(acb->mem_base0); iounmap(acb->mem_base1); break; case ACB_ADAPTER_TYPE_C: iounmap(acb->pmuC); break; case 
ACB_ADAPTER_TYPE_D: iounmap(acb->mem_base0); break; case ACB_ADAPTER_TYPE_E: iounmap(acb->pmuE); break; case ACB_ADAPTER_TYPE_F: iounmap(acb->pmuF); break; } } static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) { irqreturn_t handle_state; struct AdapterControlBlock *acb = dev_id; handle_state = arcmsr_interrupt(acb); return handle_state; } static int arcmsr_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *geom) { int heads, sectors, cylinders, total_capacity; if (scsi_partsize(bdev, capacity, geom)) return 0; total_capacity = capacity; heads = 64; sectors = 32; cylinders = total_capacity / (heads * sectors); if (cylinders > 1024) { heads = 255; sectors = 63; cylinders = total_capacity / (heads * sectors); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return 0; } static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; int i; for (i = 0; i < 2000; i++) { if (readl(&reg->outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); return true; } msleep(10); } /* max 20 seconds */ return false; } static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; int i; for (i = 0; i < 2000; i++) { if (readl(reg->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); return true; } msleep(10); } /* max 20 seconds */ return false; } static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) { struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; int i; for (i = 0; i < 2000; i++) { if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear); /*clear interrupt*/ return true; } msleep(10); } /* max 20 seconds */ return false; } static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB) { struct MessageUnit_D *reg = pACB->pmuD; int i; for (i = 0; i < 2000; i++) { if (readl(reg->outbound_doorbell) & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); return true; } msleep(10); } /* max 20 seconds */ return false; } static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB) { int i; uint32_t read_doorbell; struct MessageUnit_E __iomem *phbcmu = pACB->pmuE; for (i = 0; i < 2000; i++) { read_doorbell = readl(&phbcmu->iobound_doorbell); if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { writel(0, &phbcmu->host_int_status); /*clear interrupt*/ pACB->in_doorbell = read_doorbell; return true; } msleep(10); } /* max 20 seconds */ return false; } static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; int retry_count = 30; writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); do { if (arcmsr_hbaA_wait_msgint_ready(acb)) break; else { retry_count--; printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ timeout, retry count down = %d \n", acb->host->host_no, retry_count); } } while (retry_count != 0); } static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; int retry_count = 30; writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell); do { if 
(arcmsr_hbaB_wait_msgint_ready(acb)) break; else { retry_count--; printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ timeout,retry count down = %d \n", acb->host->host_no, retry_count); } } while (retry_count != 0); } static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) { struct MessageUnit_C __iomem *reg = pACB->pmuC; int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); do { if (arcmsr_hbaC_wait_msgint_ready(pACB)) { break; } else { retry_count--; printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ timeout,retry count down = %d \n", pACB->host->host_no, retry_count); } } while (retry_count != 0); return; } static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB) { int retry_count = 15; struct MessageUnit_D *reg = pACB->pmuD; writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0); do { if (arcmsr_hbaD_wait_msgint_ready(pACB)) break; retry_count--; pr_notice("arcmsr%d: wait 'flush adapter " "cache' timeout, retry count down = %d\n", pACB->host->host_no, retry_count); } while (retry_count != 0); } static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB) { int retry_count = 30; struct MessageUnit_E __iomem *reg = pACB->pmuE; writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pACB->out_doorbell, &reg->iobound_doorbell); do { if (arcmsr_hbaE_wait_msgint_ready(pACB)) break; retry_count--; pr_notice("arcmsr%d: wait 'flush adapter " "cache' timeout, retry count down = %d\n", pACB->host->host_no, retry_count); } while (retry_count != 0); } static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_hbaA_flush_cache(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_hbaB_flush_cache(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_hbaC_flush_cache(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_hbaD_flush_cache(acb); break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: arcmsr_hbaE_flush_cache(acb); break; } } static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); } else { reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); } reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); } static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb) { struct MessageUnit_D *reg = acb->pmuD; reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); 
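/* Note: MEM_BASE0() is expected to resolve these ARC-1214 register offsets against the BAR0 window ioremapped into acb->mem_base0 by arcmsr_remap_pciregion(); the remaining message, doorbell and inbound/outbound list registers below are set up the same way. */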
reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); } static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb) { dma_addr_t host_buffer_dma; struct MessageUnit_F __iomem *pmuF; memset(acb->dma_coherent2, 0xff, acb->completeQ_size); acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 + acb->completeQ_size, 4); acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100; acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200; memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE); host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4); pmuF = acb->pmuF; /* host buffer low address, bit0:1 all buffer active */ writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0); /* host buffer high address */ writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1); /* set host buffer physical address */ writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell); } static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) { bool rtn = true; void *dma_coherent; dma_addr_t dma_coherent_handle; struct pci_dev *pdev = acb->pdev; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: { acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32); dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = dma_coherent; acb->pmuB = (struct MessageUnit_B *)dma_coherent; arcmsr_hbaB_assign_regAddr(acb); } break; case ACB_ADAPTER_TYPE_D: { acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32); dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = 
dma_coherent; acb->pmuD = (struct MessageUnit_D *)dma_coherent; arcmsr_hbaD_assign_regAddr(acb); } break; case ACB_ADAPTER_TYPE_E: { uint32_t completeQ_size; completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; acb->ioqueue_size = roundup(completeQ_size, 32); dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent){ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = dma_coherent; acb->pCompletionQ = dma_coherent; acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); acb->doneq_index = 0; } break; case ACB_ADAPTER_TYPE_F: { uint32_t QueueDepth; uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32}; arcmsr_wait_firmware_ready(acb); QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7]; acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128; acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32); dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = dma_coherent; acb->pCompletionQ = dma_coherent; acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ); acb->doneq_index = 0; arcmsr_hbaF_assign_regAddr(acb); } break; default: break; } return rtn; } static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) { struct pci_dev *pdev = acb->pdev; void *dma_coherent; dma_addr_t dma_coherent_handle; struct CommandControlBlock *ccb_tmp; int i = 0, j = 0; unsigned long cdb_phyaddr, next_ccb_phy; unsigned long roundup_ccbsize; unsigned long max_xfer_len; unsigned long max_sg_entrys; uint32_t firm_config_version, curr_phy_upper32; for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GONE; max_xfer_len = ARCMSR_MAX_XFER_LEN; max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES; firm_config_version = acb->firm_cfg_version; if((firm_config_version & 0xFF) >= 3){ max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ max_sg_entrys = (max_xfer_len/4096); } acb->host->max_sectors = max_xfer_len/512; acb->host->sg_tablesize = max_sg_entrys; roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB; if (acb->adapter_type != ACB_ADAPTER_TYPE_F) acb->uncache_size += acb->ioqueue_size; dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); if(!dma_coherent){ printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); return -ENOMEM; } acb->dma_coherent = dma_coherent; acb->dma_coherent_handle = dma_coherent_handle; memset(dma_coherent, 0, acb->uncache_size); acb->ccbsize = roundup_ccbsize; ccb_tmp = dma_coherent; curr_phy_upper32 = upper_32_bits(dma_coherent_handle); acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; for(i = 0; i < acb->maxFreeCCB; i++){ cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5; break; case 
ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: ccb_tmp->cdb_phyaddr = cdb_phyaddr; break; } acb->pccb_pool[i] = ccb_tmp; ccb_tmp->acb = acb; ccb_tmp->smid = (u32)i << 16; INIT_LIST_HEAD(&ccb_tmp->list); next_ccb_phy = dma_coherent_handle + roundup_ccbsize; if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) { acb->maxFreeCCB = i; acb->host->can_queue = i; break; } else list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize); dma_coherent_handle = next_ccb_phy; } if (acb->adapter_type != ACB_ADAPTER_TYPE_F) { acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = ccb_tmp; } switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2; arcmsr_hbaB_assign_regAddr(acb); break; case ACB_ADAPTER_TYPE_D: acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2; arcmsr_hbaD_assign_regAddr(acb); break; case ACB_ADAPTER_TYPE_E: acb->pCompletionQ = acb->dma_coherent2; acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); acb->doneq_index = 0; break; } return 0; } static void arcmsr_message_isr_bh_fn(struct work_struct *work) { struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh); char *acb_dev_map = (char *)acb->device_map; uint32_t __iomem *signature = NULL; char __iomem *devicemap = NULL; int target, lun; struct scsi_device *psdev; char diff, temp; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]); devicemap = (char __iomem *)(&reg->message_rwbuffer[21]); break; } case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]); devicemap = (char __iomem *)(&reg->message_rwbuffer[21]); break; } case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]); devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]); break; } case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]); devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]); break; } case ACB_ADAPTER_TYPE_E: { struct MessageUnit_E __iomem *reg = acb->pmuE; signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]); devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]); break; } case ACB_ADAPTER_TYPE_F: { signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]); devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]); break; } } if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG) return; for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { temp = readb(devicemap); diff = (*acb_dev_map) ^ temp; if (diff != 0) { *acb_dev_map = temp; for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { if ((diff & 0x01) == 1 && (temp & 0x01) == 1) { scsi_add_device(acb->host, 0, target, lun); } else if ((diff & 0x01) == 1 && (temp & 0x01) == 0) { psdev = scsi_device_lookup(acb->host, 0, target, lun); if (psdev != NULL) { scsi_remove_device(psdev); scsi_device_put(psdev); } } temp >>= 1; diff >>= 1; } } devicemap++; acb_dev_map++; } acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG; } static int arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb) { unsigned long flags; int nvec, i; if (msix_enable == 0) goto msi_int0; nvec = pci_alloc_irq_vectors(pdev, 1, 
ARCMST_NUM_MSIX_VECTORS, PCI_IRQ_MSIX); if (nvec > 0) { pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no); flags = 0; } else { msi_int0: if (msi_enable == 1) { nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); if (nvec == 1) { dev_info(&pdev->dev, "msi enabled\n"); goto msi_int1; } } nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY); if (nvec < 1) return FAILED; msi_int1: flags = IRQF_SHARED; } acb->vector_count = nvec; for (i = 0; i < nvec; i++) { if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt, flags, "arcmsr", acb)) { pr_warn("arcmsr%d: request_irq =%d failed!\n", acb->host->host_no, pci_irq_vector(pdev, i)); goto out_free_irq; } } return SUCCESS; out_free_irq: while (--i >= 0) free_irq(pci_irq_vector(pdev, i), acb); pci_free_irq_vectors(pdev); return FAILED; } static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb) { INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); pacb->fw_flag = FW_NORMAL; timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0); pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); add_timer(&pacb->eternal_timer); } static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb) { timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0); pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000); add_timer(&pacb->refresh_timer); } static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb) { struct pci_dev *pcidev = acb->pdev; if (IS_DMA64) { if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) || dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64))) goto dma32; if (acb->adapter_type <= ACB_ADAPTER_TYPE_B) return 0; if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) || dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) { printk("arcmsr: set DMA 64 mask failed\n"); return -ENXIO; } } else { dma32: if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) || dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) || dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) { printk("arcmsr: set DMA 32-bit mask failed\n"); return -ENXIO; } } return 0; } static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *host; struct AdapterControlBlock *acb; uint8_t bus,dev_fun; int error; error = pci_enable_device(pdev); if(error){ return -ENODEV; } host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock)); if(!host){ goto pci_disable_dev; } init_waitqueue_head(&wait_q); bus = pdev->bus->number; dev_fun = pdev->devfn; acb = (struct AdapterControlBlock *) host->hostdata; memset(acb,0,sizeof(struct AdapterControlBlock)); acb->pdev = pdev; acb->adapter_type = id->driver_data; if (arcmsr_set_dma_mask(acb)) goto scsi_host_release; acb->host = host; host->max_lun = ARCMSR_MAX_TARGETLUN; host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/ if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD)) host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; host->can_queue = host_can_queue; /* max simultaneous cmds */ if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN)) cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; host->cmd_per_lun = cmd_per_lun; host->this_id = ARCMSR_SCSI_INITIATOR_ID; host->unique_id = (bus << 8) | dev_fun; pci_set_drvdata(pdev, host); pci_set_master(pdev); error = pci_request_regions(pdev, "arcmsr"); if(error){ goto scsi_host_release; } 
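	/*
	 * From here the probe path initialises the driver-private locks and
	 * flags, maps the controller registers, sets up the I/O queue, reads
	 * the firmware spec, allocates the CCB pool, registers the Scsi_Host,
	 * requests the IRQ vectors, brings the IOP online and finally arms
	 * the device-map (and optional date-time) timers, sysfs attributes
	 * and the host scan; failures unwind through the goto labels below
	 * in reverse order.
	 */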
spin_lock_init(&acb->eh_lock); spin_lock_init(&acb->ccblist_lock); spin_lock_init(&acb->postq_lock); spin_lock_init(&acb->doneq_lock); spin_lock_init(&acb->rqbuffer_lock); spin_lock_init(&acb->wqbuffer_lock); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; INIT_LIST_HEAD(&acb->ccb_free_list); error = arcmsr_remap_pciregion(acb); if(!error){ goto pci_release_regs; } error = arcmsr_alloc_io_queue(acb); if (!error) goto unmap_pci_region; error = arcmsr_get_firmware_spec(acb); if(!error){ goto free_hbb_mu; } if (acb->adapter_type != ACB_ADAPTER_TYPE_F) arcmsr_free_io_queue(acb); error = arcmsr_alloc_ccb_pool(acb); if(error){ goto unmap_pci_region; } error = scsi_add_host(host, &pdev->dev); if(error){ goto free_ccb_pool; } if (arcmsr_request_irq(pdev, acb) == FAILED) goto scsi_host_remove; arcmsr_iop_init(acb); arcmsr_init_get_devmap_timer(acb); if (set_date_time) arcmsr_init_set_datetime_timer(acb); if(arcmsr_alloc_sysfs_attr(acb)) goto out_free_sysfs; scsi_scan_host(host); return 0; out_free_sysfs: if (set_date_time) del_timer_sync(&acb->refresh_timer); del_timer_sync(&acb->eternal_timer); flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_free_irq(pdev, acb); scsi_host_remove: scsi_remove_host(host); free_ccb_pool: arcmsr_free_ccb_pool(acb); goto unmap_pci_region; free_hbb_mu: arcmsr_free_io_queue(acb); unmap_pci_region: arcmsr_unmap_pciregion(acb); pci_release_regs: pci_release_regions(pdev); scsi_host_release: scsi_host_put(host); pci_disable_dev: pci_disable_device(pdev); return -ENODEV; } static void arcmsr_free_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb) { int i; for (i = 0; i < acb->vector_count; i++) free_irq(pci_irq_vector(pdev, i), acb); pci_free_irq_vectors(pdev); } static int __maybe_unused arcmsr_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; arcmsr_disable_outbound_ints(acb); arcmsr_free_irq(pdev, acb); del_timer_sync(&acb->eternal_timer); if (set_date_time) del_timer_sync(&acb->refresh_timer); flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); return 0; } static int __maybe_unused arcmsr_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; if (arcmsr_set_dma_mask(acb)) goto controller_unregister; if (arcmsr_request_irq(pdev, acb) == FAILED) goto controller_stop; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; uint32_t i; for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { reg->post_qbuffer[i] = 0; reg->done_qbuffer[i] = 0; } reg->postq_index = 0; reg->doneq_index = 0; break; } case ACB_ADAPTER_TYPE_E: writel(0, &acb->pmuE->host_int_status); writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); acb->in_doorbell = 0; acb->out_doorbell = 0; acb->doneq_index = 0; break; case ACB_ADAPTER_TYPE_F: writel(0, &acb->pmuF->host_int_status); writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); acb->in_doorbell = 0; acb->out_doorbell = 0; acb->doneq_index = 0; arcmsr_hbaF_assign_regAddr(acb); break; } arcmsr_iop_init(acb); arcmsr_init_get_devmap_timer(acb); if (set_date_time) 
arcmsr_init_set_datetime_timer(acb); return 0; controller_stop: arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); controller_unregister: scsi_remove_host(host); arcmsr_free_ccb_pool(acb); if (acb->adapter_type == ACB_ADAPTER_TYPE_F) arcmsr_free_io_queue(acb); arcmsr_unmap_pciregion(acb); scsi_host_put(host); return -ENODEV; } static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout\n" , acb->host->host_no); return false; } return true; } static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout\n" , acb->host->host_no); return false; } return true; } static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB) { struct MessageUnit_C __iomem *reg = pACB->pmuC; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout\n" , pACB->host->host_no); return false; } return true; } static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB) { struct MessageUnit_D *reg = pACB->pmuD; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0); if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait 'abort all outstanding " "command' timeout\n", pACB->host->host_no); return false; } return true; } static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB) { struct MessageUnit_E __iomem *reg = pACB->pmuE; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pACB->out_doorbell, &reg->iobound_doorbell); if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait 'abort all outstanding " "command' timeout\n", pACB->host->host_no); return false; } return true; } static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { uint8_t rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: rtnval = arcmsr_hbaA_abort_allcmd(acb); break; case ACB_ADAPTER_TYPE_B: rtnval = arcmsr_hbaB_abort_allcmd(acb); break; case ACB_ADAPTER_TYPE_C: rtnval = arcmsr_hbaC_abort_allcmd(acb); break; case ACB_ADAPTER_TYPE_D: rtnval = arcmsr_hbaD_abort_allcmd(acb); break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: rtnval = arcmsr_hbaE_abort_allcmd(acb); break; } return rtnval; } static void arcmsr_ccb_complete(struct CommandControlBlock *ccb) { struct AdapterControlBlock *acb = ccb->acb; struct scsi_cmnd *pcmd = ccb->pcmd; unsigned long flags; atomic_dec(&acb->ccboutstandingcount); scsi_dma_unmap(ccb->pcmd); ccb->startdone = ARCMSR_CCB_DONE; spin_lock_irqsave(&acb->ccblist_lock, flags); list_add_tail(&ccb->list, &acb->ccb_free_list); spin_unlock_irqrestore(&acb->ccblist_lock, flags); scsi_done(pcmd); } static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) { struct scsi_cmnd *pcmd = ccb->pcmd; pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION; if (pcmd->sense_buffer) { struct SENSE_DATA *sensebuffer; memcpy_and_pad(pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 
ccb->arcmsr_cdb.SenseData, sizeof(ccb->arcmsr_cdb.SenseData), 0); sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; sensebuffer->Valid = 1; } } static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) { u32 orig_mask = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A : { struct MessageUnit_A __iomem *reg = acb->pmuA; orig_mask = readl(&reg->outbound_intmask); writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ &reg->outbound_intmask); } break; case ACB_ADAPTER_TYPE_B : { struct MessageUnit_B *reg = acb->pmuB; orig_mask = readl(reg->iop2drv_doorbell_mask); writel(0, reg->iop2drv_doorbell_mask); } break; case ACB_ADAPTER_TYPE_C:{ struct MessageUnit_C __iomem *reg = acb->pmuC; /* disable all outbound interrupt */ orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */ writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask); } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; /* disable all outbound interrupt */ writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable); } break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; orig_mask = readl(&reg->host_int_mask); writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask); readl(&reg->host_int_mask); /* Dummy readl to force pci flush */ } break; } return orig_mask; } static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb, bool error) { uint8_t id, lun; id = ccb->pcmd->device->id; lun = ccb->pcmd->device->lun; if (!error) { if (acb->devstate[id][lun] == ARECA_RAID_GONE) acb->devstate[id][lun] = ARECA_RAID_GOOD; ccb->pcmd->result = DID_OK << 16; arcmsr_ccb_complete(ccb); }else{ switch (ccb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { acb->devstate[id][lun] = ARECA_RAID_GONE; ccb->pcmd->result = DID_NO_CONNECT << 16; arcmsr_ccb_complete(ccb); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[id][lun] = ARECA_RAID_GONE; ccb->pcmd->result = DID_BAD_TARGET << 16; arcmsr_ccb_complete(ccb); } break; case ARCMSR_DEV_CHECK_CONDITION: { acb->devstate[id][lun] = ARECA_RAID_GOOD; arcmsr_report_sense_info(ccb); arcmsr_ccb_complete(ccb); } break; default: printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d isr get command error done, \ but got unknown DeviceStatus = 0x%x \n" , acb->host->host_no , id , lun , ccb->arcmsr_cdb.DeviceStatus); acb->devstate[id][lun] = ARECA_RAID_GONE; ccb->pcmd->result = DID_NO_CONNECT << 16; arcmsr_ccb_complete(ccb); break; } } } static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) { if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { struct scsi_cmnd *abortcmd = pCCB->pcmd; if (abortcmd) { abortcmd->result |= DID_ABORT << 16; arcmsr_ccb_complete(pCCB); printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n", acb->host->host_no, pCCB); } return; } printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \ done acb = '0x%p'" "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x" " ccboutstandingcount = %d \n" , acb->host->host_no , acb , pCCB , pCCB->acb , pCCB->startdone , atomic_read(&acb->ccboutstandingcount)); return; } arcmsr_report_ccb_state(acb, pCCB, error); } static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i = 0; uint32_t 
flag_ccb; struct ARCMSR_CDB *pARCMSR_CDB; bool error; struct CommandControlBlock *pCCB; unsigned long ccb_cdb_phy; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; uint32_t outbound_intstatus; outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; /*clear and abort all outbound posted Q*/ writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) && (i++ < acb->maxOutstanding)) { ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); } } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; /*clear all outbound posted Q*/ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { flag_ccb = reg->done_qbuffer[i]; if (flag_ccb != 0) { reg->done_qbuffer[i] = 0; ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); } reg->post_qbuffer[i] = 0; } reg->doneq_index = 0; reg->postq_index = 0; } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) { /*need to do*/ flag_ccb = readl(&reg->outbound_queueport_low); ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); } } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *pmu = acb->pmuD; uint32_t outbound_write_pointer; uint32_t doneq_index, index_stripped, addressLow, residual, toggle; unsigned long flags; residual = atomic_read(&acb->ccboutstandingcount); for (i = 0; i < residual; i++) { spin_lock_irqsave(&acb->doneq_lock, flags); outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; doneq_index = pmu->doneq_index; if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { toggle = doneq_index & 0x4000; index_stripped = (doneq_index & 0xFFF) + 1; index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; pmu->doneq_index = index_stripped ? (index_stripped | toggle) : ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; spin_unlock_irqrestore(&acb->doneq_lock, flags); addressLow = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (addressLow & 0xFFFFFFF0); if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *) (acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; arcmsr_drain_donequeue(acb, pCCB, error); writel(doneq_index, pmu->outboundlist_read_pointer); } else { spin_unlock_irqrestore(&acb->doneq_lock, flags); mdelay(10); } } pmu->postq_index = 0; pmu->doneq_index = 0x40FF; } break; case ACB_ADAPTER_TYPE_E: arcmsr_hbaE_postqueue_isr(acb); break; case ACB_ADAPTER_TYPE_F: arcmsr_hbaF_postqueue_isr(acb); break; } } static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb) { char *acb_dev_map = (char *)acb->device_map; int target, lun, i; struct scsi_device *psdev; struct CommandControlBlock *ccb; char temp; for (i = 0; i < acb->maxFreeCCB; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { ccb->pcmd->result = DID_NO_CONNECT << 16; scsi_dma_unmap(ccb->pcmd); scsi_done(ccb->pcmd); } } for (target = 0; target < ARCMSR_MAX_TARGETID; target++) { temp = *acb_dev_map; if (temp) { for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { if (temp & 1) { psdev = scsi_device_lookup(acb->host, 0, target, lun); if (psdev != NULL) { scsi_remove_device(psdev); scsi_device_put(psdev); } } temp >>= 1; } *acb_dev_map = 0; } acb_dev_map++; } } static void arcmsr_free_pcidev(struct AdapterControlBlock *acb) { struct pci_dev *pdev; struct Scsi_Host *host; host = acb->host; arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); flush_work(&acb->arcmsr_do_message_isr_bh); del_timer_sync(&acb->eternal_timer); if (set_date_time) del_timer_sync(&acb->refresh_timer); pdev = acb->pdev; arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); if (acb->adapter_type == ACB_ADAPTER_TYPE_F) arcmsr_free_io_queue(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); pci_disable_device(pdev); } static void arcmsr_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; int poll_count = 0; uint16_t dev_id; pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); if (dev_id == 0xffff) { acb->acb_flags &= ~ACB_F_IOP_INITED; acb->acb_flags |= ACB_F_ADAPTER_REMOVED; arcmsr_remove_scsi_devices(acb); arcmsr_free_pcidev(acb); return; } arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); flush_work(&acb->arcmsr_do_message_isr_bh); del_timer_sync(&acb->eternal_timer); if (set_date_time) del_timer_sync(&acb->refresh_timer); arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){ if (!atomic_read(&acb->ccboutstandingcount)) break; arcmsr_interrupt(acb);/* FIXME: need spinlock */ msleep(25); } if (atomic_read(&acb->ccboutstandingcount)) { int i; arcmsr_abort_allcmd(acb); arcmsr_done4abort_postqueue(acb); for (i = 0; i < acb->maxFreeCCB; i++) { struct CommandControlBlock *ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { ccb->startdone = ARCMSR_CCB_ABORTED; ccb->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(ccb); } } } arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); if (acb->adapter_type == ACB_ADAPTER_TYPE_F) arcmsr_free_io_queue(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); pci_disable_device(pdev); } static void arcmsr_shutdown(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) return; del_timer_sync(&acb->eternal_timer); if 
(set_date_time) del_timer_sync(&acb->refresh_timer); arcmsr_disable_outbound_ints(acb); arcmsr_free_irq(pdev, acb); flush_work(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); } static int __init arcmsr_module_init(void) { int error = 0; error = pci_register_driver(&arcmsr_pci_driver); return error; } static void __exit arcmsr_module_exit(void) { pci_unregister_driver(&arcmsr_pci_driver); } module_init(arcmsr_module_init); module_exit(arcmsr_module_exit); static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, u32 intmask_org) { u32 mask; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); writel(mask, &reg->outbound_intmask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); writel(mask, reg->iop2drv_doorbell_mask); acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); writel(intmask_org & mask, &reg->host_int_mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; mask = ARCMSR_ARC1214_ALL_INT_ENABLE; writel(intmask_org | mask, reg->pcief0_int_enable); break; } case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR); writel(intmask_org & mask, &reg->host_int_mask); break; } } } static int arcmsr_build_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) { struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; int8_t *psge = (int8_t *)&arcmsr_cdb->u; __le32 address_lo, address_hi; int arccdbsize = 0x30; __le32 length = 0; int i; struct scatterlist *sg; int nseg; ccb->pcmd = pcmd; memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); arcmsr_cdb->TargetID = pcmd->device->id; arcmsr_cdb->LUN = pcmd->device->lun; arcmsr_cdb->Function = 1; arcmsr_cdb->msgContext = 0; memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); nseg = scsi_dma_map(pcmd); if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0)) return FAILED; scsi_for_each_sg(pcmd, sg, nseg, i) { /* Get the physical address of the current data pointer */ length = cpu_to_le32(sg_dma_len(sg)); address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg))); address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg))); if (address_hi == 0) { struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; pdma_sg->address = address_lo; pdma_sg->length = length; psge += sizeof (struct SG32ENTRY); arccdbsize += sizeof (struct SG32ENTRY); } else { struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; pdma_sg->addresshigh = address_hi; pdma_sg->address = address_lo; pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR); psge += sizeof (struct SG64ENTRY); arccdbsize += sizeof (struct SG64ENTRY); } } arcmsr_cdb->sgcount = (uint8_t)nseg; arcmsr_cdb->DataLength = scsi_bufflen(pcmd); 
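	/*
	 * msgPages below is the CDB size rounded up to 256-byte pages:
	 * e.g. arccdbsize = 0x130 gives msgPages = 2 and, being larger
	 * than 256 bytes, also sets ARCMSR_CDB_FLAG_SGL_BSIZE just after.
	 */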
arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0); if ( arccdbsize > 256) arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; if (pcmd->sc_data_direction == DMA_TO_DEVICE) arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; ccb->arc_cdb_size = arccdbsize; return SUCCESS; } static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) { uint32_t cdb_phyaddr = ccb->cdb_phyaddr; struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; atomic_inc(&acb->ccboutstandingcount); ccb->startdone = ARCMSR_CCB_START; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, &reg->inbound_queueport); else writel(cdb_phyaddr, &reg->inbound_queueport); break; } case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; uint32_t ending_index, index = reg->postq_index; ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); reg->post_qbuffer[ending_index] = 0; if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { reg->post_qbuffer[index] = cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE; } else { reg->post_qbuffer[index] = cdb_phyaddr; } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */ reg->postq_index = index; writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell); } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *phbcmu = acb->pmuC; uint32_t ccb_post_stamp, arc_cdb_size; arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1); writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high); writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *pmu = acb->pmuD; u16 index_stripped; u16 postq_index, toggle; unsigned long flags; struct InBound_SRB *pinbound_srb; spin_lock_irqsave(&acb->postq_lock, flags); postq_index = pmu->postq_index; pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]); pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr); pinbound_srb->addressLow = cdb_phyaddr; pinbound_srb->length = ccb->arc_cdb_size >> 2; arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); toggle = postq_index & 0x4000; index_stripped = postq_index + 1; index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1); pmu->postq_index = index_stripped ? (index_stripped | toggle) : (toggle ^ 0x4000); writel(postq_index, pmu->inboundlist_write_pointer); spin_unlock_irqrestore(&acb->postq_lock, flags); break; } case ACB_ADAPTER_TYPE_E: { struct MessageUnit_E __iomem *pmu = acb->pmuE; u32 ccb_post_stamp, arc_cdb_size; arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 
0x300 : ccb->arc_cdb_size; ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6)); writel(0, &pmu->inbound_queueport_high); writel(ccb_post_stamp, &pmu->inbound_queueport_low); break; } case ACB_ADAPTER_TYPE_F: { struct MessageUnit_F __iomem *pmu = acb->pmuF; u32 ccb_post_stamp, arc_cdb_size; if (ccb->arc_cdb_size <= 0x300) arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1; else { arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2; if (arc_cdb_size > 0xF) arc_cdb_size = 0xF; arc_cdb_size = (arc_cdb_size << 1) | 1; } ccb_post_stamp = (ccb->smid | arc_cdb_size); writel(0, &pmu->inbound_queueport_high); writel(ccb_post_stamp, &pmu->inbound_queueport_low); break; } } } static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" , acb->host->host_no); } } static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" , acb->host->host_no); } } static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_C __iomem *reg = pACB->pmuC; pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { printk(KERN_NOTICE "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" , pACB->host->host_no); } return; } static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_D *reg = pACB->pmuD; pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); if (!arcmsr_hbaD_wait_msgint_ready(pACB)) pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " "timeout\n", pACB->host->host_no); } static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_E __iomem *reg = pACB->pmuE; pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pACB->out_doorbell, &reg->iobound_doorbell); if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " "timeout\n", pACB->host->host_no); } } static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_hbaA_stop_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_hbaB_stop_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_hbaC_stop_bgrb(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_hbaD_stop_bgrb(acb); break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: arcmsr_hbaE_stop_bgrb(acb); break; } } static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) { dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); } static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); } break; case 
ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; writel(acb->out_doorbell, &reg->iobound_doorbell); } break; } } static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell); } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY, reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; writel(acb->out_doorbell, &reg->iobound_doorbell); } break; } } struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) { struct QBUFFER __iomem *qbuffer = NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *phbcmu = acb->pmuC; qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; } break; case ACB_ADAPTER_TYPE_E: { struct MessageUnit_E __iomem *reg = acb->pmuE; qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer; } break; case ACB_ADAPTER_TYPE_F: { qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer; } break; } return qbuffer; } static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) { struct QBUFFER __iomem *pqbuffer = NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; pqbuffer = (struct QBUFFER 
__iomem *)reg->message_wbuffer; } break; case ACB_ADAPTER_TYPE_E: { struct MessageUnit_E __iomem *reg = acb->pmuE; pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer; } break; case ACB_ADAPTER_TYPE_F: pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer; break; } return pqbuffer; } static uint32_t arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, struct QBUFFER __iomem *prbuffer) { uint8_t *pQbuffer; uint8_t *buf1 = NULL; uint32_t __iomem *iop_data; uint32_t iop_len, data_len, *buf2 = NULL; iop_data = (uint32_t __iomem *)prbuffer->data; iop_len = readl(&prbuffer->data_len); if (iop_len > 0) { buf1 = kmalloc(128, GFP_ATOMIC); buf2 = (uint32_t *)buf1; if (buf1 == NULL) return 0; data_len = iop_len; while (data_len >= 4) { *buf2++ = readl(iop_data); iop_data++; data_len -= 4; } if (data_len) *buf2 = readl(iop_data); buf2 = (uint32_t *)buf1; } while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; *pQbuffer = *buf1; acb->rqbuf_putIndex++; /* if last, index number set it to 0 */ acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; buf1++; iop_len--; } kfree(buf2); /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return 1; } uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER __iomem *prbuffer) { uint8_t *pQbuffer; uint8_t __iomem *iop_data; uint32_t iop_len; if (acb->adapter_type > ACB_ADAPTER_TYPE_B) return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); iop_data = (uint8_t __iomem *)prbuffer->data; iop_len = readl(&prbuffer->data_len); while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; *pQbuffer = readb(iop_data); acb->rqbuf_putIndex++; acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } arcmsr_iop_message_read(acb); return 1; } static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) { unsigned long flags; struct QBUFFER __iomem *prbuffer; int32_t buf_empty_len; spin_lock_irqsave(&acb->rqbuffer_lock, flags); prbuffer = arcmsr_get_iop_rqbuffer(acb); if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) { buf_empty_len = (ARCMSR_MAX_QBUFFER - 1) - (acb->rqbuf_putIndex - acb->rqbuf_getIndex); } else buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1; if (buf_empty_len >= readl(&prbuffer->data_len)) { if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } else acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); } static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) { uint8_t *pQbuffer; struct QBUFFER __iomem *pwbuffer; uint8_t *buf1 = NULL; uint32_t __iomem *iop_data; uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data; if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { buf1 = kmalloc(128, GFP_ATOMIC); buf2 = (uint32_t *)buf1; if (buf1 == NULL) return; acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (uint32_t __iomem *)pwbuffer->data; while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; *buf1 = *pQbuffer; acb->wqbuf_getIndex++; acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; buf1++; allxfer_len++; } data_len = allxfer_len; buf1 = (uint8_t *)buf2; while (data_len >= 4) { data = *buf2++; writel(data, iop_data); iop_data++; data_len -= 4; } if (data_len) { data = *buf2; writel(data, iop_data); } writel(allxfer_len, &pwbuffer->data_len); kfree(buf1); arcmsr_iop_message_wrote(acb); } } void 
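/*
 * arcmsr_write_ioctldata2iop(): drain up to 124 bytes of the driver's
 * wqbuffer ring into the IOP message write buffer.  Type C and later
 * adapters take the DWORD path above; type A/B controllers are written
 * byte-wise through writeb().
 */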
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) { uint8_t *pQbuffer; struct QBUFFER __iomem *pwbuffer; uint8_t __iomem *iop_data; int32_t allxfer_len = 0; if (acb->adapter_type > ACB_ADAPTER_TYPE_B) { arcmsr_write_ioctldata2iop_in_DWORD(acb); return; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (uint8_t __iomem *)pwbuffer->data; while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; writeb(*pQbuffer, iop_data); acb->wqbuf_getIndex++; acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; iop_data++; allxfer_len++; } writel(allxfer_len, &pwbuffer->data_len); arcmsr_iop_message_wrote(acb); } } static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) { unsigned long flags; spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; if (acb->wqbuf_getIndex != acb->wqbuf_putIndex) arcmsr_write_ioctldata2iop(acb); if (acb->wqbuf_getIndex == acb->wqbuf_putIndex) acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); } static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) { uint32_t outbound_doorbell; struct MessageUnit_A __iomem *reg = acb->pmuA; outbound_doorbell = readl(&reg->outbound_doorbell); do { writel(outbound_doorbell, &reg->outbound_doorbell); if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) arcmsr_iop2drv_data_wrote_handle(acb); if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) arcmsr_iop2drv_data_read_handle(acb); outbound_doorbell = readl(&reg->outbound_doorbell); } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); } static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) { uint32_t outbound_doorbell; struct MessageUnit_C __iomem *reg = pACB->pmuC; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! 
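** (The loop below acknowledges each event by writing it back to the
** outbound_doorbell_clear register, reading it back to flush the write,
** and then re-reading outbound_doorbell so that nothing posted while
** the handlers ran is lost.)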
** check if there are any mail need to pack from firmware ******************************************************************* */ outbound_doorbell = readl(&reg->outbound_doorbell); do { writel(outbound_doorbell, &reg->outbound_doorbell_clear); readl(&reg->outbound_doorbell_clear); if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) arcmsr_iop2drv_data_wrote_handle(pACB); if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) arcmsr_iop2drv_data_read_handle(pACB); if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) arcmsr_hbaC_message_isr(pACB); outbound_doorbell = readl(&reg->outbound_doorbell); } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); } static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB) { uint32_t outbound_doorbell; struct MessageUnit_D *pmu = pACB->pmuD; outbound_doorbell = readl(pmu->outbound_doorbell); do { writel(outbound_doorbell, pmu->outbound_doorbell); if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) arcmsr_hbaD_message_isr(pACB); if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) arcmsr_iop2drv_data_wrote_handle(pACB); if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK) arcmsr_iop2drv_data_read_handle(pACB); outbound_doorbell = readl(pmu->outbound_doorbell); } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); } static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB) { uint32_t outbound_doorbell, in_doorbell, tmp, i; struct MessageUnit_E __iomem *reg = pACB->pmuE; if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) { for (i = 0; i < 5; i++) { in_doorbell = readl(&reg->iobound_doorbell); if (in_doorbell != 0) break; } } else in_doorbell = readl(&reg->iobound_doorbell); outbound_doorbell = in_doorbell ^ pACB->in_doorbell; do { writel(0, &reg->host_int_status); /* clear interrupt */ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(pACB); } if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(pACB); } if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbaE_message_isr(pACB); } tmp = in_doorbell; in_doorbell = readl(&reg->iobound_doorbell); outbound_doorbell = tmp ^ in_doorbell; } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE)); pACB->in_doorbell = in_doorbell; } static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t flag_ccb; struct MessageUnit_A __iomem *reg = acb->pmuA; struct ARCMSR_CDB *pARCMSR_CDB; struct CommandControlBlock *pCCB; bool error; unsigned long cdb_phy_addr; while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; if (acb->cdb_phyadd_hipart) cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; arcmsr_drain_donequeue(acb, pCCB, error); } } static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t index; uint32_t flag_ccb; struct MessageUnit_B *reg = acb->pmuB; struct ARCMSR_CDB *pARCMSR_CDB; struct CommandControlBlock *pCCB; bool error; unsigned long cdb_phy_addr; index = reg->doneq_index; while ((flag_ccb = reg->done_qbuffer[index]) != 0) { cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; if (acb->cdb_phyadd_hipart) cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); reg->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; reg->doneq_index = index; } } static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) { struct MessageUnit_C __iomem *phbcmu; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *ccb; uint32_t flag_ccb, throttling = 0; unsigned long ccb_cdb_phy; int error; phbcmu = acb->pmuC; /* areca cdb command done */ /* Use correct offset and size for syncing */ while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != 0xFFFFFFFF) { ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; /* check if command done with no error */ arcmsr_drain_donequeue(acb, ccb, error); throttling++; if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell); throttling = 0; } } } static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) { u32 outbound_write_pointer, doneq_index, index_stripped, toggle; uint32_t addressLow; int error; struct MessageUnit_D *pmu; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *ccb; unsigned long flags, ccb_cdb_phy; spin_lock_irqsave(&acb->doneq_lock, flags); pmu = acb->pmuD; outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; doneq_index = pmu->doneq_index; if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { do { toggle = doneq_index & 0x4000; index_stripped = (doneq_index & 0xFFF) + 1; index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; pmu->doneq_index = index_stripped ? (index_stripped | toggle) : ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; addressLow = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (addressLow & 0xFFFFFFF0); if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; arcmsr_drain_donequeue(acb, ccb, error); writel(doneq_index, pmu->outboundlist_read_pointer); } while ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)); } writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR, pmu->outboundlist_interrupt_cause); readl(pmu->outboundlist_interrupt_cause); spin_unlock_irqrestore(&acb->doneq_lock, flags); } static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t doneq_index; uint16_t cmdSMID; int error; struct MessageUnit_E __iomem *pmu; struct CommandControlBlock *ccb; unsigned long flags; spin_lock_irqsave(&acb->doneq_lock, flags); doneq_index = acb->doneq_index; pmu = acb->pmuE; while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) { cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; ccb = acb->pccb_pool[cmdSMID]; error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_drain_donequeue(acb, ccb, error); doneq_index++; if (doneq_index >= acb->completionQ_entry) doneq_index = 0; } acb->doneq_index = doneq_index; writel(doneq_index, &pmu->reply_post_consumer_index); spin_unlock_irqrestore(&acb->doneq_lock, flags); } static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb) { uint32_t doneq_index; uint16_t cmdSMID; int error; struct MessageUnit_F __iomem *phbcmu; struct CommandControlBlock *ccb; unsigned long flags; spin_lock_irqsave(&acb->doneq_lock, flags); doneq_index = acb->doneq_index; phbcmu = acb->pmuF; while (1) { cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; if (cmdSMID == 0xffff) break; ccb = acb->pccb_pool[cmdSMID]; error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_drain_donequeue(acb, ccb, error); acb->pCompletionQ[doneq_index].cmdSMID = 0xffff; doneq_index++; if (doneq_index >= acb->completionQ_entry) doneq_index = 0; } acb->doneq_index = doneq_index; writel(doneq_index, &phbcmu->reply_post_consumer_index); spin_unlock_irqrestore(&acb->doneq_lock, flags); } /* ********************************************************************************** ** Handle a message interrupt ** ** The only message interrupt we expect is in response to a query for the current adapter config. ** We want this in order to compare the drivemap so that we can detect newly-attached drives. ********************************************************************************** */ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; /*clear interrupt and message state*/ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) schedule_work(&acb->arcmsr_do_message_isr_bh); } static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; /*clear interrupt and message state*/ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) schedule_work(&acb->arcmsr_do_message_isr_bh); } /* ********************************************************************************** ** Handle a message interrupt ** ** The only message interrupt we expect is in response to a query for the ** current adapter config. ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
********************************************************************************** */ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_C __iomem *reg = acb->pmuC; /*clear interrupt and message state*/ writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear); if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) schedule_work(&acb->arcmsr_do_message_isr_bh); } static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_D *reg = acb->pmuD; writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); readl(reg->outbound_doorbell); if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) schedule_work(&acb->arcmsr_do_message_isr_bh); } static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb) { struct MessageUnit_E __iomem *reg = acb->pmuE; writel(0, &reg->host_int_status); if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) schedule_work(&acb->arcmsr_do_message_isr_bh); } static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) { uint32_t outbound_intstatus; struct MessageUnit_A __iomem *reg = acb->pmuA; outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) return IRQ_NONE; do { writel(outbound_intstatus, &reg->outbound_intstatus); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) arcmsr_hbaA_doorbell_isr(acb); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) arcmsr_hbaA_postqueue_isr(acb); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) arcmsr_hbaA_message_isr(acb); outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT | ARCMSR_MU_OUTBOUND_MESSAGE0_INT)); return IRQ_HANDLED; } static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb) { uint32_t outbound_doorbell; struct MessageUnit_B *reg = acb->pmuB; outbound_doorbell = readl(reg->iop2drv_doorbell) & acb->outbound_int_enable; if (!outbound_doorbell) return IRQ_NONE; do { writel(~outbound_doorbell, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) arcmsr_iop2drv_data_wrote_handle(acb); if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) arcmsr_iop2drv_data_read_handle(acb); if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) arcmsr_hbaB_postqueue_isr(acb); if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) arcmsr_hbaB_message_isr(acb); outbound_doorbell = readl(reg->iop2drv_doorbell) & acb->outbound_int_enable; } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)); return IRQ_HANDLED; } static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB) { uint32_t host_interrupt_status; struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = readl(&phbcmu->host_int_status) & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); if (!host_interrupt_status) return IRQ_NONE; do { if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) arcmsr_hbaC_doorbell_isr(pACB); /* MU post queue interrupts*/ if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) arcmsr_hbaC_postqueue_isr(pACB); host_interrupt_status = 
readl(&phbcmu->host_int_status); } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); return IRQ_HANDLED; } static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB) { u32 host_interrupt_status; struct MessageUnit_D *pmu = pACB->pmuD; host_interrupt_status = readl(pmu->host_int_status) & (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR); if (!host_interrupt_status) return IRQ_NONE; do { /* MU post queue interrupts*/ if (host_interrupt_status & ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR) arcmsr_hbaD_postqueue_isr(pACB); if (host_interrupt_status & ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR) arcmsr_hbaD_doorbell_isr(pACB); host_interrupt_status = readl(pmu->host_int_status); } while (host_interrupt_status & (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)); return IRQ_HANDLED; } static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB) { uint32_t host_interrupt_status; struct MessageUnit_E __iomem *pmu = pACB->pmuE; host_interrupt_status = readl(&pmu->host_int_status) & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR); if (!host_interrupt_status) return IRQ_NONE; do { /* MU ioctl transfer doorbell interrupts*/ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbaE_doorbell_isr(pACB); } /* MU post queue interrupts*/ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbaE_postqueue_isr(pACB); } host_interrupt_status = readl(&pmu->host_int_status); } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)); return IRQ_HANDLED; } static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB) { uint32_t host_interrupt_status; struct MessageUnit_F __iomem *phbcmu = pACB->pmuF; host_interrupt_status = readl(&phbcmu->host_int_status) & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR); if (!host_interrupt_status) return IRQ_NONE; do { /* MU post queue interrupts*/ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) arcmsr_hbaF_postqueue_isr(pACB); /* MU ioctl transfer doorbell interrupts*/ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) arcmsr_hbaE_doorbell_isr(pACB); host_interrupt_status = readl(&phbcmu->host_int_status); } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)); return IRQ_HANDLED; } static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: return arcmsr_hbaA_handle_isr(acb); case ACB_ADAPTER_TYPE_B: return arcmsr_hbaB_handle_isr(acb); case ACB_ADAPTER_TYPE_C: return arcmsr_hbaC_handle_isr(acb); case ACB_ADAPTER_TYPE_D: return arcmsr_hbaD_handle_isr(acb); case ACB_ADAPTER_TYPE_E: return arcmsr_hbaE_handle_isr(acb); case ACB_ADAPTER_TYPE_F: return arcmsr_hbaF_handle_isr(acb); default: return IRQ_NONE; } } static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { if (acb) { /* stop adapter background rebuild */ if (acb->acb_flags & ACB_F_MSG_START_BGRB) { uint32_t intmask_org; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_outbound_ints(acb, intmask_org); } } } void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb) { uint32_t i; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { for (i = 0; i < 15; 
i++) { if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; acb->rqbuf_getIndex = 0; acb->rqbuf_putIndex = 0; arcmsr_iop_message_read(acb); mdelay(30); } else if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { acb->rqbuf_getIndex = 0; acb->rqbuf_putIndex = 0; mdelay(30); } else break; } } } static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd) { char *buffer; unsigned short use_sg; int retvalue = 0, transfer_len = 0; unsigned long flags; struct CMD_MESSAGE_FIELD *pcmdmessagefld; uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 | (uint32_t)cmd->cmnd[6] << 16 | (uint32_t)cmd->cmnd[7] << 8 | (uint32_t)cmd->cmnd[8]; struct scatterlist *sg; use_sg = scsi_sg_count(cmd); sg = scsi_sglist(cmd); buffer = kmap_atomic(sg_page(sg)) + sg->offset; if (use_sg > 1) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } transfer_len += sg->length; if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__); goto message_out; } pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer; switch (controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { unsigned char *ver_addr; uint8_t *ptmpQbuffer; uint32_t allxfer_len = 0; ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; pr_info("%s: memory not enough!\n", __func__); goto message_out; } ptmpQbuffer = ver_addr; spin_lock_irqsave(&acb->rqbuffer_lock, flags); if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { unsigned int tail = acb->rqbuf_getIndex; unsigned int head = acb->rqbuf_putIndex; unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); if (allxfer_len > ARCMSR_API_DATA_BUFLEN) allxfer_len = ARCMSR_API_DATA_BUFLEN; if (allxfer_len <= cnt_to_end) memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); else { memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); } acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; } memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER __iomem *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); kfree(ver_addr); pcmdmessagefld->cmdmessage.Length = allxfer_len; if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; break; } case ARCMSR_MESSAGE_WRITE_WQBUFFER: { unsigned char *ver_addr; uint32_t user_len; int32_t cnt2end; uint8_t *pQbuffer, *ptmpuserbuffer; user_len = pcmdmessagefld->cmdmessage.Length; if (user_len > ARCMSR_API_DATA_BUFLEN) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); if (!ver_addr) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } ptmpuserbuffer = ver_addr; memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); spin_lock_irqsave(&acb->wqbuffer_lock, flags); if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)cmd->sense_buffer; arcmsr_write_ioctldata2iop(acb); /* has error report sensedata */ sensebuffer->ErrorCode = 
SCSI_SENSE_CURRENT_ERRORS; sensebuffer->SenseKey = ILLEGAL_REQUEST; sensebuffer->AdditionalSenseLength = 0x0A; sensebuffer->AdditionalSenseCode = 0x20; sensebuffer->Valid = 1; retvalue = ARCMSR_MESSAGE_FAIL; } else { pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; if (user_len > cnt2end) { memcpy(pQbuffer, ptmpuserbuffer, cnt2end); ptmpuserbuffer += cnt2end; user_len -= cnt2end; acb->wqbuf_putIndex = 0; pQbuffer = acb->wqbuffer; } memcpy(pQbuffer, ptmpuserbuffer, user_len); acb->wqbuf_putIndex += user_len; acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER; if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_write_ioctldata2iop(acb); } } spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); kfree(ver_addr); if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; break; } case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { uint8_t *pQbuffer = acb->rqbuffer; arcmsr_clear_iop2drv_rqueue_buffer(acb); spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_getIndex = 0; acb->rqbuf_putIndex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; break; } case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { uint8_t *pQbuffer = acb->wqbuffer; spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); acb->wqbuf_getIndex = 0; acb->wqbuf_putIndex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; break; } case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { uint8_t *pQbuffer; arcmsr_clear_iop2drv_rqueue_buffer(acb); spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_getIndex = 0; acb->rqbuf_putIndex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); acb->wqbuf_getIndex = 0; acb->wqbuf_putIndex = 0; pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; break; } case ARCMSR_MESSAGE_RETURN_CODE_3F: { if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; break; } case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! 
I am ARCMSR"; if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; memcpy(pcmdmessagefld->messagedatabuffer, hello_string, (int16_t)strlen(hello_string)); break; } case ARCMSR_MESSAGE_SAY_GOODBYE: { if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; arcmsr_iop_parking(acb); break; } case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { if (acb->fw_flag == FW_DEADLOCK) pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; else pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; arcmsr_flush_adapter_cache(acb); break; } default: retvalue = ARCMSR_MESSAGE_FAIL; pr_info("%s: unknown controlcode!\n", __func__); } message_out: if (use_sg) { struct scatterlist *sg = scsi_sglist(cmd); kunmap_atomic(buffer - sg->offset); } return retvalue; } static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb) { struct list_head *head; struct CommandControlBlock *ccb = NULL; unsigned long flags; spin_lock_irqsave(&acb->ccblist_lock, flags); head = &acb->ccb_free_list; if (!list_empty(head)) { ccb = list_entry(head->next, struct CommandControlBlock, list); list_del_init(&ccb->list); }else{ spin_unlock_irqrestore(&acb->ccblist_lock, flags); return NULL; } spin_unlock_irqrestore(&acb->ccblist_lock, flags); return ccb; } static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd) { switch (cmd->cmnd[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer; struct scatterlist *sg; if (cmd->device->lun) { cmd->result = (DID_TIME_OUT << 16); scsi_done(cmd); return; } inqdata[0] = TYPE_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ inqdata[4] = 31; /* length of additional data */ memcpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ memcpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ memcpy(&inqdata[32], "R001", 4); /* Product Revision */ sg = scsi_sglist(cmd); buffer = kmap_atomic(sg_page(sg)) + sg->offset; memcpy(buffer, inqdata, sizeof(inqdata)); sg = scsi_sglist(cmd); kunmap_atomic(buffer - sg->offset); scsi_done(cmd); } break; case WRITE_BUFFER: case READ_BUFFER: { if (arcmsr_iop_message_xfer(acb, cmd)) cmd->result = (DID_ERROR << 16); scsi_done(cmd); } break; default: scsi_done(cmd); } } static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; struct CommandControlBlock *ccb; int target = cmd->device->id; if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) { cmd->result = (DID_NO_CONNECT << 16); scsi_done(cmd); return 0; } cmd->host_scribble = NULL; cmd->result = 0; if (target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, cmd); return 0; } ccb = arcmsr_get_freeccb(acb); if (!ccb) return SCSI_MLQUEUE_HOST_BUSY; if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) { cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT; scsi_done(cmd); return 0; } arcmsr_post_ccb(acb, ccb); return 0; } static DEF_SCSI_QCMD(arcmsr_queue_command) static int arcmsr_slave_config(struct scsi_device *sdev) { unsigned int dev_timeout; dev_timeout = 
sdev->request_queue->rq_timeout; if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout)) blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ); return 0; } static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer) { int count; uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model; uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version; uint32_t *acb_device_map = (uint32_t *)pACB->device_map; uint32_t *firm_model = &rwbuffer[15]; uint32_t *firm_version = &rwbuffer[17]; uint32_t *device_map = &rwbuffer[21]; count = 2; while (count) { *acb_firm_model = readl(firm_model); acb_firm_model++; firm_model++; count--; } count = 4; while (count) { *acb_firm_version = readl(firm_version); acb_firm_version++; firm_version++; count--; } count = 4; while (count) { *acb_device_map = readl(device_map); acb_device_map++; device_map++; count--; } pACB->signature = readl(&rwbuffer[0]); pACB->firm_request_len = readl(&rwbuffer[1]); pACB->firm_numbers_queue = readl(&rwbuffer[2]); pACB->firm_sdram_size = readl(&rwbuffer[3]); pACB->firm_hd_channels = readl(&rwbuffer[4]); pACB->firm_cfg_version = readl(&rwbuffer[25]); pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", pACB->host->host_no, pACB->firm_model, pACB->firm_version); } static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; arcmsr_wait_firmware_ready(acb); writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); return false; } arcmsr_get_adapter_config(acb, reg->message_rwbuffer); return true; } static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; arcmsr_wait_firmware_ready(acb); writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no); return false; } writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); return false; } arcmsr_get_adapter_config(acb, reg->message_rwbuffer); return true; } static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB) { uint32_t intmask_org; struct MessageUnit_C __iomem *reg = pACB->pmuC; /* disable all outbound interrupt */ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */ writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask); /* wait firmware ready */ arcmsr_wait_firmware_ready(pACB); /* post "get config" instruction */ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); /* wait message ready */ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", pACB->host->host_no); return false; } arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer); return true; } static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) { struct MessageUnit_D *reg = acb->pmuD; if (readl(acb->pmuD->outbound_doorbell) & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, acb->pmuD->outbound_doorbell);/*clear interrupt*/ } arcmsr_wait_firmware_ready(acb); /* post "get 
config" instruction */ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0); /* wait message ready */ if (!arcmsr_hbaD_wait_msgint_ready(acb)) { pr_notice("arcmsr%d: wait get adapter firmware " "miscellaneous data timeout\n", acb->host->host_no); return false; } arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer); return true; } static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB) { struct MessageUnit_E __iomem *reg = pACB->pmuE; uint32_t intmask_org; /* disable all outbound interrupt */ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask); /* wait firmware ready */ arcmsr_wait_firmware_ready(pACB); mdelay(20); /* post "get config" instruction */ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pACB->out_doorbell, &reg->iobound_doorbell); /* wait message ready */ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait get adapter firmware " "miscellaneous data timeout\n", pACB->host->host_no); return false; } arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer); return true; } static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB) { struct MessageUnit_F __iomem *reg = pACB->pmuF; uint32_t intmask_org; /* disable all outbound interrupt */ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask); /* wait firmware ready */ arcmsr_wait_firmware_ready(pACB); /* post "get config" instruction */ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pACB->out_doorbell, &reg->iobound_doorbell); /* wait message ready */ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n", pACB->host->host_no); return false; } arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer); return true; } static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { bool rtn = false; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: rtn = arcmsr_hbaA_get_config(acb); break; case ACB_ADAPTER_TYPE_B: rtn = arcmsr_hbaB_get_config(acb); break; case ACB_ADAPTER_TYPE_C: rtn = arcmsr_hbaC_get_config(acb); break; case ACB_ADAPTER_TYPE_D: rtn = arcmsr_hbaD_get_config(acb); break; case ACB_ADAPTER_TYPE_E: rtn = arcmsr_hbaE_get_config(acb); break; case ACB_ADAPTER_TYPE_F: rtn = arcmsr_hbaF_get_config(acb); break; default: break; } acb->maxOutstanding = acb->firm_numbers_queue - 1; if (acb->host->can_queue >= acb->firm_numbers_queue) acb->host->can_queue = acb->maxOutstanding; else acb->maxOutstanding = acb->host->can_queue; acb->maxFreeCCB = acb->host->can_queue; if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM) acb->maxFreeCCB += 64; return rtn; } static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_A __iomem *reg = acb->pmuA; struct CommandControlBlock *ccb; struct ARCMSR_CDB *arcmsr_cdb; uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; int rtn; bool error; unsigned long ccb_cdb_phy; polling_hba_ccb_retry: poll_count++; outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ while (1) { if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) { 
if (poll_ccb_done){ rtn = SUCCESS; break; }else { msleep(25); if (poll_count > 100){ rtn = FAILED; break; } goto polling_hba_ccb_retry; } } ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" " poll command abort successfully \n" , acb->host->host_no , ccb->pcmd->device->id , (u32)ccb->pcmd->device->lun , ccb); ccb->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(ccb); continue; } printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" " command done ccb = '0x%p'" "ccboutstandingcount = %d \n" , acb->host->host_no , ccb , atomic_read(&acb->ccboutstandingcount)); continue; } error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_report_ccb_state(acb, ccb, error); } return rtn; } static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_B *reg = acb->pmuB; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *ccb; uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; int index, rtn; bool error; unsigned long ccb_cdb_phy; polling_hbb_ccb_retry: poll_count++; /* clear doorbell interrupt */ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); while(1){ index = reg->doneq_index; flag_ccb = reg->done_qbuffer[index]; if (flag_ccb == 0) { if (poll_ccb_done){ rtn = SUCCESS; break; }else { msleep(25); if (poll_count > 100){ rtn = FAILED; break; } goto polling_hbb_ccb_retry; } } reg->done_qbuffer[index] = 0; index++; /*if last index number set it to 0 */ index %= ARCMSR_MAX_HBB_POSTQUEUE; reg->doneq_index = index; /* check if command done with no error*/ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" " poll command abort successfully \n" ,acb->host->host_no ,ccb->pcmd->device->id ,(u32)ccb->pcmd->device->lun ,ccb); ccb->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(ccb); continue; } printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" " command done ccb = '0x%p'" "ccboutstandingcount = %d \n" , acb->host->host_no , ccb , atomic_read(&acb->ccboutstandingcount)); continue; } error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; arcmsr_report_ccb_state(acb, ccb, error); } return rtn; } static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_C __iomem *reg = acb->pmuC; uint32_t flag_ccb; struct ARCMSR_CDB *arcmsr_cdb; bool error; struct CommandControlBlock *pCCB; uint32_t poll_ccb_done = 0, poll_count = 0; int rtn; unsigned long ccb_cdb_phy; polling_hbc_ccb_retry: poll_count++; while (1) { if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) { if (poll_ccb_done) { rtn = SUCCESS; break; } else { msleep(25); if (poll_count > 100) { rtn = FAILED; break; } goto polling_hbc_ccb_retry; } } flag_ccb = readl(&reg->outbound_queueport_low); ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0; /* check ifcommand done with no error*/ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" " poll command abort successfully \n" , acb->host->host_no , pCCB->pcmd->device->id , (u32)pCCB->pcmd->device->lun , pCCB); pCCB->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(pCCB); continue; } printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" " command done ccb = '0x%p'" "ccboutstandingcount = %d \n" , acb->host->host_no , pCCB , atomic_read(&acb->ccboutstandingcount)); continue; } error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_report_ccb_state(acb, pCCB, error); } return rtn; } static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { bool error; uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb; int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle; unsigned long flags, ccb_cdb_phy; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *pCCB; struct MessageUnit_D *pmu = acb->pmuD; polling_hbaD_ccb_retry: poll_count++; while (1) { spin_lock_irqsave(&acb->doneq_lock, flags); outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; doneq_index = pmu->doneq_index; if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) { spin_unlock_irqrestore(&acb->doneq_lock, flags); if (poll_ccb_done) { rtn = SUCCESS; break; } else { msleep(25); if (poll_count > 40) { rtn = FAILED; break; } goto polling_hbaD_ccb_retry; } } toggle = doneq_index & 0x4000; index_stripped = (doneq_index & 0xFFF) + 1; index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; pmu->doneq_index = index_stripped ? (index_stripped | toggle) : ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; spin_unlock_irqrestore(&acb->doneq_lock, flags); flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); if (acb->cdb_phyadd_hipart) ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (pCCB == poll_ccb) ? 
1 : 0; if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { pr_notice("arcmsr%d: scsi id = %d " "lun = %d ccb = '0x%p' poll command " "abort successfully\n" , acb->host->host_no , pCCB->pcmd->device->id , (u32)pCCB->pcmd->device->lun , pCCB); pCCB->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(pCCB); continue; } pr_notice("arcmsr%d: polling an illegal " "ccb command done ccb = '0x%p' " "ccboutstandingcount = %d\n" , acb->host->host_no , pCCB , atomic_read(&acb->ccboutstandingcount)); continue; } error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_report_ccb_state(acb, pCCB, error); } return rtn; } static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { bool error; uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index; uint16_t cmdSMID; unsigned long flags; int rtn; struct CommandControlBlock *pCCB; struct MessageUnit_E __iomem *reg = acb->pmuE; polling_hbaC_ccb_retry: poll_count++; while (1) { spin_lock_irqsave(&acb->doneq_lock, flags); doneq_index = acb->doneq_index; if ((readl(&reg->reply_post_producer_index) & 0xFFFF) == doneq_index) { spin_unlock_irqrestore(&acb->doneq_lock, flags); if (poll_ccb_done) { rtn = SUCCESS; break; } else { msleep(25); if (poll_count > 40) { rtn = FAILED; break; } goto polling_hbaC_ccb_retry; } } cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; doneq_index++; if (doneq_index >= acb->completionQ_entry) doneq_index = 0; acb->doneq_index = doneq_index; spin_unlock_irqrestore(&acb->doneq_lock, flags); pCCB = acb->pccb_pool[cmdSMID]; poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0; /* check if command done with no error*/ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { pr_notice("arcmsr%d: scsi id = %d " "lun = %d ccb = '0x%p' poll command " "abort successfully\n" , acb->host->host_no , pCCB->pcmd->device->id , (u32)pCCB->pcmd->device->lun , pCCB); pCCB->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(pCCB); continue; } pr_notice("arcmsr%d: polling an illegal " "ccb command done ccb = '0x%p' " "ccboutstandingcount = %d\n" , acb->host->host_no , pCCB , atomic_read(&acb->ccboutstandingcount)); continue; } error = (acb->pCompletionQ[doneq_index].cmdFlag & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; arcmsr_report_ccb_state(acb, pCCB, error); } writel(doneq_index, &reg->reply_post_consumer_index); return rtn; } static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { int rtn = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb); break; case ACB_ADAPTER_TYPE_B: rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb); break; case ACB_ADAPTER_TYPE_C: rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb); break; case ACB_ADAPTER_TYPE_D: rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb); break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb); break; } return rtn; } static void arcmsr_set_iop_datetime(struct timer_list *t) { struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer); unsigned int next_time; struct tm tm; union { struct { uint16_t signature; uint8_t year; uint8_t month; uint8_t date; uint8_t hour; uint8_t minute; uint8_t second; } a; struct { uint32_t msg_time[2]; } b; } datetime; time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm); datetime.a.signature = 0x55AA; datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */ datetime.a.month = tm.tm_mon; datetime.a.date = tm.tm_mday; datetime.a.hour = tm.tm_hour; datetime.a.minute = tm.tm_min; datetime.a.second = tm.tm_sec; switch (pacb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = pacb->pmuA; writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]); writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0); break; } case ACB_ADAPTER_TYPE_B: { uint32_t __iomem *rwbuffer; struct MessageUnit_B *reg = pacb->pmuB; rwbuffer = reg->message_rwbuffer; writel(datetime.b.msg_time[0], rwbuffer++); writel(datetime.b.msg_time[1], rwbuffer++); writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell); break; } case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = pacb->pmuC; writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]); writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); break; } case ACB_ADAPTER_TYPE_D: { uint32_t __iomem *rwbuffer; struct MessageUnit_D *reg = pacb->pmuD; rwbuffer = reg->msgcode_rwbuffer; writel(datetime.b.msg_time[0], rwbuffer++); writel(datetime.b.msg_time[1], rwbuffer++); writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0); break; } case ACB_ADAPTER_TYPE_E: { struct MessageUnit_E __iomem *reg = pacb->pmuE; writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]); writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0); pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pacb->out_doorbell, &reg->iobound_doorbell); break; } case ACB_ADAPTER_TYPE_F: { struct MessageUnit_F __iomem *reg = pacb->pmuF; pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0]; pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1]; writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0); pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pacb->out_doorbell, &reg->iobound_doorbell); break; } } if (sys_tz.tz_minuteswest) next_time = ARCMSR_HOURS; else next_time = ARCMSR_MINUTES; mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time)); } static int arcmsr_iop_confirm(struct 
AdapterControlBlock *acb) { uint32_t cdb_phyaddr, cdb_phyaddr_hi32; dma_addr_t dma_coherent_handle; /* ******************************************************************** ** here we need to tell iop 331 our freeccb.HighPart ** if freeccb.HighPart is not zero ******************************************************************** */ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: case ACB_ADAPTER_TYPE_D: dma_coherent_handle = acb->dma_coherent_handle2; break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: dma_coherent_handle = acb->dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); break; default: dma_coherent_handle = acb->dma_coherent_handle; break; } cdb_phyaddr = lower_32_bits(dma_coherent_handle); cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle); acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if (cdb_phyaddr_hi32 != 0) { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_SIGNATURE_SET_CONFIG, \ &reg->message_rwbuffer[0]); writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ &reg->inbound_msgaddr0); if (!arcmsr_hbaA_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ part physical address timeout\n", acb->host->host_no); return 1; } } } break; case ACB_ADAPTER_TYPE_B: { uint32_t __iomem *rwbuffer; struct MessageUnit_B *reg = acb->pmuB; reg->postq_index = 0; reg->doneq_index = 0; writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \ acb->host->host_no); return 1; } rwbuffer = reg->message_rwbuffer; /* driver "set config" signature */ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); /* normal should be zero */ writel(cdb_phyaddr_hi32, rwbuffer++); /* postQ size (256 + 8)*4 */ writel(cdb_phyaddr, rwbuffer++); /* doneQ size (256 + 8)*4 */ writel(cdb_phyaddr + 1056, rwbuffer++); /* ccb maxQ size must be --> [(256 + 8)*4]*/ writel(1056, rwbuffer); writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ timeout \n",acb->host->host_no); return 1; } writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { pr_err("arcmsr%d: can't set driver mode.\n", acb->host->host_no); return 1; } } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", acb->adapter_index, cdb_phyaddr_hi32); writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]); writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); if (!arcmsr_hbaC_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ timeout \n", acb->host->host_no); return 1; } } break; case ACB_ADAPTER_TYPE_D: { uint32_t __iomem *rwbuffer; struct MessageUnit_D *reg = acb->pmuD; reg->postq_index = 0; reg->doneq_index = 0; rwbuffer = reg->msgcode_rwbuffer; writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); writel(cdb_phyaddr_hi32, rwbuffer++); 
writel(cdb_phyaddr, rwbuffer++); writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE * sizeof(struct InBound_SRB)), rwbuffer++); writel(0x100, rwbuffer); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0); if (!arcmsr_hbaD_wait_msgint_ready(acb)) { pr_notice("arcmsr%d: 'set command Q window' timeout\n", acb->host->host_no); return 1; } } break; case ACB_ADAPTER_TYPE_E: { struct MessageUnit_E __iomem *reg = acb->pmuE; writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]); writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]); writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]); writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]); writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]); writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]); writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]); writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(acb->out_doorbell, &reg->iobound_doorbell); if (!arcmsr_hbaE_wait_msgint_ready(acb)) { pr_notice("arcmsr%d: 'set command Q window' timeout \n", acb->host->host_no); return 1; } } break; case ACB_ADAPTER_TYPE_F: { struct MessageUnit_F __iomem *reg = acb->pmuF; acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG; acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886; acb->msgcode_rwbuffer[2] = cdb_phyaddr; acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32; acb->msgcode_rwbuffer[4] = acb->ccbsize; acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2); acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2); acb->msgcode_rwbuffer[7] = acb->completeQ_size; writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(acb->out_doorbell, &reg->iobound_doorbell); if (!arcmsr_hbaE_wait_msgint_ready(acb)) { pr_notice("arcmsr%d: 'set command Q window' timeout\n", acb->host->host_no); return 1; } } break; } return 0; } static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) { uint32_t firmware_state = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; do { if (!(acb->acb_flags & ACB_F_IOP_INITED)) msleep(20); firmware_state = readl(&reg->outbound_msgaddr1); } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; do { if (!(acb->acb_flags & ACB_F_IOP_INITED)) msleep(20); firmware_state = readl(reg->iop2drv_doorbell); } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; do { if (!(acb->acb_flags & ACB_F_IOP_INITED)) msleep(20); firmware_state = readl(&reg->outbound_msgaddr1); } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; do { if (!(acb->acb_flags & ACB_F_IOP_INITED)) msleep(20); firmware_state = readl(reg->outbound_msgaddr1); } while ((firmware_state & ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0); } break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; do { if (!(acb->acb_flags & ACB_F_IOP_INITED)) msleep(20); firmware_state = readl(&reg->outbound_msgaddr1); } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0); } break; } } 
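/*
**********************************************************************************
** Periodic device-map refresh, re-armed from the eternal_timer every 6 seconds.
** Unless a bus reset, an abort, or an earlier GET_CONFIG request is still in
** flight, the handler below posts a "get config" message to the IOP so the
** message bottom half can compare drive maps and detect newly attached drives.
**********************************************************************************
*/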
static void arcmsr_request_device_map(struct timer_list *t)
{
	struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);

	if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			break;
		}
		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg = acb->pmuB;

			writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_D: {
			struct MessageUnit_D *reg = acb->pmuD;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
			break;
		}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = acb->pmuE;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_F: {
			struct MessageUnit_F __iomem *reg = acb->pmuF;
			uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);

			if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
				(outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
				goto nxt6s;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
		}
		default:
			return;
		}
		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
nxt6s:
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}

static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
			rebuild' timeout \n", acb->host->host_no);
	}
}

static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
			rebuild' timeout \n", acb->host->host_no);
	}
}

static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
			rebuild' timeout \n", pACB->host->host_no);
	}
	return;
}

static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *pmu = pACB->pmuD;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout\n", pACB->host->host_no);
	}
}

static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0); pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(pACB->out_doorbell, &pmu->iobound_doorbell); if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait 'start adapter " "background rebuild' timeout \n", pACB->host->host_no); } } static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_hbaA_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_hbaB_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_hbaC_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_hbaD_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: arcmsr_hbaE_start_bgrb(acb); break; } } static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; uint32_t outbound_doorbell; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); /*clear doorbell interrupt */ writel(outbound_doorbell, &reg->outbound_doorbell); writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; uint32_t outbound_doorbell, i; writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); /* let IOP know data has been read */ for(i=0; i < 200; i++) { msleep(20); outbound_doorbell = readl(reg->iop2drv_doorbell); if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); } else break; } } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C __iomem *reg = acb->pmuC; uint32_t outbound_doorbell, i; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); writel(outbound_doorbell, &reg->outbound_doorbell_clear); writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); for (i = 0; i < 200; i++) { msleep(20); outbound_doorbell = readl(&reg->outbound_doorbell); if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { writel(outbound_doorbell, &reg->outbound_doorbell_clear); writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); } else break; } } break; case ACB_ADAPTER_TYPE_D: { struct MessageUnit_D *reg = acb->pmuD; uint32_t outbound_doorbell, i; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(reg->outbound_doorbell); writel(outbound_doorbell, reg->outbound_doorbell); writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, reg->inbound_doorbell); for (i = 0; i < 200; i++) { msleep(20); outbound_doorbell = readl(reg->outbound_doorbell); if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) { writel(outbound_doorbell, reg->outbound_doorbell); writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, reg->inbound_doorbell); } else break; } } break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; uint32_t i, tmp; acb->in_doorbell = readl(&reg->iobound_doorbell); writel(0, &reg->host_int_status); /*clear interrupt*/ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; writel(acb->out_doorbell, &reg->iobound_doorbell); for(i=0; i < 200; i++) { msleep(20); tmp = acb->in_doorbell; acb->in_doorbell = readl(&reg->iobound_doorbell); if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { 
writel(0, &reg->host_int_status); /*clear interrupt*/ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; writel(acb->out_doorbell, &reg->iobound_doorbell); } else break; } } break; } } static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: return; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); return; } } break; case ACB_ADAPTER_TYPE_C: return; } return; } static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) { uint8_t value[64]; int i, count = 0; struct MessageUnit_A __iomem *pmuA = acb->pmuA; struct MessageUnit_C __iomem *pmuC = acb->pmuC; struct MessageUnit_D *pmuD = acb->pmuD; /* backup pci config data */ printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no); for (i = 0; i < 64; i++) { pci_read_config_byte(acb->pdev, i, &value[i]); } /* hardware reset signal */ if (acb->dev_id == 0x1680) { writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); } else if (acb->dev_id == 0x1880) { do { count++; writel(0xF, &pmuC->write_sequence); writel(0x4, &pmuC->write_sequence); writel(0xB, &pmuC->write_sequence); writel(0x2, &pmuC->write_sequence); writel(0x7, &pmuC->write_sequence); writel(0xD, &pmuC->write_sequence); } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); } else if (acb->dev_id == 0x1884) { struct MessageUnit_E __iomem *pmuE = acb->pmuE; do { count++; writel(0x4, &pmuE->write_sequence_3xxx); writel(0xB, &pmuE->write_sequence_3xxx); writel(0x2, &pmuE->write_sequence_3xxx); writel(0x7, &pmuE->write_sequence_3xxx); writel(0xD, &pmuE->write_sequence_3xxx); mdelay(10); } while (((readl(&pmuE->host_diagnostic_3xxx) & ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5)); writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx); } else if (acb->dev_id == 0x1214) { writel(0x20, pmuD->reset_request); } else { pci_write_config_byte(acb->pdev, 0x84, 0x20); } msleep(2000); /* write back pci config data */ for (i = 0; i < 64; i++) { pci_write_config_byte(acb->pdev, i, value[i]); } msleep(1000); return; } static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb) { bool rtn = true; switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A:{ struct MessageUnit_A __iomem *reg = acb->pmuA; rtn = ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false; } break; case ACB_ADAPTER_TYPE_B:{ struct MessageUnit_B *reg = acb->pmuB; rtn = ((readl(reg->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false; } break; case ACB_ADAPTER_TYPE_C:{ struct MessageUnit_C __iomem *reg = acb->pmuC; rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false; } break; case ACB_ADAPTER_TYPE_D:{ struct MessageUnit_D *reg = acb->pmuD; rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ? true : false; } break; case ACB_ADAPTER_TYPE_E: case ACB_ADAPTER_TYPE_F:{ struct MessageUnit_E __iomem *reg = acb->pmuE; rtn = (readl(&reg->host_diagnostic_3xxx) & ARCMSR_ARC188X_RESET_ADAPTER) ? 
true : false; } break; } return rtn; } static void arcmsr_iop_init(struct AdapterControlBlock *acb) { uint32_t intmask_org; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); acb->acb_flags |= ACB_F_IOP_INITED; } static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *ccb; uint32_t intmask_org; uint8_t rtnval = 0x00; int i = 0; unsigned long flags; if (atomic_read(&acb->ccboutstandingcount) != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); /* talk to iop 331 outstanding command aborted */ rtnval = arcmsr_abort_allcmd(acb); /* clear all outbound posted Q */ arcmsr_done4abort_postqueue(acb); for (i = 0; i < acb->maxFreeCCB; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { scsi_dma_unmap(ccb->pcmd); ccb->startdone = ARCMSR_CCB_DONE; ccb->ccb_flags = 0; spin_lock_irqsave(&acb->ccblist_lock, flags); list_add_tail(&ccb->list, &acb->ccb_free_list); spin_unlock_irqrestore(&acb->ccblist_lock, flags); } } atomic_set(&acb->ccboutstandingcount, 0); /* enable all outbound interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); return rtnval; } return rtnval; } static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb; int retry_count = 0; int rtn = FAILED; acb = (struct AdapterControlBlock *) cmd->device->host->hostdata; if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) return SUCCESS; pr_notice("arcmsr: executing bus reset eh.....num_resets = %d," " num_aborts = %d \n", acb->num_resets, acb->num_aborts); acb->num_resets++; if (acb->acb_flags & ACB_F_BUS_RESET) { long timeout; pr_notice("arcmsr: there is a bus reset eh proceeding...\n"); timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220 * HZ); if (timeout) return SUCCESS; } acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; wait_reset_done: ssleep(ARCMSR_SLEEPTIME); if (arcmsr_reset_in_progress(acb)) { if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; pr_notice("arcmsr%d: waiting for hw bus reset" " return, RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; goto wait_reset_done; } arcmsr_iop_init(acb); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; pr_notice("arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); rtn = SUCCESS; } return rtn; } static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) { int rtn; rtn = arcmsr_polling_ccbdone(acb, ccb); return rtn; } static int arcmsr_abort(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)cmd->device->host->hostdata; int i = 0; int rtn = FAILED; uint32_t intmask_org; if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) return SUCCESS; printk(KERN_NOTICE "arcmsr%d: abort device command of scsi id = %d lun = %d\n", acb->host->host_no, cmd->device->id, 
(u32)cmd->device->lun); acb->acb_flags |= ACB_F_ABORT; acb->num_aborts++; /* ************************************************ ** the all interrupt service routine is locked ** we need to handle it as soon as possible and exit ************************************************ */ if (!atomic_read(&acb->ccboutstandingcount)) { acb->acb_flags &= ~ACB_F_ABORT; return rtn; } intmask_org = arcmsr_disable_outbound_ints(acb); for (i = 0; i < acb->maxFreeCCB; i++) { struct CommandControlBlock *ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { ccb->startdone = ARCMSR_CCB_ABORTED; rtn = arcmsr_abort_one_cmd(acb, ccb); break; } } acb->acb_flags &= ~ACB_F_ABORT; arcmsr_enable_outbound_ints(acb, intmask_org); return rtn; } static const char *arcmsr_info(struct Scsi_Host *host) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; static char buf[256]; char *type; int raid6 = 1; switch (acb->pdev->device) { case PCI_DEVICE_ID_ARECA_1110: case PCI_DEVICE_ID_ARECA_1200: case PCI_DEVICE_ID_ARECA_1202: case PCI_DEVICE_ID_ARECA_1210: raid6 = 0; fallthrough; case PCI_DEVICE_ID_ARECA_1120: case PCI_DEVICE_ID_ARECA_1130: case PCI_DEVICE_ID_ARECA_1160: case PCI_DEVICE_ID_ARECA_1170: case PCI_DEVICE_ID_ARECA_1201: case PCI_DEVICE_ID_ARECA_1203: case PCI_DEVICE_ID_ARECA_1220: case PCI_DEVICE_ID_ARECA_1230: case PCI_DEVICE_ID_ARECA_1260: case PCI_DEVICE_ID_ARECA_1270: case PCI_DEVICE_ID_ARECA_1280: type = "SATA"; break; case PCI_DEVICE_ID_ARECA_1214: case PCI_DEVICE_ID_ARECA_1380: case PCI_DEVICE_ID_ARECA_1381: case PCI_DEVICE_ID_ARECA_1680: case PCI_DEVICE_ID_ARECA_1681: case PCI_DEVICE_ID_ARECA_1880: case PCI_DEVICE_ID_ARECA_1884: type = "SAS/SATA"; break; case PCI_DEVICE_ID_ARECA_1886: type = "NVMe/SAS/SATA"; break; default: type = "unknown"; raid6 = 0; break; } sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n", type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); return buf; }
linux-master
drivers/scsi/arcmsr/arcmsr_hba.c
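A hedged sketch (not from the driver) of the bounded-retry pattern that arcmsr_bus_reset() above implements with its wait_reset_done label: sleep, poll whether the hardware reset is still in progress, and give up after a fixed number of attempts. The helper name, callback types, and RESET_POLL_RETRIES value are illustrative assumptions, not arcmsr definitions.

/* Illustrative sketch only -- not part of arcmsr. */
#include <stdbool.h>

#define RESET_POLL_RETRIES 12	/* assumed bound, stands in for ARCMSR_RETRYCOUNT */

bool poll_reset_done(bool (*reset_in_progress)(void *ctx),
		     void (*sleep_seconds)(unsigned int secs),
		     void *ctx)
{
	int retry;

	for (retry = 0; retry <= RESET_POLL_RETRIES; retry++) {
		sleep_seconds(10);	/* interval illustrative; the driver uses ssleep(ARCMSR_SLEEPTIME) */
		if (!reset_in_progress(ctx))
			return true;	/* adapter came back: caller re-inits and resumes */
	}
	return false;			/* exhausted retries: the driver treats this as a firmware deadlock */
}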
/* ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_attr.c ** BY : Nick Cheng ** Description: attributes exported to sysfs and device host ******************************************************************************* ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved ** ** Web site: www.areca.com.tw ** E-mail: [email protected] ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License version 2 as ** published by the Free Software Foundation. ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ******************************************************************************* ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************* ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst ******************************************************************************* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/circ_buf.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include "arcmsr.h" static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj,struct device,kobj); struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; uint8_t *ptmpQbuffer; int32_t allxfer_len = 0; unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; /* do message unit read. 
*/ ptmpQbuffer = (uint8_t *)buf; spin_lock_irqsave(&acb->rqbuffer_lock, flags); if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { unsigned int tail = acb->rqbuf_getIndex; unsigned int head = acb->rqbuf_putIndex; unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); if (allxfer_len > ARCMSR_API_DATA_BUFLEN) allxfer_len = ARCMSR_API_DATA_BUFLEN; if (allxfer_len <= cnt_to_end) memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); else { memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); } acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER __iomem *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); return allxfer_len; } static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj,struct device,kobj); struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; int32_t user_len, cnt2end; uint8_t *pQbuffer, *ptmpuserbuffer; unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (count > ARCMSR_API_DATA_BUFLEN) return -EINVAL; /* do message unit write. */ ptmpuserbuffer = (uint8_t *)buf; user_len = (int32_t)count; spin_lock_irqsave(&acb->wqbuffer_lock, flags); if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { arcmsr_write_ioctldata2iop(acb); spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); return 0; /*need retry*/ } else { pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; if (user_len > cnt2end) { memcpy(pQbuffer, ptmpuserbuffer, cnt2end); ptmpuserbuffer += cnt2end; user_len -= cnt2end; acb->wqbuf_putIndex = 0; pQbuffer = acb->wqbuffer; } memcpy(pQbuffer, ptmpuserbuffer, user_len); acb->wqbuf_putIndex += user_len; acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER; if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_write_ioctldata2iop(acb); } spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); return count; } } static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp, struct kobject *kobj, struct bin_attribute *bin, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj,struct device,kobj); struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; uint8_t *pQbuffer; unsigned long flags; if (!capable(CAP_SYS_ADMIN)) return -EACCES; arcmsr_clear_iop2drv_rqueue_buffer(acb); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); spin_lock_irqsave(&acb->rqbuffer_lock, flags); acb->rqbuf_getIndex = 0; acb->rqbuf_putIndex = 0; spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); spin_lock_irqsave(&acb->wqbuffer_lock, flags); acb->wqbuf_getIndex = 0; acb->wqbuf_putIndex = 0; spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); return 1; } static const struct 
bin_attribute arcmsr_sysfs_message_read_attr = { .attr = { .name = "mu_read", .mode = S_IRUSR , }, .size = ARCMSR_API_DATA_BUFLEN, .read = arcmsr_sysfs_iop_message_read, }; static const struct bin_attribute arcmsr_sysfs_message_write_attr = { .attr = { .name = "mu_write", .mode = S_IWUSR, }, .size = ARCMSR_API_DATA_BUFLEN, .write = arcmsr_sysfs_iop_message_write, }; static const struct bin_attribute arcmsr_sysfs_message_clear_attr = { .attr = { .name = "mu_clear", .mode = S_IWUSR, }, .size = 1, .write = arcmsr_sysfs_iop_message_clear, }; int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb) { struct Scsi_Host *host = acb->host; int error; error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); if (error) { printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n"); goto error_bin_file_message_read; } error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); if (error) { printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n"); goto error_bin_file_message_write; } error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr); if (error) { printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n"); goto error_bin_file_message_clear; } return 0; error_bin_file_message_clear: sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); error_bin_file_message_write: sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); error_bin_file_message_read: return error; } void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) { struct Scsi_Host *host = acb->host; sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr); sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr); sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr); } static ssize_t arcmsr_attr_host_driver_version(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", ARCMSR_DRIVER_VERSION); } static ssize_t arcmsr_attr_host_driver_posted_cmd(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", atomic_read(&acb->ccboutstandingcount)); } static ssize_t arcmsr_attr_host_driver_reset(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", acb->num_resets); } static ssize_t arcmsr_attr_host_driver_abort(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", acb->num_aborts); } static ssize_t arcmsr_attr_host_fw_model(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%s\n", acb->firm_model); } static ssize_t arcmsr_attr_host_fw_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%s\n", acb->firm_version); } static ssize_t 
arcmsr_attr_host_fw_request_len(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", acb->firm_request_len); } static ssize_t arcmsr_attr_host_fw_numbers_queue(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", acb->firm_numbers_queue); } static ssize_t arcmsr_attr_host_fw_sdram_size(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", acb->firm_sdram_size); } static ssize_t arcmsr_attr_host_fw_hd_channels(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, "%4d\n", acb->firm_hd_channels); } static DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL); static DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL); static DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL); static DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL); static DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL); static DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL); static DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL); static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL); static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL); static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL); static struct attribute *arcmsr_host_attrs[] = { &dev_attr_host_driver_version.attr, &dev_attr_host_driver_posted_cmd.attr, &dev_attr_host_driver_reset.attr, &dev_attr_host_driver_abort.attr, &dev_attr_host_fw_model.attr, &dev_attr_host_fw_version.attr, &dev_attr_host_fw_request_len.attr, &dev_attr_host_fw_numbers_queue.attr, &dev_attr_host_fw_sdram_size.attr, &dev_attr_host_fw_hd_channels.attr, NULL, }; static const struct attribute_group arcmsr_host_attr_group = { .attrs = arcmsr_host_attrs, }; const struct attribute_group *arcmsr_host_groups[] = { &arcmsr_host_attr_group, NULL };
linux-master
drivers/scsi/arcmsr/arcmsr_attr.c
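A hedged userspace sketch showing how the attributes registered above are typically consumed. The bin files (mu_read, mu_write, mu_clear) and the host_* device attributes live under the Scsi_Host's sysfs directory; "host0" and the buffer size are assumptions that depend on the actual system, not values taken from the driver.

/* Userspace example; paths assume the adapter probed as host0. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Plain device attribute, served by arcmsr_attr_host_fw_version(). */
	fd = open("/sys/class/scsi_host/host0/host_fw_version", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("firmware: %s", buf);
		}
		close(fd);
	}

	/*
	 * Binary attribute, served by arcmsr_sysfs_iop_message_read();
	 * the handler itself enforces CAP_SYS_ADMIN.
	 */
	fd = open("/sys/class/scsi_host/host0/mu_read", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf));
		printf("mu_read returned %zd bytes\n", n);
		close(fd);
	}
	return 0;
}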
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell 88SE94xx hardware specific * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. <[email protected]> * Copyright 2009-2011 Marvell. <[email protected]> */ #include "mv_sas.h" #include "mv_94xx.h" #include "mv_chips.h" static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) { u32 reg; struct mvs_phy *phy = &mvi->phy[i]; u32 phy_status; mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3); reg = mvs_read_port_vsr_data(mvi, i); phy_status = ((reg & 0x3f0000) >> 16) & 0xff; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); switch (phy_status) { case 0x10: phy->phy_type |= PORT_TYPE_SAS; break; case 0x1d: default: phy->phy_type |= PORT_TYPE_SATA; break; } } static void set_phy_tuning(struct mvs_info *mvi, int phy_id, struct phy_tuning phy_tuning) { u32 tmp, setting_0 = 0, setting_1 = 0; u8 i; /* Remap information for B0 chip: * * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient) * R0Dh -> R118h[31:16] (Generation 1 Setting 0) * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1) * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0) * R10h -> R120h[15:0] (Generation 2 Setting 1) * R11h -> R120h[31:16] (Generation 3 Setting 0) * R12h -> R124h[15:0] (Generation 3 Setting 1) * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved)) */ /* A0 has a different set of registers */ if (mvi->pdev->revision == VANIR_A0_REV) return; for (i = 0; i < 3; i++) { /* loop 3 times, set Gen 1, Gen 2, Gen 3 */ switch (i) { case 0: setting_0 = GENERATION_1_SETTING; setting_1 = GENERATION_1_2_SETTING; break; case 1: setting_0 = GENERATION_1_2_SETTING; setting_1 = GENERATION_2_3_SETTING; break; case 2: setting_0 = GENERATION_2_3_SETTING; setting_1 = GENERATION_3_4_SETTING; break; } /* Set: * * Transmitter Emphasis Enable * Transmitter Emphasis Amplitude * Transmitter Amplitude */ mvs_write_port_vsr_addr(mvi, phy_id, setting_0); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp &= ~(0xFBE << 16); tmp |= (((phy_tuning.trans_emp_en << 11) | (phy_tuning.trans_emp_amp << 7) | (phy_tuning.trans_amp << 1)) << 16); mvs_write_port_vsr_data(mvi, phy_id, tmp); /* Set Transmitter Amplitude Adjust */ mvs_write_port_vsr_addr(mvi, phy_id, setting_1); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp &= ~(0xC000); tmp |= (phy_tuning.trans_amp_adj << 14); mvs_write_port_vsr_data(mvi, phy_id, tmp); } } static void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id, struct ffe_control ffe) { u32 tmp; /* Don't run this if A0/B0 */ if ((mvi->pdev->revision == VANIR_A0_REV) || (mvi->pdev->revision == VANIR_B0_REV)) return; /* FFE Resistor and Capacitor */ /* R10Ch DFE Resolution Control/Squelch and FFE Setting * * FFE_FORCE [7] * FFE_RES_SEL [6:4] * FFE_CAP_SEL [3:0] */ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp &= ~0xFF; /* Read from HBA_Info_Page */ tmp |= ((0x1 << 7) | (ffe.ffe_rss_sel << 4) | (ffe.ffe_cap_sel << 0)); mvs_write_port_vsr_data(mvi, phy_id, tmp); /* R064h PHY Mode Register 1 * * DFE_DIS 18 */ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp &= ~0x40001; /* Hard coding */ /* No defines in HBA_Info_Page */ tmp |= (0 << 18); mvs_write_port_vsr_data(mvi, phy_id, tmp); /* R110h DFE F0-F1 Coefficient Control/DFE Update Control * * DFE_UPDATE_EN [11:6] * DFE_FX_FORCE [5:0] */ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp &= ~0xFFF; /* Hard coding */ /* No defines in HBA_Info_Page */ tmp |= ((0x3F << 
6) | (0x0 << 0)); mvs_write_port_vsr_data(mvi, phy_id, tmp); /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h * * FFE_TRAIN_EN 3 */ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp &= ~0x8; /* Hard coding */ /* No defines in HBA_Info_Page */ tmp |= (0 << 3); mvs_write_port_vsr_data(mvi, phy_id, tmp); } /*Notice: this function must be called when phy is disabled*/ static void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate) { union reg_phy_cfg phy_cfg, phy_cfg_tmp; mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id); phy_cfg.v = 0; phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy; phy_cfg.u.sas_support = 1; phy_cfg.u.sata_support = 1; phy_cfg.u.sata_host_mode = 1; switch (rate) { case 0x0: /* support 1.5 Gbps */ phy_cfg.u.speed_support = 1; phy_cfg.u.snw_3_support = 0; phy_cfg.u.tx_lnk_parity = 1; phy_cfg.u.tx_spt_phs_lnk_rate = 0x30; break; case 0x1: /* support 1.5, 3.0 Gbps */ phy_cfg.u.speed_support = 3; phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c; phy_cfg.u.tx_lgcl_lnk_rate = 0x08; break; case 0x2: default: /* support 1.5, 3.0, 6.0 Gbps */ phy_cfg.u.speed_support = 7; phy_cfg.u.snw_3_support = 1; phy_cfg.u.tx_lnk_parity = 1; phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f; phy_cfg.u.tx_lgcl_lnk_rate = 0x09; break; } mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v); } static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id) { u32 temp; temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]); if (temp == 0xFFFFFFFFL) { mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6; mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A; mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3; } temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]); if (temp == 0xFFL) { switch (mvi->pdev->revision) { case VANIR_A0_REV: case VANIR_B0_REV: mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7; mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7; break; case VANIR_C0_REV: case VANIR_C1_REV: case VANIR_C2_REV: default: mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7; mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC; break; } } temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]); if (temp == 0xFFL) /*set default phy_rate = 6Gbps*/ mvi->hba_info_param.phy_rate[phy_id] = 0x2; set_phy_tuning(mvi, phy_id, mvi->hba_info_param.phy_tuning[phy_id]); set_phy_ffe_tuning(mvi, phy_id, mvi->hba_info_param.ffe_ctl[phy_id]); set_phy_rate(mvi, phy_id, mvi->hba_info_param.phy_rate[phy_id]); } static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) { void __iomem *regs = mvi->regs; u32 tmp; tmp = mr32(MVS_PCS); tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); mw32(MVS_PCS, tmp); } static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) { u32 tmp; u32 delay = 5000; if (hard == MVS_PHY_TUNE) { mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL); tmp = mvs_read_port_cfg_data(mvi, phy_id); mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000); mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000); return; } tmp = mvs_read_port_irq_stat(mvi, phy_id); tmp &= ~PHYEV_RDY_CH; mvs_write_port_irq_stat(mvi, phy_id, tmp); if (hard) { tmp = mvs_read_phy_ctl(mvi, phy_id); tmp |= PHY_RST_HARD; mvs_write_phy_ctl(mvi, phy_id, tmp); do { tmp = mvs_read_phy_ctl(mvi, phy_id); udelay(10); delay--; } while ((tmp & PHY_RST_HARD) && delay); if (!delay) mv_dprintk("phy hard reset failed.\n"); } else { tmp = mvs_read_phy_ctl(mvi, phy_id); tmp |= PHY_RST; 
mvs_write_phy_ctl(mvi, phy_id, tmp); } } static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) { u32 tmp; mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); tmp = mvs_read_port_vsr_data(mvi, phy_id); mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000); } static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) { u32 tmp; u8 revision = 0; revision = mvi->pdev->revision; if (revision == VANIR_A0_REV) { mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA); mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); } if (revision == VANIR_B0_REV) { mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL); mvs_write_port_vsr_data(mvi, phy_id, 0x08001006); mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA); mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f); } mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); tmp = mvs_read_port_vsr_data(mvi, phy_id); tmp |= bit(0); mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff); } static void mvs_94xx_sgpio_init(struct mvs_info *mvi) { void __iomem *regs = mvi->regs_ex - 0x10200; u32 tmp; tmp = mr32(MVS_HST_CHIP_CONFIG); tmp |= 0x100; mw32(MVS_HST_CHIP_CONFIG, tmp); mw32(MVS_SGPIO_CTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, MVS_SGPIO_CTRL_SDOUT_AUTO << MVS_SGPIO_CTRL_SDOUT_SHIFT); mw32(MVS_SGPIO_CFG1 + MVS_SGPIO_HOST_OFFSET * mvi->id, 8 << MVS_SGPIO_CFG1_LOWA_SHIFT | 8 << MVS_SGPIO_CFG1_HIA_SHIFT | 4 << MVS_SGPIO_CFG1_LOWB_SHIFT | 4 << MVS_SGPIO_CFG1_HIB_SHIFT | 2 << MVS_SGPIO_CFG1_MAXACTON_SHIFT | 1 << MVS_SGPIO_CFG1_FORCEACTOFF_SHIFT ); mw32(MVS_SGPIO_CFG2 + MVS_SGPIO_HOST_OFFSET * mvi->id, (300000 / 100) << MVS_SGPIO_CFG2_CLK_SHIFT | /* 100kHz clock */ 66 << MVS_SGPIO_CFG2_BLINK_SHIFT /* (66 * 0,121 Hz?)*/ ); mw32(MVS_SGPIO_CFG0 + MVS_SGPIO_HOST_OFFSET * mvi->id, MVS_SGPIO_CFG0_ENABLE | MVS_SGPIO_CFG0_BLINKA | MVS_SGPIO_CFG0_BLINKB | /* 3*4 data bits / PDU */ (12 - 1) << MVS_SGPIO_CFG0_AUT_BITLEN_SHIFT ); mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, DEFAULT_SGPIO_BITS); mw32(MVS_SGPIO_DSRC + MVS_SGPIO_HOST_OFFSET * mvi->id, ((mvi->id * 4) + 3) << (8 * 3) | ((mvi->id * 4) + 2) << (8 * 2) | ((mvi->id * 4) + 1) << (8 * 1) | ((mvi->id * 4) + 0) << (8 * 0)); } static int mvs_94xx_init(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; int i; u32 tmp, cctl; u8 revision; revision = mvi->pdev->revision; mvs_show_pcie_usage(mvi); if (mvi->flags & MVF_FLAG_SOC) { tmp = mr32(MVS_PHY_CTL); tmp &= ~PCTL_PWR_OFF; tmp |= PCTL_PHY_DSBL; mw32(MVS_PHY_CTL, tmp); } /* Init Chip */ /* make sure RST is set; HBA_RST /should/ have done that for us */ cctl = mr32(MVS_CTL) & 0xFFFF; if (cctl & CCTL_RST) cctl &= ~CCTL_RST; else mw32_f(MVS_CTL, cctl | CCTL_RST); if (mvi->flags & MVF_FLAG_SOC) { tmp = mr32(MVS_PHY_CTL); tmp &= ~PCTL_PWR_OFF; tmp |= PCTL_COM_ON; tmp &= ~PCTL_PHY_DSBL; tmp |= PCTL_LINK_RST; mw32(MVS_PHY_CTL, tmp); msleep(100); tmp &= ~PCTL_LINK_RST; mw32(MVS_PHY_CTL, tmp); msleep(100); } /* disable Multiplexing, enable phy implemented */ mw32(MVS_PORTS_IMP, 0xFF); if (revision == VANIR_A0_REV) { mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET); mw32(MVS_PA_VSR_PORT, 0x00018080); } mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2); if (revision == VANIR_A0_REV || revision == VANIR_B0_REV) /* set 6G/3G/1.5G, multiplexing, without SSC */ mw32(MVS_PA_VSR_PORT, 0x0084d4fe); else /* set 6G/3G/1.5G, multiplexing, with and without SSC */ mw32(MVS_PA_VSR_PORT, 0x0084fffe); if (revision == VANIR_B0_REV) { mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL); mw32(MVS_PA_VSR_PORT, 0x08001006); mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA); mw32(MVS_PA_VSR_PORT, 0x0000705f); } /* 
reset control */ mw32(MVS_PCS, 0); /* MVS_PCS */ mw32(MVS_STP_REG_SET_0, 0); mw32(MVS_STP_REG_SET_1, 0); /* init phys */ mvs_phy_hacks(mvi); /* disable non data frame retry */ tmp = mvs_cr32(mvi, CMD_SAS_CTL1); if ((revision == VANIR_A0_REV) || (revision == VANIR_B0_REV) || (revision == VANIR_C0_REV)) { tmp &= ~0xffff; tmp |= 0x007f; mvs_cw32(mvi, CMD_SAS_CTL1, tmp); } /* set LED blink when IO*/ mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED); tmp = mr32(MVS_PA_VSR_PORT); tmp &= 0xFFFF00FF; tmp |= 0x00003300; mw32(MVS_PA_VSR_PORT, tmp); mw32(MVS_CMD_LIST_LO, mvi->slot_dma); mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); mw32(MVS_TX_LO, mvi->tx_dma); mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); mw32(MVS_RX_CFG, MVS_RX_RING_SZ); mw32(MVS_RX_LO, mvi->rx_dma); mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); for (i = 0; i < mvi->chip->n_phy; i++) { mvs_94xx_phy_disable(mvi, i); /* set phy local SAS address */ mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, cpu_to_le64(mvi->phy[i].dev_sas_addr)); mvs_94xx_enable_xmt(mvi, i); mvs_94xx_config_reg_from_hba(mvi, i); mvs_94xx_phy_enable(mvi, i); mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD); msleep(500); mvs_94xx_detect_porttype(mvi, i); } if (mvi->flags & MVF_FLAG_SOC) { /* set select registers */ writel(0x0E008000, regs + 0x000); writel(0x59000008, regs + 0x004); writel(0x20, regs + 0x008); writel(0x20, regs + 0x00c); writel(0x20, regs + 0x010); writel(0x20, regs + 0x014); writel(0x20, regs + 0x018); writel(0x20, regs + 0x01c); } for (i = 0; i < mvi->chip->n_phy; i++) { /* clear phy int status */ tmp = mvs_read_port_irq_stat(mvi, i); tmp &= ~PHYEV_SIG_FIS; mvs_write_port_irq_stat(mvi, i, tmp); /* set phy int mask */ tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ; mvs_write_port_irq_mask(mvi, i, tmp); msleep(100); mvs_update_phyinfo(mvi, i, 1); } /* little endian for open address and command table, etc. */ cctl = mr32(MVS_CTL); cctl |= CCTL_ENDIAN_CMD; cctl &= ~CCTL_ENDIAN_OPEN; cctl |= CCTL_ENDIAN_RSP; mw32_f(MVS_CTL, cctl); /* reset CMD queue */ tmp = mr32(MVS_PCS); tmp |= PCS_CMD_RST; tmp &= ~PCS_SELF_CLEAR; mw32(MVS_PCS, tmp); /* * the max count is 0x1ff, while our max slot is 0x200, * it will make count 0. 
*/ tmp = 0; if (MVS_CHIP_SLOT_SZ > 0x1ff) mw32(MVS_INT_COAL, 0x1ff | COAL_EN); else mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN); /* default interrupt coalescing time is 128us */ tmp = 0x10000 | interrupt_coalescing; mw32(MVS_INT_COAL_TMOUT, tmp); /* ladies and gentlemen, start your engines */ mw32(MVS_TX_CFG, 0); mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); /* enable CMD/CMPL_Q/RESP mode */ mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN | PCS_CMD_EN | PCS_CMD_STOP_ERR); /* enable completion queue interrupt */ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR); tmp |= CINT_PHY_MASK; mw32(MVS_INT_MASK, tmp); tmp = mvs_cr32(mvi, CMD_LINK_TIMER); tmp |= 0xFFFF0000; mvs_cw32(mvi, CMD_LINK_TIMER, tmp); /* tune STP performance */ tmp = 0x003F003F; mvs_cw32(mvi, CMD_PL_TIMER, tmp); /* This can improve expander large block size seq write performance */ tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1); tmp |= 0xFFFF007F; mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp); /* change the connection open-close behavior (bit 9) * set bit8 to 1 for performance tuning */ tmp = mvs_cr32(mvi, CMD_SL_MODE0); tmp |= 0x00000300; /* set bit0 to 0 to enable retry for no_dest reject case */ tmp &= 0xFFFFFFFE; mvs_cw32(mvi, CMD_SL_MODE0, tmp); /* Enable SRS interrupt */ mw32(MVS_INT_MASK_SRS_0, 0xFFFF); mvs_94xx_sgpio_init(mvi); return 0; } static int mvs_94xx_ioremap(struct mvs_info *mvi) { if (!mvs_ioremap(mvi, 2, -1)) { mvi->regs_ex = mvi->regs + 0x10200; mvi->regs += 0x20000; if (mvi->id == 1) mvi->regs += 0x4000; return 0; } return -1; } static void mvs_94xx_iounmap(struct mvs_info *mvi) { if (mvi->regs) { mvi->regs -= 0x20000; if (mvi->id == 1) mvi->regs -= 0x4000; mvs_iounmap(mvi->regs); } } static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) { void __iomem *regs = mvi->regs_ex; u32 tmp; tmp = mr32(MVS_GBL_CTL); tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); mw32(MVS_GBL_INT_STAT, tmp); writel(tmp, regs + 0x0C); writel(tmp, regs + 0x10); writel(tmp, regs + 0x14); writel(tmp, regs + 0x18); mw32(MVS_GBL_CTL, tmp); } static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) { void __iomem *regs = mvi->regs_ex; u32 tmp; tmp = mr32(MVS_GBL_CTL); tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); mw32(MVS_GBL_INT_STAT, tmp); writel(tmp, regs + 0x0C); writel(tmp, regs + 0x10); writel(tmp, regs + 0x14); writel(tmp, regs + 0x18); mw32(MVS_GBL_CTL, tmp); } static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) { void __iomem *regs = mvi->regs_ex; u32 stat = 0; if (!(mvi->flags & MVF_FLAG_SOC)) { stat = mr32(MVS_GBL_INT_STAT); if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B))) return 0; } return stat; } static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) { void __iomem *regs = mvi->regs; if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) || ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) { mw32_f(MVS_INT_STAT, CINT_DONE); spin_lock(&mvi->lock); mvs_int_full(mvi); spin_unlock(&mvi->lock); } return IRQ_HANDLED; } static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) { u32 tmp; tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); if (tmp & 1 << (slot_idx % 32)) { mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), 1 << (slot_idx % 32)); do { tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3)); } while (tmp & 1 << (slot_idx % 32)); } } static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) { void __iomem 
*regs = mvi->regs; u32 tmp; if (clear_all) { tmp = mr32(MVS_INT_STAT_SRS_0); if (tmp) { mv_dprintk("check SRS 0 %08X.\n", tmp); mw32(MVS_INT_STAT_SRS_0, tmp); } tmp = mr32(MVS_INT_STAT_SRS_1); if (tmp) { mv_dprintk("check SRS 1 %08X.\n", tmp); mw32(MVS_INT_STAT_SRS_1, tmp); } } else { if (reg_set > 31) tmp = mr32(MVS_INT_STAT_SRS_1); else tmp = mr32(MVS_INT_STAT_SRS_0); if (tmp & (1 << (reg_set % 32))) { mv_dprintk("register set 0x%x was stopped.\n", reg_set); if (reg_set > 31) mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32)); else mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32)); } } } static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, u32 tfs) { void __iomem *regs = mvi->regs; u32 tmp; mvs_94xx_clear_srs_irq(mvi, 0, 1); tmp = mr32(MVS_INT_STAT); mw32(MVS_INT_STAT, tmp | CINT_CI_STOP); tmp = mr32(MVS_PCS) | 0xFF00; mw32(MVS_PCS, tmp); } static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; u32 err_0, err_1; u8 i; struct mvs_device *device; err_0 = mr32(MVS_NON_NCQ_ERR_0); err_1 = mr32(MVS_NON_NCQ_ERR_1); mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n", err_0, err_1); for (i = 0; i < 32; i++) { if (err_0 & bit(i)) { device = mvs_find_dev_by_reg_set(mvi, i); if (device) mvs_release_task(mvi, device->sas_device); } if (err_1 & bit(i)) { device = mvs_find_dev_by_reg_set(mvi, i+32); if (device) mvs_release_task(mvi, device->sas_device); } } mw32(MVS_NON_NCQ_ERR_0, err_0); mw32(MVS_NON_NCQ_ERR_1, err_1); } static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) { void __iomem *regs = mvi->regs; u8 reg_set = *tfs; if (*tfs == MVS_ID_NOT_MAPPED) return; mvi->sata_reg_set &= ~bit(reg_set); if (reg_set < 32) w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); else w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32)); *tfs = MVS_ID_NOT_MAPPED; return; } static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) { int i; void __iomem *regs = mvi->regs; if (*tfs != MVS_ID_NOT_MAPPED) return 0; i = mv_ffc64(mvi->sata_reg_set); if (i >= 32) { mvi->sata_reg_set |= bit(i); w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); *tfs = i; return 0; } else if (i >= 0) { mvi->sata_reg_set |= bit(i); w_reg_set_enable(i, (u32)mvi->sata_reg_set); *tfs = i; return 0; } return MVS_ID_NOT_MAPPED; } static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) { int i; struct scatterlist *sg; struct mvs_prd *buf_prd = prd; struct mvs_prd_imt im_len; *(u32 *)&im_len = 0; for_each_sg(scatter, sg, nr, i) { buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); im_len.len = sg_dma_len(sg); buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); buf_prd++; } } static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) { u32 phy_st; phy_st = mvs_read_phy_ctl(mvi, i); if (phy_st & PHY_READY_MASK) return 1; return 0; } static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, struct sas_identify_frame *id) { int i; u32 id_frame[7]; for (i = 0; i < 7; i++) { mvs_write_port_cfg_addr(mvi, port_id, CONFIG_ID_FRAME0 + i * 4); id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id)); } memcpy(id, id_frame, 28); } static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, struct sas_identify_frame *id) { int i; u32 id_frame[7]; for (i = 0; i < 7; i++) { mvs_write_port_cfg_addr(mvi, port_id, CONFIG_ATT_ID_FRAME0 + i * 4); id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id)); mv_dprintk("94xx phy %d atta frame %d %x.\n", port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); } 
memcpy(id, id_frame, 28); } static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) { u32 att_dev_info = 0; att_dev_info |= id->dev_type; if (id->stp_iport) att_dev_info |= PORT_DEV_STP_INIT; if (id->smp_iport) att_dev_info |= PORT_DEV_SMP_INIT; if (id->ssp_iport) att_dev_info |= PORT_DEV_SSP_INIT; if (id->stp_tport) att_dev_info |= PORT_DEV_STP_TRGT; if (id->smp_tport) att_dev_info |= PORT_DEV_SMP_TRGT; if (id->ssp_tport) att_dev_info |= PORT_DEV_SSP_TRGT; att_dev_info |= (u32)id->phy_id<<24; return att_dev_info; } static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) { return mvs_94xx_make_dev_info(id); } static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, struct sas_identify_frame *id) { struct mvs_phy *phy = &mvi->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status); sas_phy->linkrate = (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; sas_phy->linkrate += 0x8; mv_dprintk("get link rate is %d\n", sas_phy->linkrate); phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; mvs_94xx_get_dev_identify_frame(mvi, i, id); phy->dev_info = mvs_94xx_make_dev_info(id); if (phy->phy_type & PORT_TYPE_SAS) { mvs_94xx_get_att_identify_frame(mvi, i, id); phy->att_dev_info = mvs_94xx_make_att_info(id); phy->att_dev_sas_addr = *(u64 *)id->sas_addr; } else { phy->att_dev_info = PORT_DEV_STP_TRGT | 1; } /* enable spin up bit */ mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); mvs_write_port_cfg_data(mvi, i, 0x04); } static void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, struct sas_phy_linkrates *rates) { u32 lrmax = 0; u32 tmp; tmp = mvs_read_phy_ctl(mvi, phy_id); lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12; if (lrmax) { tmp &= ~(0x3 << 12); tmp |= lrmax; } mvs_write_phy_ctl(mvi, phy_id, tmp); mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD); } static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) { u32 tmp; void __iomem *regs = mvi->regs; tmp = mr32(MVS_STP_REG_SET_0); mw32(MVS_STP_REG_SET_0, 0); mw32(MVS_STP_REG_SET_0, tmp); tmp = mr32(MVS_STP_REG_SET_1); mw32(MVS_STP_REG_SET_1, 0); mw32(MVS_STP_REG_SET_1, tmp); } static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) { void __iomem *regs = mvi->regs_ex - 0x10200; return mr32(SPI_RD_DATA_REG_94XX); } static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) { void __iomem *regs = mvi->regs_ex - 0x10200; mw32(SPI_RD_DATA_REG_94XX, data); } static int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, u32 *dwCmd, u8 cmd, u8 read, u8 length, u32 addr ) { void __iomem *regs = mvi->regs_ex - 0x10200; u32 dwTmp; dwTmp = ((u32)cmd << 8) | ((u32)length << 4); if (read) dwTmp |= SPI_CTRL_READ_94XX; if (addr != MV_MAX_U32) { mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); dwTmp |= SPI_ADDR_VLD_94XX; } *dwCmd = dwTmp; return 0; } static int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) { void __iomem *regs = mvi->regs_ex - 0x10200; mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); return 0; } static int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) { void __iomem *regs = mvi->regs_ex - 0x10200; u32 i, dwTmp; for (i = 0; i < timeout; i++) { dwTmp = mr32(SPI_CTRL_REG_94XX); if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) return 0; msleep(10); } return -1; } static void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, int buf_len, int from, void *prd) { int i; struct mvs_prd *buf_prd = prd; dma_addr_t buf_dma; struct 
mvs_prd_imt im_len; *(u32 *)&im_len = 0; buf_prd += from; #define PRD_CHAINED_ENTRY 0x01 if ((mvi->pdev->revision == VANIR_A0_REV) || (mvi->pdev->revision == VANIR_B0_REV)) buf_dma = (phy_mask <= 0x08) ? mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1; else return; for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) { if (i == MAX_SG_ENTRY - 1) { buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1)); im_len.len = 2; im_len.misc_ctl = PRD_CHAINED_ENTRY; } else { buf_prd->addr = cpu_to_le64(buf_dma); im_len.len = buf_len; } buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); } } static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time) { void __iomem *regs = mvi->regs; u32 tmp = 0; /* * the max count is 0x1ff, while our max slot is 0x200, * it will make count 0. */ if (time == 0) { mw32(MVS_INT_COAL, 0); mw32(MVS_INT_COAL_TMOUT, 0x10000); } else { if (MVS_CHIP_SLOT_SZ > 0x1ff) mw32(MVS_INT_COAL, 0x1ff|COAL_EN); else mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); tmp = 0x10000 | time; mw32(MVS_INT_COAL_TMOUT, tmp); } } static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { int i; switch (reg_type) { case SAS_GPIO_REG_TX_GP: if (reg_index == 0) return -EINVAL; if (reg_count > 1) return -EINVAL; if (reg_count == 0) return 0; /* maximum supported bits = hosts * 4 drives * 3 bits */ for (i = 0; i < mvs_prv->n_host * 4 * 3; i++) { /* select host */ struct mvs_info *mvi = mvs_prv->mvi[i/(4*3)]; void __iomem *regs = mvi->regs_ex - 0x10200; int drive = (i/3) & (4-1); /* drive number on host */ int driveshift = drive * 8; /* bit offset of drive */ u32 block = ioread32be(regs + MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id); /* * if bit is set then create a mask with the first * bit of the drive set in the mask ... */ u32 bit = get_unaligned_be32(write_data) & (1 << i) ? 1 << driveshift : 0; /* * ... 
and then shift it to the right position based * on the led type (activity/id/fail) */ switch (i%3) { case 0: /* activity */ block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT) << driveshift); /* hardwire activity bit to SOF */ block |= LED_BLINKA_SOF << ( MVS_SGPIO_DCTRL_ACT_SHIFT + driveshift); break; case 1: /* id */ block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT) << driveshift); block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT; break; case 2: /* fail */ block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT) << driveshift); block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT; break; } iowrite32be(block, regs + MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id); } return reg_count; case SAS_GPIO_REG_TX: if (reg_index + reg_count > mvs_prv->n_host) return -EINVAL; for (i = 0; i < reg_count; i++) { struct mvs_info *mvi = mvs_prv->mvi[i+reg_index]; void __iomem *regs = mvi->regs_ex - 0x10200; mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, ((u32 *) write_data)[i]); } return reg_count; } return -ENOSYS; } const struct mvs_dispatch mvs_94xx_dispatch = { "mv94xx", mvs_94xx_init, NULL, mvs_94xx_ioremap, mvs_94xx_iounmap, mvs_94xx_isr, mvs_94xx_isr_status, mvs_94xx_interrupt_enable, mvs_94xx_interrupt_disable, mvs_read_phy_ctl, mvs_write_phy_ctl, mvs_read_port_cfg_data, mvs_write_port_cfg_data, mvs_write_port_cfg_addr, mvs_read_port_vsr_data, mvs_write_port_vsr_data, mvs_write_port_vsr_addr, mvs_read_port_irq_stat, mvs_write_port_irq_stat, mvs_read_port_irq_mask, mvs_write_port_irq_mask, mvs_94xx_command_active, mvs_94xx_clear_srs_irq, mvs_94xx_issue_stop, mvs_start_delivery, mvs_rx_update, mvs_int_full, mvs_94xx_assign_reg_set, mvs_94xx_free_reg_set, mvs_get_prd_size, mvs_get_prd_count, mvs_94xx_make_prd, mvs_94xx_detect_porttype, mvs_94xx_oob_done, mvs_94xx_fix_phy_info, NULL, mvs_94xx_phy_set_link_rate, mvs_hw_max_link_rate, mvs_94xx_phy_disable, mvs_94xx_phy_enable, mvs_94xx_phy_reset, NULL, mvs_94xx_clear_active_cmds, mvs_94xx_spi_read_data, mvs_94xx_spi_write_data, mvs_94xx_spi_buildcmd, mvs_94xx_spi_issuecmd, mvs_94xx_spi_waitdataready, mvs_94xx_fix_dma, mvs_94xx_tune_interrupt, mvs_94xx_non_spec_ncq_error, mvs_94xx_gpio_write, };
linux-master
drivers/scsi/mvsas/mv_94xx.c
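A hedged standalone sketch of the index arithmetic in mvs_94xx_gpio_write() for SAS_GPIO_REG_TX_GP above: a flat LED bit index is decoded into host, drive-within-host, byte offset inside the SGPIO data-control word, and LED type. The struct and function names are illustrative only.

/* Illustrative decode of the (host, drive, LED) layout used above. */
#include <stdio.h>

enum led_type { LED_ACTIVITY = 0, LED_LOCATE = 1, LED_FAIL = 2 };

struct led_pos {
	int host;		/* which controller / SGPIO block */
	int drive;		/* drive 0..3 on that host */
	int shift;		/* bit offset of that drive's byte in the DCTRL word */
	enum led_type type;	/* activity, locate/id, fail */
};

struct led_pos decode_led_index(int i)
{
	struct led_pos p;

	p.host  = i / (4 * 3);		/* 4 drives, 3 LED bits each, per host */
	p.drive = (i / 3) & 3;		/* drive number within the host */
	p.shift = p.drive * 8;		/* each drive owns one byte of the 32-bit register */
	p.type  = (enum led_type)(i % 3);
	return p;
}

int main(void)
{
	struct led_pos p = decode_led_index(7);	/* e.g. bit 7 of the TX_GP word */

	printf("host=%d drive=%d shift=%d type=%d\n",
	       p.host, p.drive, p.shift, p.type);
	return 0;
}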
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell 88SE64xx hardware specific * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. <[email protected]> * Copyright 2009-2011 Marvell. <[email protected]> */ #include "mv_sas.h" #include "mv_64xx.h" #include "mv_chips.h" static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) { void __iomem *regs = mvi->regs; u32 reg; struct mvs_phy *phy = &mvi->phy[i]; reg = mr32(MVS_GBL_PORT_TYPE); phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); if (reg & MODE_SAS_SATA & (1 << i)) phy->phy_type |= PORT_TYPE_SAS; else phy->phy_type |= PORT_TYPE_SATA; } static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) { void __iomem *regs = mvi->regs; u32 tmp; tmp = mr32(MVS_PCS); if (mvi->chip->n_phy <= MVS_SOC_PORTS) tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); else tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); mw32(MVS_PCS, tmp); } static void mvs_64xx_phy_hacks(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; int i; mvs_phy_hacks(mvi); if (!(mvi->flags & MVF_FLAG_SOC)) { for (i = 0; i < MVS_SOC_PORTS; i++) { mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8); mvs_write_port_vsr_data(mvi, i, 0x2F0); } } else { /* disable auto port detection */ mw32(MVS_GBL_PORT_TYPE, 0); for (i = 0; i < mvi->chip->n_phy; i++) { mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7); mvs_write_port_vsr_data(mvi, i, 0x90000000); mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9); mvs_write_port_vsr_data(mvi, i, 0x50f2); mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11); mvs_write_port_vsr_data(mvi, i, 0x0e); } } } static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) { void __iomem *regs = mvi->regs; u32 reg, tmp; if (!(mvi->flags & MVF_FLAG_SOC)) { if (phy_id < MVS_SOC_PORTS) pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg); else pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg); } else reg = mr32(MVS_PHY_CTL); tmp = reg; if (phy_id < MVS_SOC_PORTS) tmp |= (1U << phy_id) << PCTL_LINK_OFFS; else tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS; if (!(mvi->flags & MVF_FLAG_SOC)) { if (phy_id < MVS_SOC_PORTS) { pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); mdelay(10); pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); } else { pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); mdelay(10); pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg); } } else { mw32(MVS_PHY_CTL, tmp); mdelay(10); mw32(MVS_PHY_CTL, reg); } } static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) { u32 tmp; tmp = mvs_read_port_irq_stat(mvi, phy_id); tmp &= ~PHYEV_RDY_CH; mvs_write_port_irq_stat(mvi, phy_id, tmp); tmp = mvs_read_phy_ctl(mvi, phy_id); if (hard == MVS_HARD_RESET) tmp |= PHY_RST_HARD; else if (hard == MVS_SOFT_RESET) tmp |= PHY_RST; mvs_write_phy_ctl(mvi, phy_id, tmp); if (hard) { do { tmp = mvs_read_phy_ctl(mvi, phy_id); } while (tmp & PHY_RST_HARD); } } static void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) { void __iomem *regs = mvi->regs; u32 tmp; if (clear_all) { tmp = mr32(MVS_INT_STAT_SRS_0); if (tmp) { printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp); mw32(MVS_INT_STAT_SRS_0, tmp); } } else { tmp = mr32(MVS_INT_STAT_SRS_0); if (tmp & (1 << (reg_set % 32))) { printk(KERN_DEBUG "register set 0x%x was stopped.\n", reg_set); mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32)); } } } static int mvs_64xx_chip_reset(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; u32 tmp; int i; /* make sure interrupts are masked immediately (paranoia) */ mw32(MVS_GBL_CTL, 0); tmp = mr32(MVS_GBL_CTL); /* Reset 
Controller */ if (!(tmp & HBA_RST)) { if (mvi->flags & MVF_PHY_PWR_FIX) { pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); tmp &= ~PCTL_PWR_OFF; tmp |= PCTL_PHY_DSBL; pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); tmp &= ~PCTL_PWR_OFF; tmp |= PCTL_PHY_DSBL; pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); } } /* make sure interrupts are masked immediately (paranoia) */ mw32(MVS_GBL_CTL, 0); tmp = mr32(MVS_GBL_CTL); /* Reset Controller */ if (!(tmp & HBA_RST)) { /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ mw32_f(MVS_GBL_CTL, HBA_RST); } /* wait for reset to finish; timeout is just a guess */ i = 1000; while (i-- > 0) { msleep(10); if (!(mr32(MVS_GBL_CTL) & HBA_RST)) break; } if (mr32(MVS_GBL_CTL) & HBA_RST) { dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n"); return -EBUSY; } return 0; } static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id) { void __iomem *regs = mvi->regs; u32 tmp; if (!(mvi->flags & MVF_FLAG_SOC)) { u32 offs; if (phy_id < 4) offs = PCR_PHY_CTL; else { offs = PCR_PHY_CTL2; phy_id -= 4; } pci_read_config_dword(mvi->pdev, offs, &tmp); tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); pci_write_config_dword(mvi->pdev, offs, tmp); } else { tmp = mr32(MVS_PHY_CTL); tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); mw32(MVS_PHY_CTL, tmp); } } static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id) { void __iomem *regs = mvi->regs; u32 tmp; if (!(mvi->flags & MVF_FLAG_SOC)) { u32 offs; if (phy_id < 4) offs = PCR_PHY_CTL; else { offs = PCR_PHY_CTL2; phy_id -= 4; } pci_read_config_dword(mvi->pdev, offs, &tmp); tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); pci_write_config_dword(mvi->pdev, offs, tmp); } else { tmp = mr32(MVS_PHY_CTL); tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); mw32(MVS_PHY_CTL, tmp); } } static int mvs_64xx_init(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; int i; u32 tmp, cctl; if (mvi->pdev && mvi->pdev->revision == 0) mvi->flags |= MVF_PHY_PWR_FIX; if (!(mvi->flags & MVF_FLAG_SOC)) { mvs_show_pcie_usage(mvi); tmp = mvs_64xx_chip_reset(mvi); if (tmp) return tmp; } else { tmp = mr32(MVS_PHY_CTL); tmp &= ~PCTL_PWR_OFF; tmp |= PCTL_PHY_DSBL; mw32(MVS_PHY_CTL, tmp); } /* Init Chip */ /* make sure RST is set; HBA_RST /should/ have done that for us */ cctl = mr32(MVS_CTL) & 0xFFFF; if (cctl & CCTL_RST) cctl &= ~CCTL_RST; else mw32_f(MVS_CTL, cctl | CCTL_RST); if (!(mvi->flags & MVF_FLAG_SOC)) { /* write to device control _AND_ device status register */ pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); tmp &= ~PRD_REQ_MASK; tmp |= PRD_REQ_SIZE; pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); tmp &= ~PCTL_PWR_OFF; tmp &= ~PCTL_PHY_DSBL; pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); tmp &= PCTL_PWR_OFF; tmp &= ~PCTL_PHY_DSBL; pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); } else { tmp = mr32(MVS_PHY_CTL); tmp &= ~PCTL_PWR_OFF; tmp |= PCTL_COM_ON; tmp &= ~PCTL_PHY_DSBL; tmp |= PCTL_LINK_RST; mw32(MVS_PHY_CTL, tmp); msleep(100); tmp &= ~PCTL_LINK_RST; mw32(MVS_PHY_CTL, tmp); msleep(100); } /* reset control */ mw32(MVS_PCS, 0); /* MVS_PCS */ /* init phys */ mvs_64xx_phy_hacks(mvi); tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); tmp &= 0x0000ffff; tmp |= 0x00fa0000; mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); /* enable auto port detection */ mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); mw32(MVS_CMD_LIST_LO, mvi->slot_dma); mw32(MVS_CMD_LIST_HI, 
(mvi->slot_dma >> 16) >> 16); mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); mw32(MVS_TX_LO, mvi->tx_dma); mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); mw32(MVS_RX_CFG, MVS_RX_RING_SZ); mw32(MVS_RX_LO, mvi->rx_dma); mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); for (i = 0; i < mvi->chip->n_phy; i++) { /* set phy local SAS address */ /* should set little endian SAS address to 64xx chip */ mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI, cpu_to_be64(mvi->phy[i].dev_sas_addr)); mvs_64xx_enable_xmt(mvi, i); mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET); msleep(500); mvs_64xx_detect_porttype(mvi, i); } if (mvi->flags & MVF_FLAG_SOC) { /* set select registers */ writel(0x0E008000, regs + 0x000); writel(0x59000008, regs + 0x004); writel(0x20, regs + 0x008); writel(0x20, regs + 0x00c); writel(0x20, regs + 0x010); writel(0x20, regs + 0x014); writel(0x20, regs + 0x018); writel(0x20, regs + 0x01c); } for (i = 0; i < mvi->chip->n_phy; i++) { /* clear phy int status */ tmp = mvs_read_port_irq_stat(mvi, i); tmp &= ~PHYEV_SIG_FIS; mvs_write_port_irq_stat(mvi, i, tmp); /* set phy int mask */ tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR | PHYEV_DEC_ERR; mvs_write_port_irq_mask(mvi, i, tmp); msleep(100); mvs_update_phyinfo(mvi, i, 1); } /* little endian for open address and command table, etc. */ cctl = mr32(MVS_CTL); cctl |= CCTL_ENDIAN_CMD; cctl |= CCTL_ENDIAN_DATA; cctl &= ~CCTL_ENDIAN_OPEN; cctl |= CCTL_ENDIAN_RSP; mw32_f(MVS_CTL, cctl); /* reset CMD queue */ tmp = mr32(MVS_PCS); tmp |= PCS_CMD_RST; tmp &= ~PCS_SELF_CLEAR; mw32(MVS_PCS, tmp); /* * the max count is 0x1ff, while our max slot is 0x200, * it will make count 0. */ tmp = 0; if (MVS_CHIP_SLOT_SZ > 0x1ff) mw32(MVS_INT_COAL, 0x1ff | COAL_EN); else mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN); tmp = 0x10000 | interrupt_coalescing; mw32(MVS_INT_COAL_TMOUT, tmp); /* ladies and gentlemen, start your engines */ mw32(MVS_TX_CFG, 0); mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); /* enable CMD/CMPL_Q/RESP mode */ mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN | PCS_CMD_STOP_ERR); /* enable completion queue interrupt */ tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | CINT_DMA_PCIE); mw32(MVS_INT_MASK, tmp); /* Enable SRS interrupt */ mw32(MVS_INT_MASK_SRS_0, 0xFFFF); return 0; } static int mvs_64xx_ioremap(struct mvs_info *mvi) { if (!mvs_ioremap(mvi, 4, 2)) return 0; return -1; } static void mvs_64xx_iounmap(struct mvs_info *mvi) { mvs_iounmap(mvi->regs); mvs_iounmap(mvi->regs_ex); } static void mvs_64xx_interrupt_enable(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; u32 tmp; tmp = mr32(MVS_GBL_CTL); mw32(MVS_GBL_CTL, tmp | INT_EN); } static void mvs_64xx_interrupt_disable(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; u32 tmp; tmp = mr32(MVS_GBL_CTL); mw32(MVS_GBL_CTL, tmp & ~INT_EN); } static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq) { void __iomem *regs = mvi->regs; u32 stat; if (!(mvi->flags & MVF_FLAG_SOC)) { stat = mr32(MVS_GBL_INT_STAT); if (stat == 0 || stat == 0xffffffff) return 0; } else stat = 1; return stat; } static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) { void __iomem *regs = mvi->regs; /* clear CMD_CMPLT ASAP */ mw32_f(MVS_INT_STAT, CINT_DONE); spin_lock(&mvi->lock); mvs_int_full(mvi); spin_unlock(&mvi->lock); return IRQ_HANDLED; } static void 
mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx) { u32 tmp; mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32)); mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32)); do { tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3)); } while (tmp & 1 << (slot_idx % 32)); do { tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3)); } while (tmp & 1 << (slot_idx % 32)); } static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, u32 tfs) { void __iomem *regs = mvi->regs; u32 tmp; if (type == PORT_TYPE_SATA) { tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); mw32(MVS_INT_STAT_SRS_0, tmp); } mw32(MVS_INT_STAT, CINT_CI_STOP); tmp = mr32(MVS_PCS) | 0xFF00; mw32(MVS_PCS, tmp); } static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) { void __iomem *regs = mvi->regs; u32 tmp, offs; if (*tfs == MVS_ID_NOT_MAPPED) return; offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); if (*tfs < 16) { tmp = mr32(MVS_PCS); mw32(MVS_PCS, tmp & ~offs); } else { tmp = mr32(MVS_CTL); mw32(MVS_CTL, tmp & ~offs); } tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs); if (tmp) mw32(MVS_INT_STAT_SRS_0, tmp); *tfs = MVS_ID_NOT_MAPPED; return; } static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) { int i; u32 tmp, offs; void __iomem *regs = mvi->regs; if (*tfs != MVS_ID_NOT_MAPPED) return 0; tmp = mr32(MVS_PCS); for (i = 0; i < mvi->chip->srs_sz; i++) { if (i == 16) tmp = mr32(MVS_CTL); offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); if (!(tmp & offs)) { *tfs = i; if (i < 16) mw32(MVS_PCS, tmp | offs); else mw32(MVS_CTL, tmp | offs); tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i); if (tmp) mw32(MVS_INT_STAT_SRS_0, tmp); return 0; } } return MVS_ID_NOT_MAPPED; } static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) { int i; struct scatterlist *sg; struct mvs_prd *buf_prd = prd; for_each_sg(scatter, sg, nr, i) { buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); buf_prd->len = cpu_to_le32(sg_dma_len(sg)); buf_prd++; } } static int mvs_64xx_oob_done(struct mvs_info *mvi, int i) { u32 phy_st; mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); phy_st = mvs_read_port_cfg_data(mvi, i); if (phy_st & PHY_OOB_DTCTD) return 1; return 0; } static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i, struct sas_identify_frame *id) { struct mvs_phy *phy = &mvi->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; sas_phy->linkrate = (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; phy->minimum_linkrate = (phy->phy_status & PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; phy->maximum_linkrate = (phy->phy_status & PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); phy->dev_info = mvs_read_port_cfg_data(mvi, i); mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); phy->att_dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr); } static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) { u32 tmp; struct mvs_phy *phy = &mvi->phy[i]; mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); tmp = mvs_read_port_vsr_data(mvi, i); if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == SAS_LINK_RATE_1_5_GBPS) tmp &= ~PHY_MODE6_LATECLK; else tmp |= PHY_MODE6_LATECLK; mvs_write_port_vsr_data(mvi, i, tmp); } 
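/*
 * Program the per-phy minimum/maximum link rate limits.  As used below,
 * the minimum rate occupies bits 11:8 and the maximum rate bits 15:12 of
 * the value read through mvs_read_phy_ctl(); a zero field in @rates
 * leaves the corresponding limit untouched.  The hard reset afterwards
 * restarts the link so the new limits apply to the next negotiation.
 */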
static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, struct sas_phy_linkrates *rates) { u32 lrmin = 0, lrmax = 0; u32 tmp; tmp = mvs_read_phy_ctl(mvi, phy_id); lrmin = (rates->minimum_linkrate << 8); lrmax = (rates->maximum_linkrate << 12); if (lrmin) { tmp &= ~(0xf << 8); tmp |= lrmin; } if (lrmax) { tmp &= ~(0xf << 12); tmp |= lrmax; } mvs_write_phy_ctl(mvi, phy_id, tmp); mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET); } static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) { u32 tmp; void __iomem *regs = mvi->regs; tmp = mr32(MVS_PCS); mw32(MVS_PCS, tmp & 0xFFFF); mw32(MVS_PCS, tmp); tmp = mr32(MVS_CTL); mw32(MVS_CTL, tmp & 0xFFFF); mw32(MVS_CTL, tmp); } static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) { void __iomem *regs = mvi->regs_ex; return ior32(SPI_DATA_REG_64XX); } static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) { void __iomem *regs = mvi->regs_ex; iow32(SPI_DATA_REG_64XX, data); } static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, u32 *dwCmd, u8 cmd, u8 read, u8 length, u32 addr ) { u32 dwTmp; dwTmp = ((u32)cmd << 24) | ((u32)length << 19); if (read) dwTmp |= 1U<<23; if (addr != MV_MAX_U32) { dwTmp |= 1U<<22; dwTmp |= (addr & 0x0003FFFF); } *dwCmd = dwTmp; return 0; } static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) { void __iomem *regs = mvi->regs_ex; int retry; for (retry = 0; retry < 1; retry++) { iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE); iow32(SPI_CMD_REG_64XX, cmd); iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART); } return 0; } static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) { void __iomem *regs = mvi->regs_ex; u32 i, dwTmp; for (i = 0; i < timeout; i++) { dwTmp = ior32(SPI_CTRL_REG_64XX); if (!(dwTmp & SPI_CTRL_SPISTART)) return 0; msleep(10); } return -1; } static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, int buf_len, int from, void *prd) { int i; struct mvs_prd *buf_prd = prd; dma_addr_t buf_dma = mvi->bulk_buffer_dma; buf_prd += from; for (i = 0; i < MAX_SG_ENTRY - from; i++) { buf_prd->addr = cpu_to_le64(buf_dma); buf_prd->len = cpu_to_le32(buf_len); ++buf_prd; } } static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time) { void __iomem *regs = mvi->regs; u32 tmp = 0; /* * the max count is 0x1ff, while our max slot is 0x200, * it will make count 0. 
*/ if (time == 0) { mw32(MVS_INT_COAL, 0); mw32(MVS_INT_COAL_TMOUT, 0x10000); } else { if (MVS_CHIP_SLOT_SZ > 0x1ff) mw32(MVS_INT_COAL, 0x1ff|COAL_EN); else mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); tmp = 0x10000 | time; mw32(MVS_INT_COAL_TMOUT, tmp); } } const struct mvs_dispatch mvs_64xx_dispatch = { "mv64xx", mvs_64xx_init, NULL, mvs_64xx_ioremap, mvs_64xx_iounmap, mvs_64xx_isr, mvs_64xx_isr_status, mvs_64xx_interrupt_enable, mvs_64xx_interrupt_disable, mvs_read_phy_ctl, mvs_write_phy_ctl, mvs_read_port_cfg_data, mvs_write_port_cfg_data, mvs_write_port_cfg_addr, mvs_read_port_vsr_data, mvs_write_port_vsr_data, mvs_write_port_vsr_addr, mvs_read_port_irq_stat, mvs_write_port_irq_stat, mvs_read_port_irq_mask, mvs_write_port_irq_mask, mvs_64xx_command_active, mvs_64xx_clear_srs_irq, mvs_64xx_issue_stop, mvs_start_delivery, mvs_rx_update, mvs_int_full, mvs_64xx_assign_reg_set, mvs_64xx_free_reg_set, mvs_get_prd_size, mvs_get_prd_count, mvs_64xx_make_prd, mvs_64xx_detect_porttype, mvs_64xx_oob_done, mvs_64xx_fix_phy_info, mvs_64xx_phy_work_around, mvs_64xx_phy_set_link_rate, mvs_hw_max_link_rate, mvs_64xx_phy_disable, mvs_64xx_phy_enable, mvs_64xx_phy_reset, mvs_64xx_stp_reset, mvs_64xx_clear_active_cmds, mvs_64xx_spi_read_data, mvs_64xx_spi_write_data, mvs_64xx_spi_buildcmd, mvs_64xx_spi_issuecmd, mvs_64xx_spi_waitdataready, mvs_64xx_fix_dma, mvs_64xx_tune_interrupt, NULL, };
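/*
 * mvs_64xx_dispatch wires the 64xx register accessors, phy helpers and
 * SPI routines into the chip-independent core (mv_sas.c), which reaches
 * the hardware only through MVS_CHIP_DISP.  Hooks the 64xx family does
 * not provide are left NULL; callers in mv_sas.c (for example
 * mvs_gpio_write()) check for a NULL method before invoking it.
 */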
linux-master
drivers/scsi/mvsas/mv_64xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell 88SE64xx/88SE94xx main function * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. <[email protected]> * Copyright 2009-2011 Marvell. <[email protected]> */ #include "mv_sas.h" static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct mvs_slot_info *slot; slot = task->lldd_task; *tag = slot->slot_tag; return 1; } return 0; } static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { void *bitmap = mvi->rsvd_tags; clear_bit(tag, bitmap); } static void mvs_tag_free(struct mvs_info *mvi, u32 tag) { if (tag >= MVS_RSVD_SLOTS) return; mvs_tag_clear(mvi, tag); } static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { void *bitmap = mvi->rsvd_tags; set_bit(tag, bitmap); } static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; void *bitmap = mvi->rsvd_tags; index = find_first_zero_bit(bitmap, MVS_RSVD_SLOTS); tag = index; if (tag >= MVS_RSVD_SLOTS) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; return 0; } static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) { unsigned long i = 0, j = 0, hi = 0; struct sas_ha_struct *sha = dev->port->ha; struct mvs_info *mvi = NULL; struct asd_sas_phy *phy; while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { spin_lock(&sha->sas_port[i]->phy_list_lock); phy = container_of(sha->sas_port[i]->phy_list.next, struct asd_sas_phy, port_phy_el); spin_unlock(&sha->sas_port[i]->phy_list_lock); j = 0; while (sha->sas_phy[j]) { if (sha->sas_phy[j] == phy) break; j++; } break; } i++; } hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; return mvi; } static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) { unsigned long i = 0, j = 0, n = 0, num = 0; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; struct sas_ha_struct *sha = dev->port->ha; while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { struct asd_sas_phy *phy; spin_lock(&sha->sas_port[i]->phy_list_lock); list_for_each_entry(phy, &sha->sas_port[i]->phy_list, port_phy_el) { j = 0; while (sha->sas_phy[j]) { if (sha->sas_phy[j] == phy) break; j++; } phyno[n] = (j >= mvi->chip->n_phy) ? 
(j - mvi->chip->n_phy) : j; num++; n++; } spin_unlock(&sha->sas_port[i]->phy_list_lock); break; } i++; } return num; } struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set) { u32 dev_no; for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) { if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED) continue; if (mvi->devices[dev_no].taskfileset == reg_set) return &mvi->devices[dev_no]; } return NULL; } static inline void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_device *dev) { if (!dev) { mv_printk("device has been free.\n"); return; } if (dev->taskfileset == MVS_ID_NOT_MAPPED) return; MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); } static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_device *dev) { if (dev->taskfileset != MVS_ID_NOT_MAPPED) return 0; return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); } void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) { u32 no; for_each_phy(phy_mask, phy_mask, no) { if (!(phy_mask & 1)) continue; MVS_CHIP_DISP->phy_reset(mvi, no, hard); } } int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { int rc = 0, phy_id = sas_phy->id; u32 tmp, i = 0, hi; struct sas_ha_struct *sha = sas_phy->ha; struct mvs_info *mvi = NULL; while (sha->sas_phy[i]) { if (sha->sas_phy[i] == sas_phy) break; i++; } hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; switch (func) { case PHY_FUNC_SET_LINK_RATE: MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); break; case PHY_FUNC_HARD_RESET: tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); if (tmp & PHY_RST_HARD) break; MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET); break; case PHY_FUNC_LINK_RESET: MVS_CHIP_DISP->phy_enable(mvi, phy_id); MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET); break; case PHY_FUNC_DISABLE: MVS_CHIP_DISP->phy_disable(mvi, phy_id); break; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: rc = -ENOSYS; } msleep(200); return rc; } void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, u32 off_hi, u64 sas_addr) { u32 lo = (u32)sas_addr; u32 hi = (u32)(sas_addr>>32); MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); } static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags) { struct mvs_phy *phy = &mvi->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; if (!phy->phy_attached) return; if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) && phy->phy_type & PORT_TYPE_SAS) { return; } sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); if (sas_phy->phy) { struct sas_phy *sphy = sas_phy->phy; sphy->negotiated_linkrate = sas_phy->linkrate; sphy->minimum_linkrate = phy->minimum_linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sphy->maximum_linkrate = phy->maximum_linkrate; sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); } if (phy->phy_type & PORT_TYPE_SAS) { struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; id->dev_type = phy->identify.device_type; id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; /* direct attached SAS device */ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00); } } else if (phy->phy_type & PORT_TYPE_SATA) { 
/*Nothing*/ } mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); sas_phy->frame_rcvd_size = phy->frame_rcvd_size; sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); } void mvs_scan_start(struct Scsi_Host *shost) { int i, j; unsigned short core_nr; struct mvs_info *mvi; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct mvs_prv_info *mvs_prv = sha->lldd_ha; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; for (j = 0; j < core_nr; j++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; for (i = 0; i < mvi->chip->n_phy; ++i) mvs_bytes_dmaed(mvi, i, GFP_KERNEL); } mvs_prv->scan_finished = 1; } int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct mvs_prv_info *mvs_prv = sha->lldd_ha; if (mvs_prv->scan_finished == 0) return 0; sas_drain_work(sha); return 1; } static int mvs_task_prep_smp(struct mvs_info *mvi, struct mvs_task_exec_info *tei) { int elem, rc, i; struct sas_ha_struct *sha = mvi->sas; struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; struct domain_device *dev = task->dev; struct asd_sas_port *sas_port = dev->port; struct sas_phy *sphy = dev->phy; struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; struct scatterlist *sg_req, *sg_resp; u32 req_len, resp_len, tag = tei->tag; void *buf_tmp; u8 *buf_oaf; dma_addr_t buf_tmp_dma; void *buf_prd; struct mvs_slot_info *slot = &mvi->slot_info[tag]; u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); /* * DMA-map SMP request, response buffers */ sg_req = &task->smp_task.smp_req; elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE); if (!elem) return -ENOMEM; req_len = sg_dma_len(sg_req); sg_resp = &task->smp_task.smp_resp; elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE); if (!elem) { rc = -ENOMEM; goto err_out; } resp_len = SB_RFB_MAX; /* must be in dwords */ if ((req_len & 0x3) || (resp_len & 0x3)) { rc = -EINVAL; goto err_out_2; } /* * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ buf_oaf = buf_tmp; hdr->open_frame = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; /* region 3: PRD table *********************************** */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) hdr->reserved[0] = 0; /* * Fill in TX ring and command slot header */ slot->tx = mvi->tx_prod; mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | TXQ_MODE_I | tag | (MVS_PHY_ID << TXQ_PHY_SHIFT)); hdr->flags |= flags; hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); hdr->tags = cpu_to_le32(tag); hdr->data_len = 0; /* generate open address frame hdr (first 12 bytes) */ /* initiator, SMP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, 
buf_prd); return 0; err_out_2: dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, DMA_FROM_DEVICE); err_out: dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, DMA_TO_DEVICE); return rc; } static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) { struct ata_queued_cmd *qc = task->uldd_task; if (qc) { if (qc->tf.command == ATA_CMD_FPDMA_WRITE || qc->tf.command == ATA_CMD_FPDMA_READ || qc->tf.command == ATA_CMD_FPDMA_RECV || qc->tf.command == ATA_CMD_FPDMA_SEND || qc->tf.command == ATA_CMD_NCQ_NON_DATA) { *tag = qc->tag; return 1; } } return 0; } static int mvs_task_prep_ata(struct mvs_info *mvi, struct mvs_task_exec_info *tei) { struct sas_task *task = tei->task; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_cmd_hdr *hdr = tei->hdr; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; void *buf_prd; u32 tag = tei->tag, hdr_tag; u32 flags, del_q; void *buf_tmp; u8 *buf_cmd, *buf_oaf; dma_addr_t buf_tmp_dma; u32 i, req_len, resp_len; const u32 max_resp_len = SB_RFB_MAX; if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { mv_dprintk("Have not enough regiset for dev %d.\n", mvi_dev->device_id); return -EBUSY; } slot = &mvi->slot_info[tag]; slot->tx = mvi->tx_prod; del_q = TXQ_MODE_I | tag | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); if (task->data_dir == DMA_FROM_DEVICE) flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); else flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); if (task->ata_task.use_ncq) flags |= MCH_FPDMA; if (dev->sata_dev.class == ATA_DEV_ATAPI) { if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) flags |= MCH_ATAPI; } hdr->flags = cpu_to_le32(flags); if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); else hdr_tag = tag; hdr->tags = cpu_to_le32(hdr_tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); /* * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ buf_cmd = buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_ATA_CMD_SZ; buf_tmp_dma += MVS_ATA_CMD_SZ; /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ /* used for STP. unused for SATA? 
*/ buf_oaf = buf_tmp; hdr->open_frame = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; /* region 3: PRD table ********************************************* */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) hdr->reserved[0] = 0; req_len = sizeof(struct host_to_dev_fis); resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - sizeof(struct mvs_err_info) - i; /* request, response lengths */ resp_len = min(resp_len, max_resp_len); hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); if (likely(!task->ata_task.device_control_reg_update)) task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS and ATAPI CDB */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); if (dev->sata_dev.class == ATA_DEV_ATAPI) memcpy(buf_cmd + STP_ATAPI_CMD, task->ata_task.atapi_packet, 16); /* generate open address frame hdr (first 12 bytes) */ /* initiator, STP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); if (task->data_dir == DMA_FROM_DEVICE) MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask, TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); return 0; } static int mvs_task_prep_ssp(struct mvs_info *mvi, struct mvs_task_exec_info *tei, int is_tmf, struct sas_tmf_task *tmf) { struct sas_task *task = tei->task; struct mvs_cmd_hdr *hdr = tei->hdr; struct mvs_port *port = tei->port; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = dev->lldd_dev; struct asd_sas_port *sas_port = dev->port; struct mvs_slot_info *slot; void *buf_prd; struct ssp_frame_hdr *ssp_hdr; void *buf_tmp; u8 *buf_cmd, *buf_oaf; dma_addr_t buf_tmp_dma; u32 flags; u32 resp_len, req_len, i, tag = tei->tag; const u32 max_resp_len = SB_RFB_MAX; u32 phy_mask; slot = &mvi->slot_info[tag]; phy_mask = ((port->wide_port_phymap) ? 
port->wide_port_phymap : sas_port->phy_mask) & TXQ_PHY_MASK; slot->tx = mvi->tx_prod; mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | (phy_mask << TXQ_PHY_SHIFT)); flags = MCH_RETRY; if (is_tmf) flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); else flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); hdr->tags = cpu_to_le32(tag); hdr->data_len = cpu_to_le32(task->total_xfer_len); /* * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs */ /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ buf_cmd = buf_tmp = slot->buf; buf_tmp_dma = slot->buf_dma; hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_SSP_CMD_SZ; buf_tmp_dma += MVS_SSP_CMD_SZ; /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ buf_oaf = buf_tmp; hdr->open_frame = cpu_to_le64(buf_tmp_dma); buf_tmp += MVS_OAF_SZ; buf_tmp_dma += MVS_OAF_SZ; /* region 3: PRD table ********************************************* */ buf_prd = buf_tmp; if (tei->n_elem) hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); else hdr->prd_tbl = 0; i = MVS_CHIP_DISP->prd_size() * tei->n_elem; buf_tmp += i; buf_tmp_dma += i; /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ slot->response = buf_tmp; hdr->status_buf = cpu_to_le64(buf_tmp_dma); if (mvi->flags & MVF_FLAG_SOC) hdr->reserved[0] = 0; resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - sizeof(struct mvs_err_info) - i; resp_len = min(resp_len, max_resp_len); req_len = sizeof(struct ssp_frame_hdr) + 28; /* request, response lengths */ hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); /* generate open address frame hdr (first 12 bytes) */ /* initiator, SSP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); /* fill in SSP frame header (Command Table.SSP frame header) */ ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; if (is_tmf) ssp_hdr->frame_type = SSP_TASK; else ssp_hdr->frame_type = SSP_COMMAND; memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(ssp_hdr->hashed_src_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); ssp_hdr->tag = cpu_to_be16(tag); /* fill in IU for TASK and Command Frame */ buf_cmd += sizeof(*ssp_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (ssp_hdr->frame_type != SSP_TASK) { buf_cmd[9] = task->ssp_task.task_attr; memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, task->ssp_task.cmd->cmd_len); } else{ buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: buf_cmd[12] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; buf_cmd[13] = tmf->tag_of_task_to_be_managed & 0xff; break; default: break; } } /* fill in PRD (scatter/gather) table, if any */ MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); return 0; } #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct sas_tmf_task *tmf, int *pass) { struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_task_exec_info tei; struct mvs_slot_info *slot; u32 tag = 0xdeadbeef, n_elem = 0; struct request *rq; int rc = 0; if (!dev->port) { struct task_status_struct *tsm = &task->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = 
SAS_PHY_DOWN; /* * libsas will use dev->port, should * not call task_done for sata */ if (dev->dev_type != SAS_SATA_DEV) task->task_done(task); return rc; } if (DEV_IS_GONE(mvi_dev)) { if (mvi_dev) mv_dprintk("device %d not ready.\n", mvi_dev->device_id); else mv_dprintk("device %016llx not ready.\n", SAS_ADDR(dev->sas_addr)); rc = SAS_PHY_DOWN; return rc; } tei.port = dev->port->lldd_port; if (tei.port && !tei.port->port_attached && !tmf) { if (sas_protocol_ata(task->task_proto)) { struct task_status_struct *ts = &task->task_status; mv_dprintk("SATA/STP port %d does not attach" "device.\n", dev->port->id); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PHY_DOWN; task->task_done(task); } else { struct task_status_struct *ts = &task->task_status; mv_dprintk("SAS port %d does not attach" "device.\n", dev->port->id); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; task->task_done(task); } return rc; } if (!sas_protocol_ata(task->task_proto)) { if (task->num_scatter) { n_elem = dma_map_sg(mvi->dev, task->scatter, task->num_scatter, task->data_dir); if (!n_elem) { rc = -ENOMEM; goto prep_out; } } } else { n_elem = task->num_scatter; } rq = sas_task_find_rq(task); if (rq) { tag = rq->tag + MVS_RSVD_SLOTS; } else { rc = mvs_tag_alloc(mvi, &tag); if (rc) goto err_out; } slot = &mvi->slot_info[tag]; task->lldd_task = NULL; slot->n_elem = n_elem; slot->slot_tag = tag; slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); if (!slot->buf) { rc = -ENOMEM; goto err_out_tag; } tei.task = task; tei.hdr = &mvi->slot[tag]; tei.tag = tag; tei.n_elem = n_elem; switch (task->task_proto) { case SAS_PROTOCOL_SMP: rc = mvs_task_prep_smp(mvi, &tei); break; case SAS_PROTOCOL_SSP: rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: rc = mvs_task_prep_ata(mvi, &tei); break; default: dev_printk(KERN_ERR, mvi->dev, "unknown sas_task proto: 0x%x\n", task->task_proto); rc = -EINVAL; break; } if (rc) { mv_dprintk("rc is %x\n", rc); goto err_out_slot_buf; } slot->task = task; slot->port = tei.port; task->lldd_task = slot; list_add_tail(&slot->entry, &tei.port->list); mvi_dev->running_req++; ++(*pass); mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); return rc; err_out_slot_buf: dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); err_out_tag: mvs_tag_free(mvi, tag); err_out: dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc); if (!sas_protocol_ata(task->task_proto)) if (n_elem) dma_unmap_sg(mvi->dev, task->scatter, n_elem, task->data_dir); prep_out: return rc; } int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) { struct mvs_info *mvi = NULL; u32 rc = 0; u32 pass = 0; unsigned long flags = 0; struct sas_tmf_task *tmf = task->tmf; int is_tmf = !!task->tmf; mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; spin_lock_irqsave(&mvi->lock, flags); rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass); if (rc) dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); if (likely(pass)) MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); spin_unlock_irqrestore(&mvi->lock, flags); return rc; } static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; mvs_tag_free(mvi, slot_idx); } static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, struct mvs_slot_info *slot, u32 slot_idx) { if (!slot) return; if (!slot->task) return; if (!sas_protocol_ata(task->task_proto)) if 
(slot->n_elem) dma_unmap_sg(mvi->dev, task->scatter, slot->n_elem, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, DMA_FROM_DEVICE); dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SSP: default: /* do nothing */ break; } if (slot->buf) { dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); slot->buf = NULL; } list_del_init(&slot->entry); task->lldd_task = NULL; slot->task = NULL; slot->port = NULL; slot->slot_tag = 0xFFFFFFFF; mvs_slot_free(mvi, slot_idx); } static void mvs_update_wideport(struct mvs_info *mvi, int phy_no) { struct mvs_phy *phy = &mvi->phy[phy_no]; struct mvs_port *port = phy->port; int j, no; for_each_phy(port->wide_port_phymap, j, no) { if (j & 1) { MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); MVS_CHIP_DISP->write_port_cfg_data(mvi, no, port->wide_port_phymap); } else { MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); MVS_CHIP_DISP->write_port_cfg_data(mvi, no, 0); } } } static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) { u32 tmp; struct mvs_phy *phy = &mvi->phy[i]; struct mvs_port *port = phy->port; tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { if (!port) phy->phy_attached = 1; return tmp; } if (port) { if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap &= ~(1U << i); if (!port->wide_port_phymap) port->port_attached = 0; mvs_update_wideport(mvi, i); } else if (phy->phy_type & PORT_TYPE_SATA) port->port_attached = 0; phy->port = NULL; phy->phy_attached = 0; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); } return 0; } static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) { u32 *s = (u32 *) buf; if (!s) return NULL; MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); return s; } static u32 mvs_is_sig_fis_received(u32 irq_status) { return irq_status & PHYEV_SIG_FIS; } static void mvs_sig_remove_timer(struct mvs_phy *phy) { if (phy->timer.function) del_timer(&phy->timer); phy->timer.function = NULL; } void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) { struct mvs_phy *phy = &mvi->phy[i]; struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; if (get_st) { phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); phy->phy_status = mvs_is_phy_ready(mvi, i); } if (phy->phy_status) { int oob_done = 0; struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; oob_done = MVS_CHIP_DISP->oob_done(mvi, i); MVS_CHIP_DISP->fix_phy_info(mvi, i, id); if (phy->phy_type & PORT_TYPE_SATA) { phy->identify.target_port_protocols = SAS_PROTOCOL_STP; if (mvs_is_sig_fis_received(phy->irq_status)) { mvs_sig_remove_timer(phy); phy->phy_attached = 1; phy->att_dev_sas_addr = i + mvi->id * mvi->chip->n_phy; if (oob_done) sas_phy->oob_mode = SATA_OOB_MODE; phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); mvs_get_d2h_reg(mvi, 
i, id); } else { u32 tmp; dev_printk(KERN_DEBUG, mvi->dev, "Phy%d : No sig fis\n", i); tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); MVS_CHIP_DISP->write_port_irq_mask(mvi, i, tmp | PHYEV_SIG_FIS); phy->phy_attached = 0; phy->phy_type &= ~PORT_TYPE_SATA; goto out_done; } } else if (phy->phy_type & PORT_TYPE_SAS || phy->att_dev_info & PORT_SSP_INIT_MASK) { phy->phy_attached = 1; phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) sas_phy->oob_mode = SAS_OOB_MODE; phy->frame_rcvd_size = sizeof(struct sas_identify_frame); } memcpy(sas_phy->attached_sas_addr, &phy->att_dev_sas_addr, SAS_ADDR_SIZE); if (MVS_CHIP_DISP->phy_work_around) MVS_CHIP_DISP->phy_work_around(mvi, i); } mv_dprintk("phy %d attach dev info is %x\n", i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); mv_dprintk("phy %d attach sas addr is %llx\n", i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); out_done: if (get_st) MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); } static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) { struct sas_ha_struct *sas_ha = sas_phy->ha; struct mvs_info *mvi = NULL; int i = 0, hi; struct mvs_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; struct mvs_port *port; unsigned long flags = 0; if (!sas_port) return; while (sas_ha->sas_phy[i]) { if (sas_ha->sas_phy[i] == sas_phy) break; i++; } hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; if (i >= mvi->chip->n_phy) port = &mvi->port[i - mvi->chip->n_phy]; else port = &mvi->port[i]; if (lock) spin_lock_irqsave(&mvi->lock, flags); port->port_attached = 1; phy->port = port; sas_port->lldd_port = port; if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap = sas_port->phy_mask; mv_printk("set wide port phy map %x\n", sas_port->phy_mask); mvs_update_wideport(mvi, sas_phy->id); /* direct attached SAS device */ if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04); } } if (lock) spin_unlock_irqrestore(&mvi->lock, flags); } static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) { struct domain_device *dev; struct mvs_phy *phy = sas_phy->lldd_phy; struct mvs_info *mvi = phy->mvi; struct asd_sas_port *port = sas_phy->port; int phy_no = 0; while (phy != &mvi->phy[phy_no]) { phy_no++; if (phy_no >= MVS_MAX_PHYS) return; } list_for_each_entry(dev, &port->dev_list, dev_list_node) mvs_do_release_task(phy->mvi, phy_no, dev); } void mvs_port_formed(struct asd_sas_phy *sas_phy) { mvs_port_notify_formed(sas_phy, 1); } void mvs_port_deformed(struct asd_sas_phy *sas_phy) { mvs_port_notify_deformed(sas_phy, 1); } static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } } if (dev == MVS_MAX_DEVICES) mv_printk("max support %d devices, ignore ..\n", MVS_MAX_DEVICES); return NULL; } static void mvs_free_dev(struct mvs_device *mvi_dev) { u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; mvi_dev->dev_type = SAS_PHY_UNUSED; mvi_dev->dev_status = MVS_DEV_NORMAL; 
mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } static int mvs_dev_found_notify(struct domain_device *dev, int lock) { unsigned long flags = 0; int res = 0; struct mvs_info *mvi = NULL; struct domain_device *parent_dev = dev->parent; struct mvs_device *mvi_device; mvi = mvs_find_dev_mvi(dev); if (lock) spin_lock_irqsave(&mvi->lock, flags); mvi_device = mvs_alloc_dev(mvi); if (!mvi_device) { res = -1; goto found_out; } dev->lldd_dev = mvi_device; mvi_device->dev_status = MVS_DEV_NORMAL; mvi_device->dev_type = dev->dev_type; mvi_device->mvi_info = mvi; mvi_device->sas_device = dev; if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_id; phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); if (phy_id < 0) { mv_printk("Error: no attached dev:%016llx" "at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr)); res = phy_id; } else { mvi_device->attached_phy = phy_id; } } found_out: if (lock) spin_unlock_irqrestore(&mvi->lock, flags); return res; } int mvs_dev_found(struct domain_device *dev) { return mvs_dev_found_notify(dev, 1); } static void mvs_dev_gone_notify(struct domain_device *dev) { unsigned long flags = 0; struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_info *mvi; if (!mvi_dev) { mv_dprintk("found dev has gone.\n"); return; } mvi = mvi_dev->mvi_info; spin_lock_irqsave(&mvi->lock, flags); mv_dprintk("found dev[%d:%x] is gone.\n", mvi_dev->device_id, mvi_dev->dev_type); mvs_release_task(mvi, dev); mvs_free_reg_set(mvi, mvi_dev); mvs_free_dev(mvi_dev); dev->lldd_dev = NULL; mvi_dev->sas_device = NULL; spin_unlock_irqrestore(&mvi->lock, flags); } void mvs_dev_gone(struct domain_device *dev) { mvs_dev_gone_notify(dev); } /* Standard mandates link reset for ATA (type 0) and hard reset for SSP (type 1) , only for RECOVERY */ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); msleep(2000); return rc; } /* mandatory SAM-3 */ int mvs_lu_reset(struct domain_device *dev, u8 *lun) { unsigned long flags; int rc = TMF_RESP_FUNC_FAILED; struct mvs_device * mvi_dev = dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; mvi_dev->dev_status = MVS_DEV_EH; rc = sas_lu_reset(dev, lun); if (rc == TMF_RESP_FUNC_COMPLETE) { spin_lock_irqsave(&mvi->lock, flags); mvs_release_task(mvi, dev); spin_unlock_irqrestore(&mvi->lock, flags); } /* If failed, fall-through I_T_Nexus reset */ mv_printk("%s for device[%x]:rc= %d\n", __func__, mvi_dev->device_id, rc); return rc; } int mvs_I_T_nexus_reset(struct domain_device *dev) { unsigned long flags; int rc = TMF_RESP_FUNC_FAILED; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; if (mvi_dev->dev_status != MVS_DEV_EH) return TMF_RESP_FUNC_COMPLETE; else mvi_dev->dev_status = MVS_DEV_NORMAL; rc = mvs_debug_I_T_nexus_reset(dev); mv_printk("%s for device[%x]:rc= %d\n", __func__, mvi_dev->device_id, rc); spin_lock_irqsave(&mvi->lock, flags); mvs_release_task(mvi, dev); spin_unlock_irqrestore(&mvi->lock, flags); return rc; } /* optional SAM-3 */ int mvs_query_task(struct sas_task *task) { u32 tag; int rc = TMF_RESP_FUNC_FAILED; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi = mvi_dev->mvi_info; rc = mvs_find_tag(mvi, task, &tag); if (rc == 0) { rc = TMF_RESP_FUNC_FAILED; return rc; } rc = sas_query_task(task, tag); switch (rc) { /* The task is still in Lun, release it then */ case TMF_RESP_FUNC_SUCC: /* The task is not in Lun or failed, reset the phy */ case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: break; } } mv_printk("%s:rc= %d\n", __func__, rc); return rc; } /* mandatory SAM-3, still need free task/slot info */ int mvs_abort_task(struct sas_task *task) { struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; struct mvs_info *mvi; int rc = TMF_RESP_FUNC_FAILED; unsigned long flags; u32 tag; if (!mvi_dev) { mv_printk("Device has removed\n"); return TMF_RESP_FUNC_FAILED; } mvi = mvi_dev->mvi_info; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } spin_unlock_irqrestore(&task->task_state_lock, flags); mvi_dev->dev_status = MVS_DEV_EH; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { rc = mvs_find_tag(mvi, task, &tag); if (rc == 0) { mv_printk("No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } rc = sas_abort_task(task, tag); /* if successful, clear the task and callback forwards.*/ if (rc == TMF_RESP_FUNC_COMPLETE) { u32 slot_no; struct mvs_slot_info *slot; if (task->lldd_task) { slot = task->lldd_task; slot_no = (u32) (slot - mvi->slot_info); spin_lock_irqsave(&mvi->lock, flags); mvs_slot_complete(mvi, slot_no, 1); spin_unlock_irqrestore(&mvi->lock, flags); } } } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (SAS_SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " "slot=%p slot_idx=x%x\n", mvi, task, slot, slot_idx); task->task_state_flags |= SAS_TASK_STATE_ABORTED; mvs_slot_task_free(mvi, 
task, slot, slot_idx); rc = TMF_RESP_FUNC_COMPLETE; goto out; } } out: if (rc != TMF_RESP_FUNC_COMPLETE) mv_printk("%s:rc= %d\n", __func__, rc); return rc; } static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx, int err) { struct mvs_device *mvi_dev = task->dev->lldd_dev; struct task_status_struct *tstat = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; int stat = SAM_STAT_GOOD; resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), sizeof(struct dev_to_host_fis)); tstat->buf_valid_size = sizeof(*resp); if (unlikely(err)) { if (unlikely(err & CMD_ISS_STPD)) stat = SAS_OPEN_REJECT; else stat = SAS_PROTO_RESPONSE; } return stat; } static void mvs_set_sense(u8 *buffer, int len, int d_sense, int key, int asc, int ascq) { memset(buffer, 0, len); if (d_sense) { /* Descriptor format */ if (len < 4) { mv_printk("Length %d of sense buffer too small to " "fit sense %x:%x:%x", len, key, asc, ascq); } buffer[0] = 0x72; /* Response Code */ if (len > 1) buffer[1] = key; /* Sense Key */ if (len > 2) buffer[2] = asc; /* ASC */ if (len > 3) buffer[3] = ascq; /* ASCQ */ } else { if (len < 14) { mv_printk("Length %d of sense buffer too small to " "fit sense %x:%x:%x", len, key, asc, ascq); } buffer[0] = 0x70; /* Response Code */ if (len > 2) buffer[2] = key; /* Sense Key */ if (len > 7) buffer[7] = 0x0a; /* Additional Sense Length */ if (len > 12) buffer[12] = asc; /* ASC */ if (len > 13) buffer[13] = ascq; /* ASCQ */ } return; } static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu, u8 key, u8 asc, u8 asc_q) { iu->datapres = SAS_DATAPRES_SENSE_DATA; iu->response_data_len = 0; iu->sense_data_len = 17; iu->status = 02; mvs_set_sense(iu->sense_data, 17, 0, key, asc, asc_q); } static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, u32 slot_idx) { struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; int stat; u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response); u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1)); u32 tfs = 0; enum mvs_port_type type = PORT_TYPE_SAS; if (err_dw0 & CMD_ISS_STPD) MVS_CHIP_DISP->issue_stop(mvi, type, tfs); MVS_CHIP_DISP->command_active(mvi, slot_idx); stat = SAM_STAT_CHECK_CONDITION; switch (task->task_proto) { case SAS_PROTOCOL_SSP: { stat = SAS_ABORTED_TASK; if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) { struct ssp_response_iu *iu = slot->response + sizeof(struct mvs_err_info); mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01); sas_ssp_task_response(mvi->dev, task, iu); stat = SAM_STAT_CHECK_CONDITION; } if (err_dw1 & bit(31)) mv_printk("reuse same slot, retry command.\n"); break; } case SAS_PROTOCOL_SMP: stat = SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { task->ata_task.use_ncq = 0; stat = SAS_PROTO_RESPONSE; mvs_sata_done(mvi, task, slot_idx, err_dw0); } break; default: break; } return stat; } int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; struct sas_task *task = slot->task; struct mvs_device *mvi_dev = NULL; struct task_status_struct *tstat; struct domain_device *dev; u32 aborted; void *to; enum exec_status sts; if (unlikely(!task || !task->lldd_task || !task->dev)) return -1; tstat = &task->task_status; dev = task->dev; mvi_dev = dev->lldd_dev; spin_lock(&task->task_state_lock); task->task_state_flags &= 
~SAS_TASK_STATE_PENDING; task->task_state_flags |= SAS_TASK_STATE_DONE; /* race condition*/ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; spin_unlock(&task->task_state_lock); memset(tstat, 0, sizeof(*tstat)); tstat->resp = SAS_TASK_COMPLETE; if (unlikely(aborted)) { tstat->stat = SAS_ABORTED_TASK; if (mvi_dev && mvi_dev->running_req) mvi_dev->running_req--; if (sas_protocol_ata(task->task_proto)) mvs_free_reg_set(mvi, mvi_dev); mvs_slot_task_free(mvi, task, slot, slot_idx); return -1; } /* when no device attaching, go ahead and complete by error handling*/ if (unlikely(!mvi_dev || flags)) { if (!mvi_dev) mv_dprintk("port has not device.\n"); tstat->stat = SAS_PHY_DOWN; goto out; } /* * error info record present; slot->response is 32 bit aligned but may * not be 64 bit aligned, so check for zero in two 32 bit reads */ if (unlikely((rx_desc & RXQ_ERR) && (*((u32 *)slot->response) || *(((u32 *)slot->response) + 1)))) { mv_dprintk("port %d slot %d rx_desc %X has error info" "%016llX.\n", slot->port->sas_port.id, slot_idx, rx_desc, get_unaligned_le64(slot->response)); tstat->stat = mvs_slot_err(mvi, task, slot_idx); tstat->resp = SAS_TASK_COMPLETE; goto out; } switch (task->task_proto) { case SAS_PROTOCOL_SSP: /* hw says status == 0, datapres == 0 */ if (rx_desc & RXQ_GOOD) { tstat->stat = SAS_SAM_STAT_GOOD; tstat->resp = SAS_TASK_COMPLETE; } /* response frame present */ else if (rx_desc & RXQ_RSP) { struct ssp_response_iu *iu = slot->response + sizeof(struct mvs_err_info); sas_ssp_task_response(mvi->dev, task, iu); } else tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; tstat->stat = SAS_SAM_STAT_GOOD; to = kmap_atomic(sg_page(sg_resp)); memcpy(to + sg_resp->offset, slot->response + sizeof(struct mvs_err_info), sg_dma_len(sg_resp)); kunmap_atomic(to); break; } case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); break; } default: tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; break; } if (!slot->port->port_attached) { mv_dprintk("port %d has removed.\n", slot->port->sas_port.id); tstat->stat = SAS_PHY_DOWN; } out: if (mvi_dev && mvi_dev->running_req) { mvi_dev->running_req--; if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req) mvs_free_reg_set(mvi, mvi_dev); } mvs_slot_task_free(mvi, task, slot, slot_idx); sts = tstat->stat; spin_unlock(&mvi->lock); if (task->task_done) task->task_done(task); spin_lock(&mvi->lock); return sts; } void mvs_do_release_task(struct mvs_info *mvi, int phy_no, struct domain_device *dev) { u32 slot_idx; struct mvs_phy *phy; struct mvs_port *port; struct mvs_slot_info *slot, *slot2; phy = &mvi->phy[phy_no]; port = phy->port; if (!port) return; /* clean cmpl queue in case request is already finished */ mvs_int_rx(mvi, false); list_for_each_entry_safe(slot, slot2, &port->list, entry) { struct sas_task *task; slot_idx = (u32) (slot - mvi->slot_info); task = slot->task; if (dev && task->dev != dev) continue; mv_printk("Release slot [%x] tag[%x], task [%p]:\n", slot_idx, slot->slot_tag, task); MVS_CHIP_DISP->command_active(mvi, slot_idx); mvs_slot_complete(mvi, slot_idx, 1); } } void mvs_release_task(struct mvs_info *mvi, struct domain_device *dev) { int i, phyno[WIDE_PORT_MAX_PHY], num; num = mvs_find_dev_phyno(dev, phyno); for (i = 0; i < num; i++) mvs_do_release_task(mvi, phyno[i], dev); } static void mvs_phy_disconnected(struct mvs_phy *phy) { phy->phy_attached = 0; 
phy->att_dev_info = 0; phy->att_dev_sas_addr = 0; } static void mvs_work_queue(struct work_struct *work) { struct delayed_work *dw = container_of(work, struct delayed_work, work); struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); struct mvs_info *mvi = mwq->mvi; unsigned long flags; u32 phy_no = (unsigned long) mwq->data; struct mvs_phy *phy = &mvi->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; spin_lock_irqsave(&mvi->lock, flags); if (mwq->handler & PHY_PLUG_EVENT) { if (phy->phy_event & PHY_PLUG_OUT) { u32 tmp; tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); phy->phy_event &= ~PHY_PLUG_OUT; if (!(tmp & PHY_READY_MASK)) { sas_phy_disconnected(sas_phy); mvs_phy_disconnected(phy); sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); mv_dprintk("phy%d Removed Device\n", phy_no); } else { MVS_CHIP_DISP->detect_porttype(mvi, phy_no); mvs_update_phyinfo(mvi, phy_no, 1); mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC); mvs_port_notify_formed(sas_phy, 0); mv_dprintk("phy%d Attached Device\n", phy_no); } } } else if (mwq->handler & EXP_BRCT_CHG) { phy->phy_event &= ~EXP_BRCT_CHG; sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); mv_dprintk("phy%d Got Broadcast Change\n", phy_no); } list_del(&mwq->entry); spin_unlock_irqrestore(&mvi->lock, flags); kfree(mwq); } static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) { struct mvs_wq *mwq; int ret = 0; mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); if (mwq) { mwq->mvi = mvi; mwq->data = data; mwq->handler = handler; MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); list_add_tail(&mwq->entry, &mvi->wq_list); schedule_delayed_work(&mwq->work_q, HZ * 2); } else ret = -ENOMEM; return ret; } static void mvs_sig_time_out(struct timer_list *t) { struct mvs_phy *phy = from_timer(phy, t, timer); struct mvs_info *mvi = phy->mvi; u8 phy_no; for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { if (&mvi->phy[phy_no] == phy) { mv_dprintk("Get signature time out, reset phy %d\n", phy_no+mvi->id*mvi->chip->n_phy); MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET); } } } void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) { u32 tmp; struct mvs_phy *phy = &mvi->phy[phy_no]; phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy, MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy, phy->irq_status); /* * events is port event now , * we need check the interrupt status which belongs to per port. 
*/ if (phy->irq_status & PHYEV_DCDR_ERR) { mv_dprintk("phy %d STP decoding error.\n", phy_no + mvi->id*mvi->chip->n_phy); } if (phy->irq_status & PHYEV_POOF) { mdelay(500); if (!(phy->phy_event & PHY_PLUG_OUT)) { int dev_sata = phy->phy_type & PORT_TYPE_SATA; int ready; mvs_do_release_task(mvi, phy_no, NULL); phy->phy_event |= PHY_PLUG_OUT; MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1); mvs_handle_event(mvi, (void *)(unsigned long)phy_no, PHY_PLUG_EVENT); ready = mvs_is_phy_ready(mvi, phy_no); if (ready || dev_sata) { if (MVS_CHIP_DISP->stp_reset) MVS_CHIP_DISP->stp_reset(mvi, phy_no); else MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_SOFT_RESET); return; } } } if (phy->irq_status & PHYEV_COMWAKE) { tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, tmp | PHYEV_SIG_FIS); if (phy->timer.function == NULL) { phy->timer.function = mvs_sig_time_out; phy->timer.expires = jiffies + 5*HZ; add_timer(&phy->timer); } } if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { phy->phy_status = mvs_is_phy_ready(mvi, phy_no); mv_dprintk("notify plug in on phy[%d]\n", phy_no); if (phy->phy_status) { mdelay(10); MVS_CHIP_DISP->detect_porttype(mvi, phy_no); if (phy->phy_type & PORT_TYPE_SATA) { tmp = MVS_CHIP_DISP->read_port_irq_mask( mvi, phy_no); tmp &= ~PHYEV_SIG_FIS; MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, tmp); } mvs_update_phyinfo(mvi, phy_no, 0); if (phy->phy_type & PORT_TYPE_SAS) { MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE); mdelay(10); } mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC); /* whether driver is going to handle hot plug */ if (phy->phy_event & PHY_PLUG_OUT) { mvs_port_notify_formed(&phy->sas_phy, 0); phy->phy_event &= ~PHY_PLUG_OUT; } } else { mv_dprintk("plugin interrupt but phy%d is gone\n", phy_no + mvi->id*mvi->chip->n_phy); } } else if (phy->irq_status & PHYEV_BROAD_CH) { mv_dprintk("phy %d broadcast change.\n", phy_no + mvi->id*mvi->chip->n_phy); mvs_handle_event(mvi, (void *)(unsigned long)phy_no, EXP_BRCT_CHG); } } int mvs_int_rx(struct mvs_info *mvi, bool self_clear) { u32 rx_prod_idx, rx_desc; bool attn = false; /* the first dword in the RX ring is special: it contains * a mirror of the hardware's RX producer index, so that * we don't have to stall the CPU reading that register. * The actual RX ring is offset by one dword, due to this. 
*/ rx_prod_idx = mvi->rx_cons; mvi->rx_cons = le32_to_cpu(mvi->rx[0]); if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ return 0; /* The CMPL_Q may come late, read from register and try again * note: if coalescing is enabled, * it will need to read from register every time for sure */ if (unlikely(mvi->rx_cons == rx_prod_idx)) mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; if (mvi->rx_cons == rx_prod_idx) return 0; while (mvi->rx_cons != rx_prod_idx) { /* increment our internal RX consumer pointer */ rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); if (likely(rx_desc & RXQ_DONE)) mvs_slot_complete(mvi, rx_desc, 0); if (rx_desc & RXQ_ATTN) { attn = true; } else if (rx_desc & RXQ_ERR) { if (!(rx_desc & RXQ_DONE)) mvs_slot_complete(mvi, rx_desc, 0); } else if (rx_desc & RXQ_SLOT_RESET) { mvs_slot_free(mvi, rx_desc); } } if (attn && self_clear) MVS_CHIP_DISP->int_full(mvi); return 0; } int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { struct mvs_prv_info *mvs_prv = sha->lldd_ha; struct mvs_info *mvi = mvs_prv->mvi[0]; if (MVS_CHIP_DISP->gpio_write) { return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type, reg_index, reg_count, write_data); } return -ENOSYS; }
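/*
 * mvs_gpio_write() backs the lldd_write_gpio transport operation: it
 * forwards the request to the chip-specific ->gpio_write hook when the
 * dispatch table provides one and returns -ENOSYS otherwise.
 */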
linux-master
drivers/scsi/mvsas/mv_sas.c
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell 88SE64xx/88SE94xx pci init * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. <[email protected]> * Copyright 2009-2011 Marvell. <[email protected]> */ #include "mv_sas.h" int interrupt_coalescing = 0x80; static struct scsi_transport_template *mvs_stt; static const struct mvs_chip_info mvs_chips[] = { [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, }, [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, }, [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, }, [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, }; static const struct attribute_group *mvst_host_groups[]; #define SOC_SAS_NUM 2 static const struct scsi_host_template mvs_sht = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = sas_queuecommand, .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = mvs_scan_finished, .scan_start = mvs_scan_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .can_queue = 1, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sas_ioctl, #endif .shost_groups = mvst_host_groups, .track_queue_depth = 1, }; static struct sas_domain_function_template mvs_transport_ops = { .lldd_dev_found = mvs_dev_found, .lldd_dev_gone = mvs_dev_gone, .lldd_execute_task = mvs_queue_command, .lldd_control_phy = mvs_phy_control, .lldd_abort_task = mvs_abort_task, .lldd_abort_task_set = sas_abort_task_set, .lldd_clear_task_set = sas_clear_task_set, .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, .lldd_lu_reset = mvs_lu_reset, .lldd_query_task = mvs_query_task, .lldd_port_formed = mvs_port_formed, .lldd_port_deformed = mvs_port_deformed, .lldd_write_gpio = mvs_gpio_write, }; static void mvs_phy_init(struct mvs_info *mvi, int phy_id) { struct mvs_phy *phy = &mvi->phy[phy_id]; struct asd_sas_phy *sas_phy = &phy->sas_phy; phy->mvi = mvi; phy->port = NULL; timer_setup(&phy->timer, NULL, 0); sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; sas_phy->iproto = SAS_PROTOCOL_ALL; sas_phy->tproto = 0; sas_phy->role = PHY_ROLE_INITIATOR; sas_phy->oob_mode = OOB_NOT_CONNECTED; sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; sas_phy->id = phy_id; sas_phy->sas_addr = &mvi->sas_addr[0]; sas_phy->frame_rcvd = &phy->frame_rcvd[0]; sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; sas_phy->lldd_phy = phy; } static void mvs_free(struct mvs_info *mvi) { struct mvs_wq *mwq; int slot_nr; if (!mvi) return; if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; else slot_nr = MVS_CHIP_SLOT_SZ; dma_pool_destroy(mvi->dma_pool); if (mvi->tx) dma_free_coherent(mvi->dev, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, mvi->tx, mvi->tx_dma); if (mvi->rx_fis) dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, mvi->rx_fis, mvi->rx_fis_dma); if (mvi->rx) dma_free_coherent(mvi->dev, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), mvi->rx, mvi->rx_dma); if (mvi->slot) dma_free_coherent(mvi->dev, sizeof(*mvi->slot) * slot_nr, mvi->slot, mvi->slot_dma); if (mvi->bulk_buffer) dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, mvi->bulk_buffer, mvi->bulk_buffer_dma); if (mvi->bulk_buffer1) dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, mvi->bulk_buffer1, mvi->bulk_buffer_dma1); MVS_CHIP_DISP->chip_iounmap(mvi); if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) cancel_delayed_work(&mwq->work_q); kfree(mvi->rsvd_tags); kfree(mvi); } #ifdef CONFIG_SCSI_MVSAS_TASKLET static void mvs_tasklet(unsigned long opaque) { u32 stat; u16 core_nr, i = 0; struct mvs_info *mvi; struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) BUG_ON(1); stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq); if (!stat) goto out; for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat); } out: MVS_CHIP_DISP->interrupt_enable(mvi); } #endif static irqreturn_t mvs_interrupt(int irq, void *opaque) { u32 stat; struct mvs_info *mvi; struct sas_ha_struct *sha = opaque; #ifndef CONFIG_SCSI_MVSAS_TASKLET u32 i; u32 core_nr; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; #endif mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) return IRQ_NONE; #ifdef CONFIG_SCSI_MVSAS_TASKLET MVS_CHIP_DISP->interrupt_disable(mvi); #endif stat = MVS_CHIP_DISP->isr_status(mvi, irq); if (!stat) { #ifdef CONFIG_SCSI_MVSAS_TASKLET MVS_CHIP_DISP->interrupt_enable(mvi); #endif return IRQ_NONE; } #ifdef CONFIG_SCSI_MVSAS_TASKLET tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #else for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; MVS_CHIP_DISP->isr(mvi, irq, stat); } #endif return IRQ_HANDLED; } static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) { int i = 0, slot_nr; char pool_name[32]; if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; else slot_nr = MVS_CHIP_SLOT_SZ; spin_lock_init(&mvi->lock); for (i = 0; i < mvi->chip->n_phy; i++) { mvs_phy_init(mvi, i); mvi->port[i].wide_port_phymap = 0; mvi->port[i].port_attached = 0; INIT_LIST_HEAD(&mvi->port[i].list); } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; } /* * alloc and init our DMA areas */ mvi->tx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, 
&mvi->tx_dma, GFP_KERNEL); if (!mvi->tx) goto err_out; mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, &mvi->rx_fis_dma, GFP_KERNEL); if (!mvi->rx_fis) goto err_out; mvi->rx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), &mvi->rx_dma, GFP_KERNEL); if (!mvi->rx) goto err_out; mvi->rx[0] = cpu_to_le32(0xfff); mvi->rx_cons = 0xfff; mvi->slot = dma_alloc_coherent(mvi->dev, sizeof(*mvi->slot) * slot_nr, &mvi->slot_dma, GFP_KERNEL); if (!mvi->slot) goto err_out; mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, TRASH_BUCKET_SIZE, &mvi->bulk_buffer_dma, GFP_KERNEL); if (!mvi->bulk_buffer) goto err_out; mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev, TRASH_BUCKET_SIZE, &mvi->bulk_buffer_dma1, GFP_KERNEL); if (!mvi->bulk_buffer1) goto err_out; sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev, MVS_SLOT_BUF_SZ, 16, 0); if (!mvi->dma_pool) { printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; } return 0; err_out: return 1; } int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) { unsigned long res_start, res_len, res_flag_ex = 0; struct pci_dev *pdev = mvi->pdev; if (bar_ex != -1) { /* * ioremap main and peripheral registers */ res_start = pci_resource_start(pdev, bar_ex); res_len = pci_resource_len(pdev, bar_ex); if (!res_start || !res_len) goto err_out; res_flag_ex = pci_resource_flags(pdev, bar_ex); if (res_flag_ex & IORESOURCE_MEM) mvi->regs_ex = ioremap(res_start, res_len); else mvi->regs_ex = (void *)res_start; if (!mvi->regs_ex) goto err_out; } res_start = pci_resource_start(pdev, bar); res_len = pci_resource_len(pdev, bar); if (!res_start || !res_len) { iounmap(mvi->regs_ex); mvi->regs_ex = NULL; goto err_out; } mvi->regs = ioremap(res_start, res_len); if (!mvi->regs) { if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) iounmap(mvi->regs_ex); mvi->regs_ex = NULL; goto err_out; } return 0; err_out: return -1; } void mvs_iounmap(void __iomem *regs) { iounmap(regs); } static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, struct Scsi_Host *shost, unsigned int id) { struct mvs_info *mvi = NULL; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); mvi = kzalloc(sizeof(*mvi) + (1L << mvs_chips[ent->driver_data].slot_width) * sizeof(struct mvs_slot_info), GFP_KERNEL); if (!mvi) return NULL; mvi->pdev = pdev; mvi->dev = &pdev->dev; mvi->chip_id = ent->driver_data; mvi->chip = &mvs_chips[mvi->chip_id]; INIT_LIST_HEAD(&mvi->wq_list); ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; mvi->id = id; mvi->sas = sha; mvi->shost = shost; mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL); if (!mvi->rsvd_tags) goto err_out; if (MVS_CHIP_DISP->chip_ioremap(mvi)) goto err_out; if (!mvs_alloc(mvi, shost)) return mvi; err_out: mvs_free(mvi); return NULL; } static int pci_go_64(struct pci_dev *pdev) { int rc; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) { rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "32-bit DMA enable failed\n"); return rc; } } return rc; } static int mvs_prep_sas_ha_init(struct Scsi_Host *shost, const struct mvs_chip_info *chip_info) { int phy_nr, port_nr; unsigned short core_nr; struct asd_sas_phy **arr_phy; struct asd_sas_port **arr_port; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); core_nr = chip_info->n_host; phy_nr = core_nr * chip_info->n_phy; port_nr = phy_nr; 
memset(sha, 0x00, sizeof(struct sas_ha_struct)); arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); if (!arr_phy || !arr_port) goto exit_free; sha->sas_phy = arr_phy; sha->sas_port = arr_port; sha->shost = shost; sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); if (!sha->lldd_ha) goto exit_free; ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; shost->transportt = mvs_stt; shost->max_id = MVS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; shost->max_cmd_len = 16; return 0; exit_free: kfree(arr_phy); kfree(arr_port); return -1; } static void mvs_post_sas_ha_init(struct Scsi_Host *shost, const struct mvs_chip_info *chip_info) { int can_queue, i = 0, j = 0; struct mvs_info *mvi = NULL; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; for (j = 0; j < nr_core; j++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; for (i = 0; i < chip_info->n_phy; i++) { sha->sas_phy[j * chip_info->n_phy + i] = &mvi->phy[i].sas_phy; sha->sas_port[j * chip_info->n_phy + i] = &mvi->port[i].sas_port; } } sha->sas_ha_name = DRV_NAME; sha->dev = mvi->dev; sha->sas_addr = &mvi->sas_addr[0]; sha->num_phys = nr_core * chip_info->n_phy; if (mvi->flags & MVF_FLAG_SOC) can_queue = MVS_SOC_CAN_QUEUE; else can_queue = MVS_CHIP_SLOT_SZ; can_queue -= MVS_RSVD_SLOTS; shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; sha->shost = mvi->shost; } static void mvs_init_sas_add(struct mvs_info *mvi) { u8 i; for (i = 0; i < mvi->chip->n_phy; i++) { mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; mvi->phy[i].dev_sas_addr = cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); } memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); } static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int rc, nhost = 0; struct mvs_info *mvi; irq_handler_t irq_handler = mvs_interrupt; struct Scsi_Host *shost = NULL; const struct mvs_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, "mvsas: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; pci_set_master(pdev); rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out_disable; rc = pci_go_64(pdev); if (rc) goto err_out_regions; shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); if (!shost) { rc = -ENOMEM; goto err_out_regions; } chip = &mvs_chips[ent->driver_data]; SHOST_TO_SAS_HA(shost) = kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); if (!SHOST_TO_SAS_HA(shost)) { scsi_host_put(shost); rc = -ENOMEM; goto err_out_regions; } rc = mvs_prep_sas_ha_init(shost, chip); if (rc) { scsi_host_put(shost); rc = -ENOMEM; goto err_out_regions; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); do { mvi = mvs_pci_alloc(pdev, ent, shost, nhost); if (!mvi) { rc = -ENOMEM; goto err_out_regions; } memset(&mvi->hba_info_param, 0xFF, sizeof(struct hba_info_page)); mvs_init_sas_add(mvi); mvi->instance = nhost; rc = MVS_CHIP_DISP->chip_init(mvi); if (rc) { mvs_free(mvi); goto err_out_regions; } nhost++; } while (nhost < chip->n_host); #ifdef CONFIG_SCSI_MVSAS_TASKLET { struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha; tasklet_init(&(mpi->mv_tasklet), mvs_tasklet, (unsigned long)SHOST_TO_SAS_HA(shost)); } #endif mvs_post_sas_ha_init(shost, chip); rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_shost; rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) 
goto err_out_shost; rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, SHOST_TO_SAS_HA(shost)); if (rc) goto err_not_sas; MVS_CHIP_DISP->interrupt_enable(mvi); scsi_scan_host(mvi->shost); return 0; err_not_sas: sas_unregister_ha(SHOST_TO_SAS_HA(shost)); err_out_shost: scsi_remove_host(mvi->shost); err_out_regions: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); err_out_enable: return rc; } static void mvs_pci_remove(struct pci_dev *pdev) { unsigned short core_nr, i = 0; struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct mvs_info *mvi = NULL; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; #ifdef CONFIG_SCSI_MVSAS_TASKLET tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #endif sas_unregister_ha(sha); sas_remove_host(mvi->shost); MVS_CHIP_DISP->interrupt_disable(mvi); free_irq(mvi->pdev->irq, sha); for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; mvs_free(mvi); } kfree(sha->sas_phy); kfree(sha->sas_port); kfree(sha); pci_release_regions(pdev); pci_disable_device(pdev); return; } static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, { .vendor = PCI_VENDOR_ID_MARVELL, .device = 0x6440, .subvendor = PCI_ANY_ID, .subdevice = 0x6480, .class = 0, .class_mask = 0, .driver_data = chip_6485, }, { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, { .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9480, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, .class = 0, .class_mask = 0, .driver_data = chip_9480, }, { .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9445, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, .class = 0, .class_mask = 0, .driver_data = chip_9445, }, { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */ { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { } /* terminate list */ }; static struct pci_driver mvs_pci_driver = { .name = DRV_NAME, .id_table = mvs_pci_table, .probe = 
mvs_pci_init, .remove = mvs_pci_remove, }; static ssize_t driver_version_show(struct device *cdev, struct device_attribute *attr, char *buffer) { return sysfs_emit(buffer, "%s\n", DRV_VERSION); } static DEVICE_ATTR_RO(driver_version); static ssize_t interrupt_coalescing_store(struct device *cdev, struct device_attribute *attr, const char *buffer, size_t size) { unsigned int val = 0; struct mvs_info *mvi = NULL; struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); u8 i, core_nr; if (buffer == NULL) return size; if (sscanf(buffer, "%u", &val) != 1) return -EINVAL; if (val >= 0x10000) { mv_dprintk("interrupt coalescing timer %d us is" "too long\n", val); return strlen(buffer); } interrupt_coalescing = val; core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; if (unlikely(!mvi)) return -EINVAL; for (i = 0; i < core_nr; i++) { mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; if (MVS_CHIP_DISP->tune_interrupt) MVS_CHIP_DISP->tune_interrupt(mvi, interrupt_coalescing); } mv_dprintk("set interrupt coalescing time to %d us\n", interrupt_coalescing); return strlen(buffer); } static ssize_t interrupt_coalescing_show(struct device *cdev, struct device_attribute *attr, char *buffer) { return sysfs_emit(buffer, "%d\n", interrupt_coalescing); } static DEVICE_ATTR_RW(interrupt_coalescing); static int __init mvs_init(void) { int rc; mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); if (!mvs_stt) return -ENOMEM; rc = pci_register_driver(&mvs_pci_driver); if (rc) goto err_out; return 0; err_out: sas_release_transport(mvs_stt); return rc; } static void __exit mvs_exit(void) { pci_unregister_driver(&mvs_pci_driver); sas_release_transport(mvs_stt); } static struct attribute *mvst_host_attrs[] = { &dev_attr_driver_version.attr, &dev_attr_interrupt_coalescing.attr, NULL, }; ATTRIBUTE_GROUPS(mvst_host); module_init(mvs_init); module_exit(mvs_exit); MODULE_AUTHOR("Jeff Garzik <[email protected]>"); MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); #ifdef CONFIG_PCI MODULE_DEVICE_TABLE(pci, mvs_pci_table); #endif
linux-master
drivers/scsi/mvsas/mv_init.c
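mvs_pci_init() in the record above acquires its resources in a fixed order (enable the PCI device, request regions, set the DMA mask, allocate and register the host, hook up the IRQ) and, when any step fails, releases them in reverse through a ladder of goto labels. The short standalone program below sketches that acquire-in-order / release-in-reverse idiom under made-up resource names; it illustrates the pattern only and is not driver code.

/*
 * Standalone sketch (not driver code) of the goto-based error-unwind
 * ladder used by the probe routine above: each acquisition gets a
 * matching cleanup label, and failures jump to progressively earlier
 * labels so everything already claimed is released in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static int demo_probe(void)
{
	char *device = NULL, *regions = NULL, *irq = NULL;

	device = malloc(8);            /* step 1: "enable" the device   */
	if (!device)
		goto err_out;

	regions = malloc(8);           /* step 2: "request" its regions */
	if (!regions)
		goto err_disable;

	irq = malloc(8);               /* step 3: "hook up" the IRQ     */
	if (!irq)
		goto err_release_regions;

	printf("probe succeeded\n");

	/* a real probe keeps these until remove(); the demo frees them */
	free(irq);
	free(regions);
	free(device);
	return 0;

err_release_regions:
	free(regions);
err_disable:
	free(device);
err_out:
	return -1;
}

int main(void)
{
	return demo_probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}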
/* * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. * * Copyright (c) 2010-2015 Chelsio Communications, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Karen Xie ([email protected]) * Written by: Rakesh Ranjan ([email protected]) */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/skbuff.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <linux/if_vlan.h> #include <linux/inet.h> #include <net/dst.h> #include <net/route.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <linux/inetdevice.h> /* ip_dev_find */ #include <linux/module.h> #include <net/tcp.h> static unsigned int dbg_level; #include "libcxgbi.h" #define DRV_MODULE_NAME "libcxgbi" #define DRV_MODULE_DESC "Chelsio iSCSI driver library" #define DRV_MODULE_VERSION "0.9.1-ko" #define DRV_MODULE_RELDATE "Apr. 2015" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Chelsio Communications, Inc."); MODULE_DESCRIPTION(DRV_MODULE_DESC); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_LICENSE("GPL"); module_param(dbg_level, uint, 0644); MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); /* * cxgbi device management * maintains a list of the cxgbi devices */ static LIST_HEAD(cdev_list); static DEFINE_MUTEX(cdev_mutex); static LIST_HEAD(cdev_rcu_list); static DEFINE_SPINLOCK(cdev_rcu_lock); static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) { if (age) *age = sw_tag & 0x7FFF; if (idx) *idx = (sw_tag >> 16) & 0x7FFF; } int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, unsigned int max_conn) { struct cxgbi_ports_map *pmap = &cdev->pmap; pmap->port_csk = kvzalloc(array_size(max_conn, sizeof(struct cxgbi_sock *)), GFP_KERNEL | __GFP_NOWARN); if (!pmap->port_csk) { pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); return -ENOMEM; } pmap->max_connect = max_conn; pmap->sport_base = base; spin_lock_init(&pmap->lock); return 0; } EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) { struct cxgbi_ports_map *pmap = &cdev->pmap; struct cxgbi_sock *csk; int i; for (i = 0; i < pmap->max_connect; i++) { if (pmap->port_csk[i]) { csk = pmap->port_csk[i]; pmap->port_csk[i] = NULL; log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p, cdev 0x%p, offload down.\n", csk, cdev); spin_lock_bh(&csk->lock); cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); cxgbi_sock_closed(csk); spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } } } EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) { log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); if (cdev->cdev2ppm) cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) kvfree(cdev->pmap.port_csk); kfree(cdev); } struct cxgbi_device *cxgbi_device_register(unsigned int extra, unsigned int nports) { struct cxgbi_device *cdev; cdev = kzalloc(sizeof(*cdev) + extra + nports * (sizeof(struct cxgbi_hba *) + sizeof(struct net_device *)), GFP_KERNEL); if (!cdev) { pr_warn("nport %d, OOM.\n", nports); return NULL; } cdev->ports = (struct net_device **)(cdev + 1); 
cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * sizeof(struct net_device *)); if (extra) cdev->dd_data = ((char *)cdev->hbas) + nports * sizeof(struct cxgbi_hba *); spin_lock_init(&cdev->pmap.lock); mutex_lock(&cdev_mutex); list_add_tail(&cdev->list_head, &cdev_list); mutex_unlock(&cdev_mutex); spin_lock(&cdev_rcu_lock); list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); spin_unlock(&cdev_rcu_lock); log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u.\n", cdev, nports); return cdev; } EXPORT_SYMBOL_GPL(cxgbi_device_register); void cxgbi_device_unregister(struct cxgbi_device *cdev) { log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u,%s.\n", cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); mutex_lock(&cdev_mutex); list_del(&cdev->list_head); mutex_unlock(&cdev_mutex); spin_lock(&cdev_rcu_lock); list_del_rcu(&cdev->rcu_node); spin_unlock(&cdev_rcu_lock); synchronize_rcu(); cxgbi_device_destroy(cdev); } EXPORT_SYMBOL_GPL(cxgbi_device_unregister); void cxgbi_device_unregister_all(unsigned int flag) { struct cxgbi_device *cdev, *tmp; mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { if ((cdev->flags & flag) == flag) { mutex_unlock(&cdev_mutex); cxgbi_device_unregister(cdev); mutex_lock(&cdev_mutex); } } mutex_unlock(&cdev_mutex); } EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) { struct cxgbi_device *cdev, *tmp; mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { if (cdev->lldev == lldev) { mutex_unlock(&cdev_mutex); return cdev; } } mutex_unlock(&cdev_mutex); log_debug(1 << CXGBI_DBG_DEV, "lldev 0x%p, NO match found.\n", lldev); return NULL; } EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, int *port) { struct net_device *vdev = NULL; struct cxgbi_device *cdev, *tmp; int i; if (is_vlan_dev(ndev)) { vdev = ndev; ndev = vlan_dev_real_dev(ndev); log_debug(1 << CXGBI_DBG_DEV, "vlan dev %s -> %s.\n", vdev->name, ndev->name); } mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { for (i = 0; i < cdev->nports; i++) { if (ndev == cdev->ports[i]) { cdev->hbas[i]->vdev = vdev; mutex_unlock(&cdev_mutex); if (port) *port = i; return cdev; } } } mutex_unlock(&cdev_mutex); log_debug(1 << CXGBI_DBG_DEV, "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); return NULL; } EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, int *port) { struct net_device *vdev = NULL; struct cxgbi_device *cdev; int i; if (is_vlan_dev(ndev)) { vdev = ndev; ndev = vlan_dev_real_dev(ndev); pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); } rcu_read_lock(); list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { for (i = 0; i < cdev->nports; i++) { if (ndev == cdev->ports[i]) { cdev->hbas[i]->vdev = vdev; rcu_read_unlock(); if (port) *port = i; return cdev; } } } rcu_read_unlock(); log_debug(1 << CXGBI_DBG_DEV, "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); return NULL; } EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, int *port) { struct net_device *vdev = NULL; struct cxgbi_device *cdev, *tmp; int i; if (is_vlan_dev(ndev)) { vdev = ndev; ndev = vlan_dev_real_dev(ndev); pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); } mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, 
&cdev_list, list_head) { for (i = 0; i < cdev->nports; i++) { if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, MAX_ADDR_LEN)) { cdev->hbas[i]->vdev = vdev; mutex_unlock(&cdev_mutex); if (port) *port = i; return cdev; } } } mutex_unlock(&cdev_mutex); log_debug(1 << CXGBI_DBG_DEV, "ndev 0x%p, %s, NO match mac found.\n", ndev, ndev->name); return NULL; } void cxgbi_hbas_remove(struct cxgbi_device *cdev) { int i; struct cxgbi_hba *chba; log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); for (i = 0; i < cdev->nports; i++) { chba = cdev->hbas[i]; if (chba) { cdev->hbas[i] = NULL; iscsi_host_remove(chba->shost, false); pci_dev_put(cdev->pdev); iscsi_host_free(chba->shost); } } } EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, unsigned int max_conns, const struct scsi_host_template *sht, struct scsi_transport_template *stt) { struct cxgbi_hba *chba; struct Scsi_Host *shost; int i, err; log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); for (i = 0; i < cdev->nports; i++) { shost = iscsi_host_alloc(sht, sizeof(*chba), 1); if (!shost) { pr_info("0x%p, p%d, %s, host alloc failed.\n", cdev, i, cdev->ports[i]->name); err = -ENOMEM; goto err_out; } shost->transportt = stt; shost->max_lun = max_lun; shost->max_id = max_conns - 1; shost->max_channel = 0; shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; chba = iscsi_host_priv(shost); chba->cdev = cdev; chba->ndev = cdev->ports[i]; chba->shost = shost; shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX; log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%d %s: chba 0x%p.\n", cdev, i, cdev->ports[i]->name, chba); pci_dev_get(cdev->pdev); err = iscsi_host_add(shost, &cdev->pdev->dev); if (err) { pr_info("cdev 0x%p, p#%d %s, host add failed.\n", cdev, i, cdev->ports[i]->name); pci_dev_put(cdev->pdev); scsi_host_put(shost); goto err_out; } cdev->hbas[i] = chba; } return 0; err_out: cxgbi_hbas_remove(cdev); return err; } EXPORT_SYMBOL_GPL(cxgbi_hbas_add); /* * iSCSI offload * * - source port management * To find a free source port in the port allocation map we use a very simple * rotor scheme to look for the next free port. * * If a source port has been specified make sure that it doesn't collide with * our normal source port allocation map. If it's outside the range of our * allocation/deallocation scheme just let them use it. * * If the source port is outside our allocation range, the caller is * responsible for keeping track of their port usage. 
*/ static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev, unsigned char port_id) { struct cxgbi_ports_map *pmap = &cdev->pmap; unsigned int i; unsigned int used; if (!pmap->max_connect || !pmap->used) return NULL; spin_lock_bh(&pmap->lock); used = pmap->used; for (i = 0; used && i < pmap->max_connect; i++) { struct cxgbi_sock *csk = pmap->port_csk[i]; if (csk) { if (csk->port_id == port_id) { spin_unlock_bh(&pmap->lock); return csk; } used--; } } spin_unlock_bh(&pmap->lock); return NULL; } static int sock_get_port(struct cxgbi_sock *csk) { struct cxgbi_device *cdev = csk->cdev; struct cxgbi_ports_map *pmap = &cdev->pmap; unsigned int start; int idx; __be16 *port; if (!pmap->max_connect) { pr_err("cdev 0x%p, p#%u %s, NO port map.\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name); return -EADDRNOTAVAIL; } if (csk->csk_family == AF_INET) port = &csk->saddr.sin_port; else /* ipv6 */ port = &csk->saddr6.sin6_port; if (*port) { pr_err("source port NON-ZERO %u.\n", ntohs(*port)); return -EADDRINUSE; } spin_lock_bh(&pmap->lock); if (pmap->used >= pmap->max_connect) { spin_unlock_bh(&pmap->lock); pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name); return -EADDRNOTAVAIL; } start = idx = pmap->next; do { if (++idx >= pmap->max_connect) idx = 0; if (!pmap->port_csk[idx]) { pmap->used++; *port = htons(pmap->sport_base + idx); pmap->next = idx; pmap->port_csk[idx] = csk; spin_unlock_bh(&pmap->lock); cxgbi_sock_get(csk); log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, p#%u %s, p %u, %u.\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name, pmap->sport_base + idx, pmap->next); return 0; } } while (idx != start); spin_unlock_bh(&pmap->lock); /* should not happen */ pr_warn("cdev 0x%p, p#%u %s, next %u?\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name, pmap->next); return -EADDRNOTAVAIL; } static void sock_put_port(struct cxgbi_sock *csk) { struct cxgbi_device *cdev = csk->cdev; struct cxgbi_ports_map *pmap = &cdev->pmap; __be16 *port; if (csk->csk_family == AF_INET) port = &csk->saddr.sin_port; else /* ipv6 */ port = &csk->saddr6.sin6_port; if (*port) { int idx = ntohs(*port) - pmap->sport_base; *port = 0; if (idx < 0 || idx >= pmap->max_connect) { pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name, ntohs(*port)); return; } spin_lock_bh(&pmap->lock); pmap->port_csk[idx] = NULL; pmap->used--; spin_unlock_bh(&pmap->lock); log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, p#%u %s, release %u.\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name, pmap->sport_base + idx); cxgbi_sock_put(csk); } } /* * iscsi tcp connection */ void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) { if (csk->cpl_close) { kfree_skb(csk->cpl_close); csk->cpl_close = NULL; } if (csk->cpl_abort_req) { kfree_skb(csk->cpl_abort_req); csk->cpl_abort_req = NULL; } if (csk->cpl_abort_rpl) { kfree_skb(csk->cpl_abort_rpl); csk->cpl_abort_rpl = NULL; } } EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) { struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); if (!csk) { pr_info("alloc csk %zu failed.\n", sizeof(*csk)); return NULL; } if (cdev->csk_alloc_cpls(csk) < 0) { pr_info("csk 0x%p, alloc cpls failed.\n", csk); kfree(csk); return NULL; } spin_lock_init(&csk->lock); kref_init(&csk->refcnt); skb_queue_head_init(&csk->receive_queue); skb_queue_head_init(&csk->write_queue); timer_setup(&csk->retry_timer, NULL, 0); init_completion(&csk->cmpl); 
rwlock_init(&csk->callback_lock); csk->cdev = cdev; csk->flags = 0; cxgbi_sock_set_state(csk, CTP_CLOSED); log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); return csk; } static struct rtable *find_route_ipv4(struct flowi4 *fl4, __be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u8 tos, int ifindex) { struct rtable *rt; rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, dport, sport, IPPROTO_TCP, tos, ifindex); if (IS_ERR(rt)) return NULL; return rt; } static struct cxgbi_sock * cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) { struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; struct dst_entry *dst; struct net_device *ndev; struct cxgbi_device *cdev; struct rtable *rt = NULL; struct neighbour *n; struct flowi4 fl4; struct cxgbi_sock *csk = NULL; unsigned int mtu = 0; int port = 0xFFFF; int err = 0; rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0, ifindex); if (!rt) { pr_info("no route to ipv4 0x%x, port %u.\n", be32_to_cpu(daddr->sin_addr.s_addr), be16_to_cpu(daddr->sin_port)); err = -ENETUNREACH; goto err_out; } dst = &rt->dst; n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); if (!n) { err = -ENODEV; goto rel_rt; } ndev = n->dev; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { pr_info("multi-cast route %pI4, port %u, dev %s.\n", &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), ndev->name); err = -ENETUNREACH; goto rel_neigh; } if (ndev->flags & IFF_LOOPBACK) { ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); if (!ndev) { err = -ENETUNREACH; goto rel_neigh; } mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", n->dev->name, ndev->name, mtu); } if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { pr_info("%s interface not up.\n", ndev->name); err = -ENETDOWN; goto rel_neigh; } cdev = cxgbi_device_find_by_netdev(ndev, &port); if (!cdev) cdev = cxgbi_device_find_by_mac(ndev, &port); if (!cdev) { pr_info("dst %pI4, %s, NOT cxgbi device.\n", &daddr->sin_addr.s_addr, ndev->name); err = -ENETUNREACH; goto rel_neigh; } log_debug(1 << CXGBI_DBG_SOCK, "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), port, ndev->name, cdev); csk = cxgbi_sock_create(cdev); if (!csk) { err = -ENOMEM; goto rel_neigh; } csk->cdev = cdev; csk->port_id = port; csk->mtu = mtu; csk->dst = dst; csk->csk_family = AF_INET; csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; csk->daddr.sin_port = daddr->sin_port; csk->daddr.sin_family = daddr->sin_family; csk->saddr.sin_family = daddr->sin_family; csk->saddr.sin_addr.s_addr = fl4.saddr; neigh_release(n); return csk; rel_neigh: neigh_release(n); rel_rt: ip_rt_put(rt); err_out: return ERR_PTR(err); } #if IS_ENABLED(CONFIG_IPV6) static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, const struct in6_addr *daddr, int ifindex) { struct flowi6 fl; memset(&fl, 0, sizeof(fl)); fl.flowi6_oif = ifindex; if (saddr) memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); if (daddr) memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); } static struct cxgbi_sock * cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex) { struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; struct dst_entry *dst; struct net_device *ndev; struct cxgbi_device *cdev; struct rt6_info *rt = NULL; struct neighbour *n; struct in6_addr pref_saddr; struct cxgbi_sock *csk = NULL; unsigned int mtu = 0; int port = 0xFFFF; int err = 0; rt = find_route_ipv6(NULL, 
&daddr6->sin6_addr, ifindex); if (!rt) { pr_info("no route to ipv6 %pI6 port %u\n", daddr6->sin6_addr.s6_addr, be16_to_cpu(daddr6->sin6_port)); err = -ENETUNREACH; goto err_out; } dst = &rt->dst; n = dst_neigh_lookup(dst, &daddr6->sin6_addr); if (!n) { pr_info("%pI6, port %u, dst no neighbour.\n", daddr6->sin6_addr.s6_addr, be16_to_cpu(daddr6->sin6_port)); err = -ENETUNREACH; goto rel_rt; } ndev = n->dev; if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { pr_info("%s interface not up.\n", ndev->name); err = -ENETDOWN; goto rel_rt; } if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) { pr_info("multi-cast route %pI6 port %u, dev %s.\n", daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), ndev->name); err = -ENETUNREACH; goto rel_rt; } cdev = cxgbi_device_find_by_netdev(ndev, &port); if (!cdev) cdev = cxgbi_device_find_by_mac(ndev, &port); if (!cdev) { pr_info("dst %pI6 %s, NOT cxgbi device.\n", daddr6->sin6_addr.s6_addr, ndev->name); err = -ENETUNREACH; goto rel_rt; } log_debug(1 << CXGBI_DBG_SOCK, "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, ndev->name, cdev); csk = cxgbi_sock_create(cdev); if (!csk) { err = -ENOMEM; goto rel_rt; } csk->cdev = cdev; csk->port_id = port; csk->mtu = mtu; csk->dst = dst; rt6_get_prefsrc(rt, &pref_saddr); if (ipv6_addr_any(&pref_saddr)) { struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL, &daddr6->sin6_addr, 0, &pref_saddr); if (err) { pr_info("failed to get source address to reach %pI6\n", &daddr6->sin6_addr); goto rel_rt; } } csk->csk_family = AF_INET6; csk->daddr6.sin6_addr = daddr6->sin6_addr; csk->daddr6.sin6_port = daddr6->sin6_port; csk->daddr6.sin6_family = daddr6->sin6_family; csk->saddr6.sin6_family = daddr6->sin6_family; csk->saddr6.sin6_addr = pref_saddr; neigh_release(n); return csk; rel_rt: if (n) neigh_release(n); ip6_rt_put(rt); if (csk) cxgbi_sock_closed(csk); err_out: return ERR_PTR(err); } #endif /* IS_ENABLED(CONFIG_IPV6) */ void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, unsigned int opt) { csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; dst_confirm(csk->dst); smp_mb(); cxgbi_sock_set_state(csk, CTP_ESTABLISHED); } EXPORT_SYMBOL_GPL(cxgbi_sock_established); static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) { log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", csk, csk->state, csk->flags, csk->user_data); if (csk->state != CTP_ESTABLISHED) { read_lock_bh(&csk->callback_lock); if (csk->user_data) iscsi_conn_failure(csk->user_data, ISCSI_ERR_TCP_CONN_CLOSE); read_unlock_bh(&csk->callback_lock); } } void cxgbi_sock_closed(struct cxgbi_sock *csk) { log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, (csk)->state, (csk)->flags, (csk)->tid); cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) return; if (csk->saddr.sin_port) sock_put_port(csk); if (csk->dst) dst_release(csk->dst); csk->cdev->csk_release_offload_resources(csk); cxgbi_sock_set_state(csk, CTP_CLOSED); cxgbi_inform_iscsi_conn_closing(csk); cxgbi_sock_put(csk); } EXPORT_SYMBOL_GPL(cxgbi_sock_closed); static void need_active_close(struct cxgbi_sock *csk) { int data_lost; int close_req = 0; log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, (csk)->state, (csk)->flags, (csk)->tid); spin_lock_bh(&csk->lock); if (csk->dst) dst_confirm(csk->dst); data_lost = 
skb_queue_len(&csk->receive_queue); __skb_queue_purge(&csk->receive_queue); if (csk->state == CTP_ACTIVE_OPEN) cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); else if (csk->state == CTP_ESTABLISHED) { close_req = 1; cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); } else if (csk->state == CTP_PASSIVE_CLOSE) { close_req = 1; cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); } if (close_req) { if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || data_lost) csk->cdev->csk_send_abort_req(csk); else csk->cdev->csk_send_close_req(csk); } spin_unlock_bh(&csk->lock); } void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) { pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", csk, csk->state, csk->flags, &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, errno); cxgbi_sock_set_state(csk, CTP_CONNECTING); csk->err = errno; cxgbi_sock_closed(csk); } EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) { struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; struct module *owner = csk->cdev->owner; log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, (csk)->state, (csk)->flags, (csk)->tid); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (csk->state == CTP_ACTIVE_OPEN) cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); __kfree_skb(skb); module_put(owner); } EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) { cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", csk, csk->state, csk->flags, csk->tid); cxgbi_sock_closed(csk); } spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) { log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, (csk)->state, (csk)->flags, (csk)->tid); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) goto done; switch (csk->state) { case CTP_ESTABLISHED: cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); break; case CTP_ACTIVE_CLOSE: cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); break; case CTP_CLOSE_WAIT_1: cxgbi_sock_closed(csk); break; case CTP_ABORTING: break; default: pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); } cxgbi_inform_iscsi_conn_closing(csk); done: spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) { log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, (csk)->state, (csk)->flags, (csk)->tid); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); csk->snd_una = snd_nxt - 1; if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) goto done; switch (csk->state) { case CTP_ACTIVE_CLOSE: cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); break; case CTP_CLOSE_WAIT_1: case CTP_CLOSE_WAIT_2: cxgbi_sock_closed(csk); break; case CTP_ABORTING: break; default: pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); } done: spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int 
credits, unsigned int snd_una, int seq_chk) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", csk, csk->state, csk->flags, csk->tid, credits, csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); spin_lock_bh(&csk->lock); csk->wr_cred += credits; if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; while (credits) { struct sk_buff *p = cxgbi_sock_peek_wr(csk); if (unlikely(!p)) { pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", csk, csk->state, csk->flags, csk->tid, credits, csk->wr_cred, csk->wr_una_cred); break; } if (unlikely(credits < p->csum)) { pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", csk, csk->state, csk->flags, csk->tid, credits, csk->wr_cred, csk->wr_una_cred, p->csum); p->csum -= credits; break; } else { cxgbi_sock_dequeue_wr(csk); credits -= p->csum; kfree_skb(p); } } cxgbi_sock_check_wr_invariants(csk); if (seq_chk) { if (unlikely(before(snd_una, csk->snd_una))) { pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", csk, csk->state, csk->flags, csk->tid, snd_una, csk->snd_una); goto done; } if (csk->snd_una != snd_una) { csk->snd_una = snd_una; dst_confirm(csk->dst); } } if (skb_queue_len(&csk->write_queue)) { if (csk->cdev->csk_push_tx_frames(csk, 0)) cxgbi_conn_tx_open(csk); } else cxgbi_conn_tx_open(csk); done: spin_unlock_bh(&csk->lock); } EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, unsigned short mtu) { int i = 0; while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) ++i; return i; } unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) { unsigned int idx; struct dst_entry *dst = csk->dst; csk->advmss = dst_metric_advmss(dst); if (csk->advmss > pmtu - 40) csk->advmss = pmtu - 40; if (csk->advmss < csk->cdev->mtus[0] - 40) csk->advmss = csk->cdev->mtus[0] - 40; idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); return idx; } EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) { cxgbi_skcb_tcp_seq(skb) = csk->write_seq; __skb_queue_tail(&csk->write_queue, skb); } EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) { struct sk_buff *skb; while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) kfree_skb(skb); } EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) { int pending = cxgbi_sock_count_pending_wrs(csk); if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); } EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); static inline void scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, unsigned int *sgcnt, unsigned int *dlen, unsigned int prot) { struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb; *sgl = sdb->table.sgl; *sgcnt = sdb->table.nents; *dlen = sdb->length; /* Caution: for protection sdb, sdb->length is invalid */ } void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, struct cxgbi_task_tag_info *ttinfo, struct scatterlist **sg_pp, unsigned int *sg_off) { struct scatterlist *sg = sg_pp ? *sg_pp : NULL; unsigned int offset = sg_off ? 
*sg_off : 0; dma_addr_t addr = 0UL; unsigned int len = 0; int i; memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); if (sg) { addr = sg_dma_address(sg); len = sg_dma_len(sg); } for (i = 0; i < PPOD_PAGES_MAX; i++) { if (sg) { ppod->addr[i] = cpu_to_be64(addr + offset); offset += PAGE_SIZE; if (offset == (len + sg->offset)) { offset = 0; sg = sg_next(sg); if (sg) { addr = sg_dma_address(sg); len = sg_dma_len(sg); } } } else { ppod->addr[i] = 0ULL; } } /* * the fifth address needs to be repeated in the next ppod, so do * not move sg */ if (sg_pp) { *sg_pp = sg; *sg_off = offset; } if (offset == len) { offset = 0; sg = sg_next(sg); if (sg) { addr = sg_dma_address(sg); len = sg_dma_len(sg); } } ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; } EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); /* * APIs interacting with open-iscsi libraries */ int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, struct cxgbi_tag_format *tformat, unsigned int iscsi_size, unsigned int llimit, unsigned int start, unsigned int rsvd_factor, unsigned int edram_start, unsigned int edram_size) { int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev, cdev->lldev, tformat, iscsi_size, llimit, start, rsvd_factor, edram_start, edram_size); if (err >= 0) { struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); if (ppm->ppmax < 1024 || ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) cdev->flags |= CXGBI_FLAG_DDP_OFF; err = 0; } else { cdev->flags |= CXGBI_FLAG_DDP_OFF; } return err; } EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup); static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) { int i; int last_sgidx = nents - 1; struct scatterlist *sg = sgl; for (i = 0; i < nents; i++, sg = sg_next(sg)) { unsigned int len = sg->length + sg->offset; if ((sg->offset & 0x3) || (i && sg->offset) || ((i != last_sgidx) && len != PAGE_SIZE)) { log_debug(1 << CXGBI_DBG_DDP, "sg %u/%u, %u,%u, not aligned.\n", i, nents, sg->offset, sg->length); goto err_out; } } return 0; err_out: return -EINVAL; } static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn, struct cxgbi_task_data *tdata, u32 sw_tag, unsigned int xferlen) { struct cxgbi_sock *csk = cconn->cep->csk; struct cxgbi_device *cdev = csk->cdev; struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; struct scatterlist *sgl = ttinfo->sgl; unsigned int sgcnt = ttinfo->nents; unsigned int sg_offset = sgl->offset; int err; if (cdev->flags & CXGBI_FLAG_DDP_OFF) { log_debug(1 << CXGBI_DBG_DDP, "cdev 0x%p DDP off.\n", cdev); return -EINVAL; } if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt || ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) { log_debug(1 << CXGBI_DBG_DDP, "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", ppm, ppm ? 
ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX, xferlen, ttinfo->nents); return -EINVAL; } /* make sure the buffer is suitable for ddp */ if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0) return -EINVAL; ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT; /* * the ddp tag will be used for the itt in the outgoing pdu, * the itt genrated by libiscsi is saved in the ppm and can be * retrieved via the ddp tag */ err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, &ttinfo->tag, (unsigned long)sw_tag); if (err < 0) { cconn->ddp_full++; return err; } ttinfo->npods = err; /* setup dma from scsi command sgl */ sgl->offset = 0; err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); sgl->offset = sg_offset; if (err == 0) { pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", __func__, sw_tag, xferlen, sgcnt); goto rel_ppods; } if (err != ttinfo->nr_pages) { log_debug(1 << CXGBI_DBG_DDP, "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n", __func__, sw_tag, xferlen, sgcnt, err); } ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED; ttinfo->cid = csk->port_id; cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, xferlen, &ttinfo->hdr); if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) { /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */ ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID; } else { /* write ppod from control queue now */ err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); if (err < 0) goto rel_ppods; } return 0; rel_ppods: cxgbi_ppm_ppod_release(ppm, ttinfo->idx); if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) { ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED; dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); } return -EINVAL; } static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) { struct scsi_cmnd *sc = task->sc; struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); u32 tag = ntohl((__force u32)hdr_itt); log_debug(1 << CXGBI_DBG_DDP, "cdev 0x%p, task 0x%p, release tag 0x%x.\n", cdev, task, tag); if (sc && sc->sc_data_direction == DMA_FROM_DEVICE && cxgbi_ppm_is_ddp_tag(ppm, tag)) { struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ)) cdev->csk_ddp_clear_map(cdev, ppm, ttinfo); cxgbi_ppm_ppod_release(ppm, ttinfo->idx); dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, DMA_FROM_DEVICE); } } static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) { /* assume idx and age both are < 0x7FFF (32767) */ return (idx << 16) | age; } static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) { struct scsi_cmnd *sc = task->sc; struct iscsi_conn *conn = task->conn; struct iscsi_session *sess = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); u32 tag = 0; int err = -EINVAL; if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) { struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents, &tdata->dlen, 0); err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen); if (!err) tag = ttinfo->tag; else log_debug(1 << CXGBI_DBG_DDP, "csk 0x%p, R task 0x%p, %u,%u, no 
ddp.\n", cconn->cep->csk, task, tdata->dlen, ttinfo->nents); } if (err < 0) { err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag); if (err < 0) return err; } /* the itt need to sent in big-endian order */ *hdr_itt = (__force itt_t)htonl(tag); log_debug(1 << CXGBI_DBG_DDP, "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); return 0; } void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); u32 tag = ntohl((__force u32)itt); u32 sw_bits; if (ppm) { if (cxgbi_ppm_is_ddp_tag(ppm, tag)) sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag); else sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag); } else { sw_bits = tag; } cxgbi_decode_sw_tag(sw_bits, idx, age); log_debug(1 << CXGBI_DBG_DDP, "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, age ? *age : 0xFF); } EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); void cxgbi_conn_tx_open(struct cxgbi_sock *csk) { struct iscsi_conn *conn = csk->user_data; if (conn) { log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p, cid %d.\n", csk, conn->id); iscsi_conn_queue_xmit(conn); } } EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); /* * pdu receive, interact with libiscsi_tcp */ static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb, unsigned int offset, int offloaded) { int status = 0; int bytes_read; bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); switch (status) { case ISCSI_TCP_CONN_ERR: pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", skb, offset, offloaded); return -EIO; case ISCSI_TCP_SUSPENDED: log_debug(1 << CXGBI_DBG_PDU_RX, "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", skb, offset, offloaded, bytes_read); /* no transfer - just have caller flush queue */ return bytes_read; case ISCSI_TCP_SKB_DONE: pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", skb, offset, offloaded); /* * pdus should always fit in the skb and we should get * segment done notifcation. */ iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); return -EFAULT; case ISCSI_TCP_SEGMENT_DONE: log_debug(1 << CXGBI_DBG_PDU_RX, "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", skb, offset, offloaded, bytes_read); return bytes_read; default: pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", skb, offset, offloaded, status); return -EINVAL; } } static int skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn, struct sk_buff *skb) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int err; log_debug(1 << CXGBI_DBG_PDU_RX, "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", conn, skb, skb->len, cxgbi_skcb_flags(skb)); if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); iscsi_conn_failure(conn, ISCSI_ERR_PROTO); return -EIO; } if (conn->hdrdgst_en && cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); return -EIO; } if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) && cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) { /* If completion flag is set and data is directly * placed in to the host memory then update * task->exp_datasn to the datasn in completion * iSCSI hdr as T6 adapter generates completion only * for the last pdu of a sequence. 
*/ itt_t itt = ((struct iscsi_data *)skb->data)->itt; struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt); u32 data_sn = be32_to_cpu(((struct iscsi_data *) skb->data)->datasn); if (task && task->sc) { struct iscsi_tcp_task *tcp_task = task->dd_data; tcp_task->exp_datasn = data_sn; } } err = read_pdu_skb(conn, skb, 0, 0); if (likely(err >= 0)) { struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data; u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK; if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP)) cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD); } return err; } static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, struct sk_buff *skb, unsigned int offset) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; bool offloaded = 0; int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; log_debug(1 << CXGBI_DBG_PDU_RX, "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", conn, skb, skb->len, cxgbi_skcb_flags(skb)); if (conn->datadgst_en && cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", conn, lskb, cxgbi_skcb_flags(lskb)); iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); return -EIO; } if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) return 0; /* coalesced, add header digest length */ if (lskb == skb && conn->hdrdgst_en) offset += ISCSI_DIGEST_SIZE; if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) offloaded = 1; if (opcode == ISCSI_OP_SCSI_DATA_IN) log_debug(1 << CXGBI_DBG_PDU_RX, "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", skb, opcode, ntohl(tcp_conn->in.hdr->itt), tcp_conn->in.datalen, offloaded ? "is" : "not"); return read_pdu_skb(conn, skb, offset, offloaded); } static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) { struct cxgbi_device *cdev = csk->cdev; int must_send; u32 credits; log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", csk, csk->state, csk->flags, csk->tid, csk->copied_seq, csk->rcv_wup, cdev->rx_credit_thres, csk->rcv_win); if (!cdev->rx_credit_thres) return; if (csk->state != CTP_ESTABLISHED) return; credits = csk->copied_seq - csk->rcv_wup; if (unlikely(!credits)) return; must_send = credits + 16384 >= csk->rcv_win; if (must_send || credits >= cdev->rx_credit_thres) csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); } void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) { struct cxgbi_device *cdev = csk->cdev; struct iscsi_conn *conn = csk->user_data; struct sk_buff *skb; unsigned int read = 0; int err = 0; log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, conn 0x%p.\n", csk, conn); if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n", csk, conn, conn ? conn->id : 0xFF, conn ? 
conn->flags : 0xFF); return; } while (!err) { skb = skb_peek(&csk->receive_queue); if (!skb || !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { if (skb) log_debug(1 << CXGBI_DBG_PDU_RX, "skb 0x%p, NOT ready 0x%lx.\n", skb, cxgbi_skcb_flags(skb)); break; } __skb_unlink(skb, &csk->receive_queue); read += cxgbi_skcb_rx_pdulen(skb); log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", csk, skb, skb->len, cxgbi_skcb_flags(skb), cxgbi_skcb_rx_pdulen(skb)); if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { err = skb_read_pdu_bhs(csk, conn, skb); if (err < 0) { pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " "f 0x%lx, plen %u.\n", csk, skb, skb->len, cxgbi_skcb_flags(skb), cxgbi_skcb_rx_pdulen(skb)); goto skb_done; } err = skb_read_pdu_data(conn, skb, skb, err + cdev->skb_rx_extra); if (err < 0) pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " "f 0x%lx, plen %u.\n", csk, skb, skb->len, cxgbi_skcb_flags(skb), cxgbi_skcb_rx_pdulen(skb)); } else { err = skb_read_pdu_bhs(csk, conn, skb); if (err < 0) { pr_err("bhs, csk 0x%p, skb 0x%p,%u, " "f 0x%lx, plen %u.\n", csk, skb, skb->len, cxgbi_skcb_flags(skb), cxgbi_skcb_rx_pdulen(skb)); goto skb_done; } if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { struct sk_buff *dskb; dskb = skb_peek(&csk->receive_queue); if (!dskb) { pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," " plen %u, NO data.\n", csk, skb, skb->len, cxgbi_skcb_flags(skb), cxgbi_skcb_rx_pdulen(skb)); err = -EIO; goto skb_done; } __skb_unlink(dskb, &csk->receive_queue); err = skb_read_pdu_data(conn, skb, dskb, 0); if (err < 0) pr_err("data, csk 0x%p, skb 0x%p,%u, " "f 0x%lx, plen %u, dskb 0x%p," "%u.\n", csk, skb, skb->len, cxgbi_skcb_flags(skb), cxgbi_skcb_rx_pdulen(skb), dskb, dskb->len); __kfree_skb(dskb); } else err = skb_read_pdu_data(conn, skb, skb, 0); } skb_done: __kfree_skb(skb); if (err < 0) break; } log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); if (read) { csk->copied_seq += read; csk_return_rx_credits(csk, read); conn->rxdata_octets += read; } if (err < 0) { pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", csk, conn, err, read); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); } } EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, unsigned int offset, unsigned int *off, struct scatterlist **sgp) { int i; struct scatterlist *sg; for_each_sg(sgl, sg, sgcnt, i) { if (offset < sg->length) { *off = offset; *sgp = sg; return 0; } offset -= sg->length; } return -EFAULT; } static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, unsigned int dlen, struct page_frag *frags, int frag_max, u32 *dlimit) { unsigned int datalen = dlen; unsigned int sglen = sg->length - sgoffset; struct page *page = sg_page(sg); int i; i = 0; do { unsigned int copy; if (!sglen) { sg = sg_next(sg); if (!sg) { pr_warn("sg %d NULL, len %u/%u.\n", i, datalen, dlen); return -EINVAL; } sgoffset = 0; sglen = sg->length; page = sg_page(sg); } copy = min(datalen, sglen); if (i && page == frags[i - 1].page && sgoffset + sg->offset == frags[i - 1].offset + frags[i - 1].size) { frags[i - 1].size += copy; } else { if (i >= frag_max) { pr_warn("too many pages %u, dlen %u.\n", frag_max, dlen); *dlimit = dlen - datalen; return -EINVAL; } frags[i].page = page; frags[i].offset = sg->offset + sgoffset; frags[i].size = copy; i++; } datalen -= copy; sgoffset += copy; sglen -= copy; } while (datalen); return i; } static void cxgbi_task_data_sgl_check(struct iscsi_task *task) { struct scsi_cmnd *sc = task->sc; 
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct scatterlist *sg, *sgl = NULL; u32 sgcnt = 0; int i; tdata->flags = CXGBI_TASK_SGL_CHECKED; if (!sc) return; scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0); if (!sgl || !sgcnt) { tdata->flags |= CXGBI_TASK_SGL_COPY; return; } for_each_sg(sgl, sg, sgcnt, i) { if (page_count(sg_page(sg)) < 1) { tdata->flags |= CXGBI_TASK_SGL_COPY; return; } } } static int cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count, u32 *dlimit) { struct scsi_cmnd *sc = task->sc; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct scatterlist *sgl = NULL; struct scatterlist *sg; u32 dlen = 0; u32 sgcnt; int err; if (!sc) return 0; scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0); if (!sgl || !sgcnt) return 0; err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg); if (err < 0) { pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n", sgcnt, offset, tdata->dlen); return err; } err = sgl_read_to_frags(sg, tdata->sgoffset, count, tdata->frags, MAX_SKB_FRAGS, dlimit); if (err < 0) { log_debug(1 << CXGBI_DBG_ISCSI, "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n", sgcnt, offset, count, tdata->dlen, *dlimit); return err; } tdata->offset = offset; tdata->count = count; tdata->nr_frags = err; tdata->total_count = count; tdata->total_offset = offset; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "%s: offset %u, count %u,\n" "err %u, total_count %u, total_offset %u\n", __func__, offset, count, err, tdata->total_count, tdata->total_offset); return 0; } int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = task->conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL; struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct scsi_cmnd *sc = task->sc; u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; u32 max_txdata_len = conn->max_xmit_dlength; u32 iso_tx_rsvd = 0, local_iso_info = 0; u32 last_tdata_offset, last_tdata_count; int err = 0; if (!tcp_task) { pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n", task, tcp_task, tdata); return -ENOMEM; } if (!csk) { pr_err("task 0x%p, csk gone.\n", task); return -EPIPE; } op &= ISCSI_OPCODE_MASK; tcp_task->dd_data = tdata; task->hdr = NULL; last_tdata_count = tdata->count; last_tdata_offset = tdata->offset; if ((op == ISCSI_OP_SCSI_DATA_OUT) || ((op == ISCSI_OP_SCSI_CMD) && (sc->sc_data_direction == DMA_TO_DEVICE))) { u32 remaining_data_tosend, dlimit = 0; u32 max_pdu_size, max_num_pdu, num_pdu; u32 count; /* Preserve conn->max_xmit_dlength because it can get updated to * ISO data size. 
*/ if (task->state == ISCSI_TASK_PENDING) tdata->max_xmit_dlength = conn->max_xmit_dlength; if (!tdata->offset) cxgbi_task_data_sgl_check(task); remaining_data_tosend = tdata->dlen - tdata->offset - tdata->count; recalculate_sgl: max_txdata_len = tdata->max_xmit_dlength; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "tdata->dlen %u, remaining to send %u " "conn->max_xmit_dlength %u, " "tdata->max_xmit_dlength %u\n", tdata->dlen, remaining_data_tosend, conn->max_xmit_dlength, tdata->max_xmit_dlength); if (cdev->skb_iso_txhdr && !csk->disable_iso && (remaining_data_tosend > tdata->max_xmit_dlength) && !(remaining_data_tosend % 4)) { u32 max_iso_data; if ((op == ISCSI_OP_SCSI_CMD) && session->initial_r2t_en) goto no_iso; max_pdu_size = tdata->max_xmit_dlength + ISCSI_PDU_NONPAYLOAD_LEN; max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB, csk->advmss); max_num_pdu = max_iso_data / max_pdu_size; num_pdu = (remaining_data_tosend + tdata->max_xmit_dlength - 1) / tdata->max_xmit_dlength; if (num_pdu > max_num_pdu) num_pdu = max_num_pdu; conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu; max_txdata_len = conn->max_xmit_dlength; iso_tx_rsvd = cdev->skb_iso_txhdr; local_iso_info = sizeof(struct cxgbi_iso_info); log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "max_pdu_size %u, max_num_pdu %u, " "max_txdata %u, num_pdu %u\n", max_pdu_size, max_num_pdu, max_txdata_len, num_pdu); } no_iso: count = min_t(u32, max_txdata_len, remaining_data_tosend); err = cxgbi_task_data_sgl_read(task, tdata->offset + tdata->count, count, &dlimit); if (unlikely(err < 0)) { log_debug(1 << CXGBI_DBG_ISCSI, "task 0x%p, tcp_task 0x%p, tdata 0x%p, " "sgl err %d, count %u, dlimit %u\n", task, tcp_task, tdata, err, count, dlimit); if (dlimit) { remaining_data_tosend = rounddown(dlimit, tdata->max_xmit_dlength); if (!remaining_data_tosend) remaining_data_tosend = dlimit; dlimit = 0; conn->max_xmit_dlength = remaining_data_tosend; goto recalculate_sgl; } pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, " "sgl err %d\n", task, tcp_task, tdata, err); goto ret_err; } if ((tdata->flags & CXGBI_TASK_SGL_COPY) || (tdata->nr_frags > MAX_SKB_FRAGS)) headroom += conn->max_xmit_dlength; } tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd + iso_tx_rsvd + headroom, GFP_ATOMIC); if (!tdata->skb) { tdata->count = last_tdata_count; tdata->offset = last_tdata_offset; err = -ENOMEM; goto ret_err; } skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd + iso_tx_rsvd); if (task->sc) { task->hdr = (struct iscsi_hdr *)tdata->skb->data; } else { task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC); if (!task->hdr) { __kfree_skb(tdata->skb); tdata->skb = NULL; return -ENOMEM; } } task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; if (iso_tx_rsvd) cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO); /* data_out uses scsi_cmd's itt */ if (op != ISCSI_OP_SCSI_DATA_OUT) task_reserve_itt(task, &task->hdr->itt); log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", task, op, tdata->skb, cdev->skb_tx_rsvd, headroom, conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt)); return 0; ret_err: conn->max_xmit_dlength = tdata->max_xmit_dlength; return err; } EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); static int cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb, u32 count) { struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head; struct iscsi_r2t_info *r2t; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct iscsi_conn *conn = task->conn; 
struct iscsi_session *session = conn->session; struct iscsi_tcp_task *tcp_task = task->dd_data; u32 burst_size = 0, r2t_dlength = 0, dlength; u32 max_pdu_len = tdata->max_xmit_dlength; u32 segment_offset = 0; u32 num_pdu; if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) return 0; memset(iso_info, 0, sizeof(struct cxgbi_iso_info)); if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) { iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE; burst_size = count; } dlength = ntoh24(task->hdr->dlength); dlength = min(dlength, max_pdu_len); hton24(task->hdr->dlength, dlength); num_pdu = (count + max_pdu_len - 1) / max_pdu_len; if (iscsi_task_has_unsol_data(task)) r2t = &task->unsol_r2t; else r2t = tcp_task->r2t; if (r2t) { log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "count %u, tdata->count %u, num_pdu %u," "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n", count, tdata->count, num_pdu, task->hdr_len, r2t->data_length, r2t->sent); r2t_dlength = r2t->data_length - r2t->sent; segment_offset = r2t->sent; r2t->datasn += num_pdu - 1; } if (!r2t || !r2t->sent) iso_info->flags |= CXGBI_ISO_INFO_FSLICE; if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL) iso_info->flags |= CXGBI_ISO_INFO_LSLICE; task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; iso_info->op = task->hdr->opcode; iso_info->ahs = task->hdr->hlength; iso_info->num_pdu = num_pdu; iso_info->mpdu = max_pdu_len; iso_info->burst_size = (burst_size + r2t_dlength) >> 2; iso_info->len = count + task->hdr_len; iso_info->segment_offset = segment_offset; cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len; return 0; } static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) { if (hcrc || dcrc) { u8 submode = 0; if (hcrc) submode |= 1; if (dcrc) submode |= 2; cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; } else cxgbi_skcb_tx_ulp_mode(skb) = 0; } static struct page *rsvd_page; int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, unsigned int count) { struct iscsi_conn *conn = task->conn; struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct sk_buff *skb; struct scsi_cmnd *sc = task->sc; u32 expected_count, expected_offset; u32 datalen = count, dlimit = 0; u32 i, padlen = iscsi_padding(count); struct page *pg; int err; if (!tcp_task || (tcp_task->dd_data != tdata)) { pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", task, task->sc, tcp_task, tcp_task ? tcp_task->dd_data : NULL, tdata); return -EINVAL; } skb = tdata->skb; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count); skb_put(skb, task->hdr_len); tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? 
conn->datadgst_en : 0); if (!count) { tdata->count = count; tdata->offset = offset; tdata->nr_frags = 0; tdata->total_offset = 0; tdata->total_count = 0; if (tdata->max_xmit_dlength) conn->max_xmit_dlength = tdata->max_xmit_dlength; cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO); return 0; } log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "data->total_count %u, tdata->total_offset %u\n", tdata->total_count, tdata->total_offset); expected_count = tdata->total_count; expected_offset = tdata->total_offset; if ((count != expected_count) || (offset != expected_offset)) { err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit); if (err < 0) { pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p " "dlimit %u, sgl err %d.\n", task, task->sc, tcp_task, tcp_task ? tcp_task->dd_data : NULL, tdata, dlimit, err); return err; } } /* Restore original value of conn->max_xmit_dlength because * it can get updated to ISO data size. */ conn->max_xmit_dlength = tdata->max_xmit_dlength; if (sc) { struct page_frag *frag = tdata->frags; if ((tdata->flags & CXGBI_TASK_SGL_COPY) || (tdata->nr_frags > MAX_SKB_FRAGS) || (padlen && (tdata->nr_frags == MAX_SKB_FRAGS))) { char *dst = skb->data + task->hdr_len; /* data fits in the skb's headroom */ for (i = 0; i < tdata->nr_frags; i++, frag++) { char *src = kmap_atomic(frag->page); memcpy(dst, src + frag->offset, frag->size); dst += frag->size; kunmap_atomic(src); } if (padlen) { memset(dst, 0, padlen); padlen = 0; } skb_put(skb, count + padlen); } else { for (i = 0; i < tdata->nr_frags; i++, frag++) { get_page(frag->page); skb_fill_page_desc(skb, i, frag->page, frag->offset, frag->size); } skb->len += count; skb->data_len += count; skb->truesize += count; } } else { pg = virt_to_head_page(task->data); get_page(pg); skb_fill_page_desc(skb, 0, pg, task->data - (char *)page_address(pg), count); skb->len += count; skb->data_len += count; skb->truesize += count; } if (padlen) { get_page(rsvd_page); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rsvd_page, 0, padlen); skb->data_len += padlen; skb->truesize += padlen; skb->len += padlen; } if (likely(count > tdata->max_xmit_dlength)) cxgbi_prep_iso_info(task, skb, count); else cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO); return 0; } EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb) { struct cxgbi_device *cdev = csk->cdev; struct cxgbi_iso_info *iso_cpl; u32 frags = skb_shinfo(skb)->nr_frags; u32 extra_len, num_pdu, hdr_len; u32 iso_tx_rsvd = 0; if (csk->state != CTP_ESTABLISHED) { log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", csk, csk->state, csk->flags, csk->tid); return -EPIPE; } if (csk->err) { log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", csk, csk->state, csk->flags, csk->tid, csk->err); return -EPIPE; } if ((cdev->flags & CXGBI_FLAG_DEV_T3) && before((csk->snd_win + csk->snd_una), csk->write_seq)) { log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", csk, csk->state, csk->flags, csk->tid, csk->write_seq, csk->snd_una, csk->snd_win); return -ENOBUFS; } if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) iso_tx_rsvd = cdev->skb_iso_txhdr; if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) { pr_err("csk 0x%p, skb head %u < %u.\n", csk, skb_headroom(skb), cdev->skb_tx_rsvd); return -EINVAL; } if (skb->len != skb->data_len) frags++; if (frags >= SKB_WR_LIST_SIZE) { pr_err("csk 0x%p, frags %u, %u,%u >%u.\n", csk, skb_shinfo(skb)->nr_frags, skb->len, skb->data_len, 
(unsigned int)SKB_WR_LIST_SIZE); return -EINVAL; } cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); skb_reset_transport_header(skb); cxgbi_sock_skb_entail(csk, skb); extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)); if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) { iso_cpl = (struct cxgbi_iso_info *)skb->head; num_pdu = iso_cpl->num_pdu; hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb); extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) * num_pdu) + (hdr_len * (num_pdu - 1)); } csk->write_seq += (skb->len + extra_len); return 0; } static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb) { struct cxgbi_device *cdev = csk->cdev; int len = skb->len; int err; spin_lock_bh(&csk->lock); err = cxgbi_sock_tx_queue_up(csk, skb); if (err < 0) { spin_unlock_bh(&csk->lock); return err; } if (likely(skb_queue_len(&csk->write_queue))) cdev->csk_push_tx_frames(csk, 0); spin_unlock_bh(&csk->lock); return len; } int cxgbi_conn_xmit_pdu(struct iscsi_task *task) { struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; struct sk_buff *skb; struct cxgbi_sock *csk = NULL; u32 pdulen = 0; u32 datalen; int err; if (!tcp_task || (tcp_task->dd_data != tdata)) { pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", task, task->sc, tcp_task, tcp_task ? tcp_task->dd_data : NULL, tdata); return -EINVAL; } skb = tdata->skb; if (!skb) { log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "task 0x%p, skb NULL.\n", task); return 0; } if (cconn && cconn->cep) csk = cconn->cep->csk; if (!csk) { log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "task 0x%p, csk gone.\n", task); return -EPIPE; } tdata->skb = NULL; datalen = skb->data_len; /* write ppod first if using ofldq to write ppod */ if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) pr_err("task 0x%p, ppod writing using ofldq failed.\n", task); /* continue. 
Let fl get the data */ } if (!task->sc) memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX); err = cxgbi_sock_send_skb(csk, skb); if (err > 0) { pdulen += err; log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n", task, task->sc, err); if (task->conn->hdrdgst_en) pdulen += ISCSI_DIGEST_SIZE; if (datalen && task->conn->datadgst_en) pdulen += ISCSI_DIGEST_SIZE; task->conn->txdata_octets += pdulen; if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) { if (time_after(jiffies, csk->prev_iso_ts + HZ)) { csk->disable_iso = false; csk->prev_iso_ts = 0; log_debug(1 << CXGBI_DBG_PDU_TX, "enable iso: csk 0x%p\n", csk); } } return 0; } if (err == -EAGAIN || err == -ENOBUFS) { log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", task, skb, skb->len, skb->data_len, err); /* reset skb to send when we are called again */ tdata->skb = skb; if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) && (csk->no_tx_credits++ >= 2)) { csk->disable_iso = true; csk->prev_iso_ts = jiffies; log_debug(1 << CXGBI_DBG_PDU_TX, "disable iso:csk 0x%p, ts:%lu\n", csk, csk->prev_iso_ts); } return err; } log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", task->itt, skb, skb->len, skb->data_len, err); __kfree_skb(skb); iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); return err; } EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); void cxgbi_cleanup_task(struct iscsi_task *task) { struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); if (!tcp_task || (tcp_task->dd_data != tdata)) { pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", task, task->sc, tcp_task, tcp_task ? 
tcp_task->dd_data : NULL, tdata); return; } log_debug(1 << CXGBI_DBG_ISCSI, "task 0x%p, skb 0x%p, itt 0x%x.\n", task, tdata->skb, task->hdr_itt); tcp_task->dd_data = NULL; if (!task->sc) kfree(task->hdr); task->hdr = NULL; /* never reached the xmit task callout */ if (tdata->skb) { __kfree_skb(tdata->skb); tdata->skb = NULL; } task_release_itt(task, task->hdr_itt); memset(tdata, 0, sizeof(*tdata)); iscsi_tcp_cleanup_task(task); } EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->digest_err = 0; stats->timeout_err = 0; stats->custom_length = 1; strcpy(stats->custom[0].desc, "eh_abort_cnt"); stats->custom[0].value = conn->eh_abort_cnt; } EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); unsigned int max_def = 512 * MAX_SKB_FRAGS; unsigned int max = max(max_def, headroom); max = min(cconn->chba->cdev->tx_max_size, max); if (conn->max_xmit_dlength) conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); else conn->max_xmit_dlength = max; cxgbi_align_pdu_size(conn->max_xmit_dlength); return 0; } static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; unsigned int max = cconn->chba->cdev->rx_max_size; cxgbi_align_pdu_size(max); if (conn->max_recv_dlength) { if (conn->max_recv_dlength > max) { pr_err("MaxRecvDataSegmentLength %u > %u.\n", conn->max_recv_dlength, max); return -EINVAL; } conn->max_recv_dlength = min(conn->max_recv_dlength, max); cxgbi_align_pdu_size(conn->max_recv_dlength); } else conn->max_recv_dlength = max; return 0; } int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_sock *csk = cconn->cep->csk; int err; log_debug(1 << CXGBI_DBG_ISCSI, "cls_conn 0x%p, param %d, buf(%d) %s.\n", cls_conn, param, buflen, buf); switch (param) { case ISCSI_PARAM_HDRDGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->hdrdgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, conn->datadgst_en); break; case ISCSI_PARAM_DATADGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->datadgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, conn->datadgst_en); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); case ISCSI_PARAM_MAX_RECV_DLENGTH: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err) err = cxgbi_conn_max_recv_dlength(conn); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err) err = cxgbi_conn_max_xmit_dlength(conn); break; default: 
return iscsi_set_param(cls_conn, param, buf, buflen); } return err; } EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct cxgbi_endpoint *cep = ep->dd_data; struct cxgbi_sock *csk; log_debug(1 << CXGBI_DBG_ISCSI, "cls_conn 0x%p, param %d.\n", ep, param); switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: if (!cep) return -ENOTCONN; csk = cep->csk; if (!csk) return -ENOTCONN; return iscsi_conn_get_addr_param((struct sockaddr_storage *) &csk->daddr, param, buf); default: break; } return -ENOSYS; } EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); struct iscsi_cls_conn * cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) { struct iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct cxgbi_conn *cconn; cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); if (!cls_conn) return NULL; conn = cls_conn->dd_data; tcp_conn = conn->dd_data; cconn = tcp_conn->dd_data; cconn->iconn = conn; log_debug(1 << CXGBI_DBG_ISCSI, "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); return cls_conn; } EXPORT_SYMBOL_GPL(cxgbi_create_conn); int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, u64 transport_eph, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_ppm *ppm; struct iscsi_endpoint *ep; struct cxgbi_endpoint *cep; struct cxgbi_sock *csk; int err; ep = iscsi_lookup_endpoint(transport_eph); if (!ep) return -EINVAL; /* setup ddp pagesize */ cep = ep->dd_data; csk = cep->csk; ppm = csk->cdev->cdev2ppm(csk->cdev); err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, ppm->tformat.pgsz_idx_dflt); if (err < 0) goto put_ep; err = iscsi_conn_bind(cls_session, cls_conn, is_leading); if (err) { err = -EINVAL; goto put_ep; } /* calculate the tag idx bits needed for this conn based on cmds_max */ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; write_lock_bh(&csk->callback_lock); csk->user_data = conn; cconn->chba = cep->chba; cconn->cep = cep; cep->cconn = cconn; write_unlock_bh(&csk->callback_lock); cxgbi_conn_max_xmit_dlength(conn); cxgbi_conn_max_recv_dlength(conn); log_debug(1 << CXGBI_DBG_ISCSI, "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", cls_session, cls_conn, ep, cconn, csk); /* init recv engine */ iscsi_tcp_hdr_recv_prep(tcp_conn); put_ep: iscsi_put_endpoint(ep); return err; } EXPORT_SYMBOL_GPL(cxgbi_bind_conn); struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, u32 initial_cmdsn) { struct cxgbi_endpoint *cep; struct cxgbi_hba *chba; struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct iscsi_session *session; if (!ep) { pr_err("missing endpoint.\n"); return NULL; } cep = ep->dd_data; chba = cep->chba; shost = chba->shost; BUG_ON(chba != iscsi_host_priv(shost)); cls_session = iscsi_session_setup(chba->cdev->itp, shost, cmds_max, 0, sizeof(struct iscsi_tcp_task) + sizeof(struct cxgbi_task_data), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; session = cls_session->dd_data; if (iscsi_tcp_r2tpool_alloc(session)) goto remove_session; log_debug(1 << CXGBI_DBG_ISCSI, "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); return cls_session; remove_session: iscsi_session_teardown(cls_session); return NULL; } 
EXPORT_SYMBOL_GPL(cxgbi_create_session); void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) { log_debug(1 << CXGBI_DBG_ISCSI, "cls sess 0x%p.\n", cls_session); iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_session_teardown(cls_session); } EXPORT_SYMBOL_GPL(cxgbi_destroy_session); int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf, int buflen) { struct cxgbi_hba *chba = iscsi_host_priv(shost); if (!chba->ndev) { shost_printk(KERN_ERR, shost, "Could not get host param. " "netdev for host not set.\n"); return -ENODEV; } log_debug(1 << CXGBI_DBG_ISCSI, "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", shost, chba, chba->ndev->name, param, buflen, buf); switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: { __be32 addr = in_aton(buf); log_debug(1 << CXGBI_DBG_ISCSI, "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); cxgbi_set_iscsi_ipv4(chba, addr); return 0; } case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_NETDEV_NAME: return 0; default: return iscsi_host_set_param(shost, param, buf, buflen); } } EXPORT_SYMBOL_GPL(cxgbi_set_host_param); int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct cxgbi_hba *chba = iscsi_host_priv(shost); int len = 0; if (!chba->ndev) { shost_printk(KERN_ERR, shost, "Could not get host param. " "netdev for host not set.\n"); return -ENODEV; } log_debug(1 << CXGBI_DBG_ISCSI, "shost 0x%p, hba 0x%p,%s, param %d.\n", shost, chba, chba->ndev->name, param); switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); break; case ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "%s\n", chba->ndev->name); break; case ISCSI_HOST_PARAM_IPADDRESS: { struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, chba->port_id); if (csk) { len = sprintf(buf, "%pIS", (struct sockaddr *)&csk->saddr); } log_debug(1 << CXGBI_DBG_ISCSI, "hba %s, addr %s.\n", chba->ndev->name, buf); break; } default: return iscsi_host_get_param(shost, param, buf); } return len; } EXPORT_SYMBOL_GPL(cxgbi_get_host_param); struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { struct iscsi_endpoint *ep; struct cxgbi_endpoint *cep; struct cxgbi_hba *hba = NULL; struct cxgbi_sock *csk; int ifindex = 0; int err = -EINVAL; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", shost, non_blocking, dst_addr); if (shost) { hba = iscsi_host_priv(shost); if (!hba) { pr_info("shost 0x%p, priv NULL.\n", shost); goto err_out; } } check_route: if (dst_addr->sa_family == AF_INET) { csk = cxgbi_check_route(dst_addr, ifindex); #if IS_ENABLED(CONFIG_IPV6) } else if (dst_addr->sa_family == AF_INET6) { csk = cxgbi_check_route6(dst_addr, ifindex); #endif } else { pr_info("address family 0x%x NOT supported.\n", dst_addr->sa_family); err = -EAFNOSUPPORT; return (struct iscsi_endpoint *)ERR_PTR(err); } if (IS_ERR(csk)) return (struct iscsi_endpoint *)csk; cxgbi_sock_get(csk); if (!hba) hba = csk->cdev->hbas[csk->port_id]; else if (hba != csk->cdev->hbas[csk->port_id]) { if (ifindex != hba->ndev->ifindex) { cxgbi_sock_put(csk); cxgbi_sock_closed(csk); ifindex = hba->ndev->ifindex; goto check_route; } pr_info("Could not connect through requested host %u" "hba 0x%p != 0x%p (%u).\n", shost->host_no, hba, csk->cdev->hbas[csk->port_id], csk->port_id); err = -ENOSPC; goto release_conn; } err = sock_get_port(csk); if (err) goto release_conn; cxgbi_sock_set_state(csk, CTP_CONNECTING); 
err = csk->cdev->csk_init_act_open(csk); if (err) goto release_conn; if (cxgbi_sock_is_closing(csk)) { err = -ENOSPC; pr_info("csk 0x%p is closing.\n", csk); goto release_conn; } ep = iscsi_create_endpoint(sizeof(*cep)); if (!ep) { err = -ENOMEM; pr_info("iscsi alloc ep, OOM.\n"); goto release_conn; } cep = ep->dd_data; cep->csk = csk; cep->chba = hba; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", ep, cep, csk, hba, hba->ndev->name); return ep; release_conn: cxgbi_sock_put(csk); cxgbi_sock_closed(csk); err_out: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(cxgbi_ep_connect); int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct cxgbi_endpoint *cep = ep->dd_data; struct cxgbi_sock *csk = cep->csk; if (!cxgbi_sock_is_established(csk)) return 0; return 1; } EXPORT_SYMBOL_GPL(cxgbi_ep_poll); void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) { struct cxgbi_endpoint *cep = ep->dd_data; struct cxgbi_conn *cconn = cep->cconn; struct cxgbi_sock *csk = cep->csk; log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", ep, cep, cconn, csk, csk->state, csk->flags); if (cconn && cconn->iconn) { write_lock_bh(&csk->callback_lock); cep->csk->user_data = NULL; cconn->cep = NULL; write_unlock_bh(&csk->callback_lock); } iscsi_destroy_endpoint(ep); if (likely(csk->state >= CTP_ESTABLISHED)) need_active_close(csk); else cxgbi_sock_closed(csk); cxgbi_sock_put(csk); } EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); int cxgbi_iscsi_init(struct iscsi_transport *itp, struct scsi_transport_template **stt) { *stt = iscsi_register_transport(itp); if (*stt == NULL) { pr_err("unable to register %s transport 0x%p.\n", itp->name, itp); return -ENODEV; } log_debug(1 << CXGBI_DBG_ISCSI, "%s, registered iscsi transport 0x%p.\n", itp->name, stt); return 0; } EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, struct scsi_transport_template **stt) { if (*stt) { log_debug(1 << CXGBI_DBG_ISCSI, "de-register transport 0x%p, %s, stt 0x%p.\n", itp, itp->name, *stt); *stt = NULL; iscsi_unregister_transport(itp); } } EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); umode_t cxgbi_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: case ISCSI_HOST_PARAM_INITIATOR_NAME: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_PING_TMO: case ISCSI_PARAM_RECV_TMO: case ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_ERL: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_FAST_ABORT: case ISCSI_PARAM_ABORT_TMO: case ISCSI_PARAM_LU_RESET_TMO: case ISCSI_PARAM_TGT_RESET_TMO: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: return S_IRUGO; default: return 0; } } return 0; } EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); 
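/*
 * Illustrative sketch only (not part of the driver): the ISO (large-send)
 * sizing arithmetic used by cxgbi_conn_alloc_pdu() above, pulled out as a
 * standalone helper so the PDU-count math is easier to follow. The helper
 * name and the iso_budget/pdu_overhead parameters are made up for
 * illustration; in the driver the budget is CXGBI_MAX_ISO_DATA_IN_SKB and
 * the per-PDU overhead is ISCSI_PDU_NONPAYLOAD_LEN.
 */
static inline u32 iso_num_pdu_sketch(u32 remaining, u32 max_xmit_dlength,
				     u32 advmss, u32 iso_budget,
				     u32 pdu_overhead)
{
	/* budget an integral number of TCP segments per ISO skb */
	u32 max_iso_data = rounddown(iso_budget, advmss);
	/* each PDU costs its data segment plus BHS/AHS/digest overhead */
	u32 max_pdu_size = max_xmit_dlength + pdu_overhead;
	u32 max_num_pdu = max_iso_data / max_pdu_size;
	u32 num_pdu = DIV_ROUND_UP(remaining, max_xmit_dlength);

	/* one ISO send carries at most max_num_pdu back-to-back PDUs */
	return min(num_pdu, max_num_pdu);
}
/*
 * Worked example with assumed numbers: advmss 1460, iso_budget 65536,
 * 8K MaxXmitDataSegmentLength, 64K left to send, 312 bytes of per-PDU
 * overhead -> the budget rounds down to 64240, max_pdu_size is 8504,
 * so at most 7 PDUs go out in one ISO skb. cxgbi_conn_alloc_pdu() then
 * stretches conn->max_xmit_dlength to num_pdu * max_xmit_dlength for
 * this send, and cxgbi_prep_iso_info() records num_pdu in the ISO CPL.
 */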
static int __init libcxgbi_init_module(void) { pr_info("%s", version); BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < sizeof(struct cxgbi_skb_cb)); rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!rsvd_page) return -ENOMEM; return 0; } static void __exit libcxgbi_exit_module(void) { cxgbi_device_unregister_all(0xFF); put_page(rsvd_page); return; } module_init(libcxgbi_init_module); module_exit(libcxgbi_exit_module);
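/*
 * Illustrative sketch only: the skb control-buffer overlay pattern guarded
 * by the BUILD_BUG_ON() in libcxgbi_init_module() above. A driver stashes
 * its per-packet state in skb->cb and proves at compile time that the
 * overlay fits. struct example_skb_cb and example_skb_cb() are made-up
 * names; the real overlay used throughout this file is struct cxgbi_skb_cb
 * behind the cxgbi_skcb_*() accessors.
 */
struct example_skb_cb {
	unsigned long	flags;		/* SKCBF_* style flag bits */
	unsigned int	rx_pdulen;	/* bytes accounted per PDU */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	/* refuse to build if the private state outgrows skb->cb */
	BUILD_BUG_ON(sizeof(struct example_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
	return (struct example_skb_cb *)skb->cb;
}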
linux-master
drivers/scsi/cxgbi/libcxgbi.c
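/*
 * Illustrative sketch only, placed between the two source files as a bridge:
 * tx_skb_setmode() in libcxgbi.c above records the negotiated digest offload
 * as (ULP2_MODE_ISCSI << 4) | submode in the skb control buffer, and the
 * hardware driver below (cxgb4i) reads back the low two bits as the ULP
 * submode when it builds the FW_OFLD_TX_DATA_WR. The helper names here are
 * made up for illustration.
 */
static inline u8 example_ulp_submode(bool hdr_digest, bool data_digest)
{
	/* bit 0: insert header digest, bit 1: insert data digest */
	return (hdr_digest ? 1 : 0) | (data_digest ? 2 : 0);
}

static inline u8 example_ulp_submode_of(u8 skcb_ulp_mode)
{
	/* cxgb4i_make_tx_data_wr() keeps only the two submode bits */
	return skcb_ulp_mode & 0x3;
}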
/* * cxgb4i.c: Chelsio T4 iSCSI driver. * * Copyright (c) 2010-2015 Chelsio Communications, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Karen Xie ([email protected]) * Rakesh Ranjan ([email protected]) */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <scsi/scsi_host.h> #include <net/tcp.h> #include <net/dst.h> #include <linux/netdevice.h> #include <net/addrconf.h> #include "t4_regs.h" #include "t4_msg.h" #include "cxgb4.h" #include "cxgb4_uld.h" #include "t4fw_api.h" #include "l2t.h" #include "cxgb4i.h" #include "clip_tbl.h" static unsigned int dbg_level; #include "../libcxgbi.h" #ifdef CONFIG_CHELSIO_T4_DCB #include <net/dcbevent.h> #include "cxgb4_dcb.h" #endif #define DRV_MODULE_NAME "cxgb4i" #define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver" #define DRV_MODULE_VERSION "0.9.5-ko" #define DRV_MODULE_RELDATE "Apr. 2015" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Chelsio Communications, Inc."); MODULE_DESCRIPTION(DRV_MODULE_DESC); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_LICENSE("GPL"); module_param(dbg_level, uint, 0644); MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)"); #define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024) static int cxgb4i_rcv_win = -1; module_param(cxgb4i_rcv_win, int, 0644); MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes"); #define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024) static int cxgb4i_snd_win = -1; module_param(cxgb4i_snd_win, int, 0644); MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes"); static int cxgb4i_rx_credit_thres = 10 * 1024; module_param(cxgb4i_rx_credit_thres, int, 0644); MODULE_PARM_DESC(cxgb4i_rx_credit_thres, "RX credits return threshold in bytes (default=10KB)"); static unsigned int cxgb4i_max_connect = (8 * 1024); module_param(cxgb4i_max_connect, uint, 0644); MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections"); static unsigned short cxgb4i_sport_base = 20000; module_param(cxgb4i_sport_base, ushort, 0644); MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)"); typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *); static void *t4_uld_add(const struct cxgb4_lld_info *); static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *); static int t4_uld_state_change(void *, enum cxgb4_state state); static inline int send_tx_flowc_wr(struct cxgbi_sock *); static const struct cxgb4_uld_info cxgb4i_uld_info = { .name = DRV_MODULE_NAME, .nrxq = MAX_ULD_QSETS, .ntxq = MAX_ULD_QSETS, .rxq_size = 1024, .lro = false, .add = t4_uld_add, .rx_handler = t4_uld_rx_handler, .state_change = t4_uld_state_change, }; static struct scsi_host_template cxgb4i_host_template = { .module = THIS_MODULE, .name = DRV_MODULE_NAME, .proc_name = DRV_MODULE_NAME, .can_queue = CXGB4I_SCSI_HOST_QDEPTH, .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .target_alloc = iscsi_target_alloc, .dma_boundary = PAGE_SIZE - 1, .this_id = -1, .track_queue_depth = 
1, .cmd_size = sizeof(struct iscsi_cmd), }; static struct iscsi_transport cxgb4i_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_MODULE_NAME, .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, .attr_is_visible = cxgbi_attr_is_visible, .get_host_param = cxgbi_get_host_param, .set_host_param = cxgbi_set_host_param, /* session management */ .create_session = cxgbi_create_session, .destroy_session = cxgbi_destroy_session, .get_session_param = iscsi_session_get_param, /* connection management */ .create_conn = cxgbi_create_conn, .bind_conn = cxgbi_bind_conn, .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, .get_conn_param = iscsi_conn_get_param, .set_param = cxgbi_set_conn_param, .get_stats = cxgbi_get_conn_stats, /* pdu xmit req from user space */ .send_pdu = iscsi_conn_send_pdu, /* task */ .init_task = iscsi_tcp_task_init, .xmit_task = iscsi_tcp_task_xmit, .cleanup_task = cxgbi_cleanup_task, /* pdu */ .alloc_pdu = cxgbi_conn_alloc_pdu, .init_pdu = cxgbi_conn_init_pdu, .xmit_pdu = cxgbi_conn_xmit_pdu, .parse_pdu_itt = cxgbi_parse_pdu_itt, /* TCP connect/disconnect */ .get_ep_param = cxgbi_get_ep_param, .ep_connect = cxgbi_ep_connect, .ep_poll = cxgbi_ep_poll, .ep_disconnect = cxgbi_ep_disconnect, /* Error recovery timeout call */ .session_recovery_timedout = iscsi_session_recovery_timedout, }; #ifdef CONFIG_CHELSIO_T4_DCB static int cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *); static struct notifier_block cxgb4_dcb_change = { .notifier_call = cxgb4_dcb_change_notify, }; #endif static struct scsi_transport_template *cxgb4i_stt; /* * CPL (Chelsio Protocol Language) defines a message passing interface between * the host driver and the Chelsio ASIC. * The section below implements the CPLs related to iSCSI TCP connection * open/close/abort and data send/receive. */ #define RCV_BUFSIZ_MASK 0x3FFU #define MAX_IMM_TX_PKT_LEN 256 static int push_tx_frames(struct cxgbi_sock *, int); /* * is_ofld_imm - check whether a packet can be sent as immediate data * @skb: the packet * * Returns true if a packet can be sent as an offload WR with immediate * data. We currently use the same limit as for Ethernet packets.
*/ static inline bool is_ofld_imm(const struct sk_buff *skb) { int len = skb->len; if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) len += sizeof(struct fw_ofld_tx_data_wr); if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO))) len += sizeof(struct cpl_tx_data_iso); return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN); } static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); unsigned long long opt0; unsigned int opt2; unsigned int qid_atid = ((unsigned int)csk->atid) | (((unsigned int)csk->rss_qid) << 14); opt0 = KEEP_ALIVE_F | WND_SCALE_V(wscale) | MSS_IDX_V(csk->mss_idx) | L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | TX_CHAN_V(csk->tx_chan) | SMAC_SEL_V(csk->smac_idx) | ULP_MODE_V(ULP_MODE_ISCSI) | RCV_BUFSIZ_V(csk->rcv_win >> 10); opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); if (is_t4(lldi->adapter_type)) { struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid)); req->local_port = csk->saddr.sin_port; req->peer_port = csk->daddr.sin_port; req->local_ip = csk->saddr.sin_addr.s_addr; req->peer_ip = csk->daddr.sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); req->params = cpu_to_be32(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t)); opt2 |= RX_FC_VALID_F; req->opt2 = cpu_to_be32(opt2); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", csk, &req->local_ip, ntohs(req->local_port), &req->peer_ip, ntohs(req->peer_port), csk->atid, csk->rss_qid); } else if (is_t5(lldi->adapter_type)) { struct cpl_t5_act_open_req *req = (struct cpl_t5_act_open_req *)skb->head; u32 isn = (get_random_u32() & ~7UL) - 1; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid)); req->local_port = csk->saddr.sin_port; req->peer_port = csk->daddr.sin_port; req->local_ip = csk->saddr.sin_addr.s_addr; req->peer_ip = csk->daddr.sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); req->params = cpu_to_be64(FILTER_TUPLE_V( cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); req->rsvd = cpu_to_be32(isn); opt2 |= T5_ISS_VALID; opt2 |= T5_OPT_2_VALID_F; req->opt2 = cpu_to_be32(opt2); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", csk, &req->local_ip, ntohs(req->local_port), &req->peer_ip, ntohs(req->peer_port), csk->atid, csk->rss_qid); } else { struct cpl_t6_act_open_req *req = (struct cpl_t6_act_open_req *)skb->head; u32 isn = (get_random_u32() & ~7UL) - 1; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid)); req->local_port = csk->saddr.sin_port; req->peer_port = csk->daddr.sin_port; req->local_ip = csk->saddr.sin_addr.s_addr; req->peer_ip = csk->daddr.sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); req->params = cpu_to_be64(FILTER_TUPLE_V( cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); req->rsvd = cpu_to_be32(isn); opt2 |= T5_ISS_VALID; opt2 |= RX_FC_DISABLE_F; opt2 |= T5_OPT_2_VALID_F; req->opt2 = cpu_to_be32(opt2); req->rsvd2 = cpu_to_be32(0); req->opt3 = cpu_to_be32(0); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", csk, &req->local_ip, ntohs(req->local_port), &req->peer_ip, ntohs(req->peer_port), csk->atid, csk->rss_qid); } 
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n", (&csk->saddr), (&csk->daddr), CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, csk->flags, csk->atid, csk->rss_qid); cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } #if IS_ENABLED(CONFIG_IPV6) static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); unsigned long long opt0; unsigned int opt2; unsigned int qid_atid = ((unsigned int)csk->atid) | (((unsigned int)csk->rss_qid) << 14); opt0 = KEEP_ALIVE_F | WND_SCALE_V(wscale) | MSS_IDX_V(csk->mss_idx) | L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | TX_CHAN_V(csk->tx_chan) | SMAC_SEL_V(csk->smac_idx) | ULP_MODE_V(ULP_MODE_ISCSI) | RCV_BUFSIZ_V(csk->rcv_win >> 10); opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); if (is_t4(lldi->adapter_type)) { struct cpl_act_open_req6 *req = (struct cpl_act_open_req6 *)skb->head; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid)); req->local_port = csk->saddr6.sin6_port; req->peer_port = csk->daddr6.sin6_port; req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + 8); req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + 8); req->opt0 = cpu_to_be64(opt0); opt2 |= RX_FC_VALID_F; req->opt2 = cpu_to_be32(opt2); req->params = cpu_to_be32(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t)); } else if (is_t5(lldi->adapter_type)) { struct cpl_t5_act_open_req6 *req = (struct cpl_t5_act_open_req6 *)skb->head; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid)); req->local_port = csk->saddr6.sin6_port; req->peer_port = csk->daddr6.sin6_port; req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + 8); req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + 8); req->opt0 = cpu_to_be64(opt0); opt2 |= T5_OPT_2_VALID_F; req->opt2 = cpu_to_be32(opt2); req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); } else { struct cpl_t6_act_open_req6 *req = (struct cpl_t6_act_open_req6 *)skb->head; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid)); req->local_port = csk->saddr6.sin6_port; req->peer_port = csk->daddr6.sin6_port; req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + 8); req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + 8); req->opt0 = cpu_to_be64(opt0); opt2 |= RX_FC_DISABLE_F; opt2 |= T5_OPT_2_VALID_F; req->opt2 = cpu_to_be32(opt2); req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); req->rsvd2 = cpu_to_be32(0); req->opt3 = cpu_to_be32(0); } set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n", CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, csk->flags, csk->atid, &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), 
csk->rss_qid); cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } #endif static void send_close_req(struct cxgbi_sock *csk) { struct sk_buff *skb = csk->cpl_close; struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; unsigned int tid = csk->tid; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx, tid %u.\n", csk, csk->state, csk->flags, csk->tid); csk->cpl_close = NULL; set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); req->rsvd = 0; cxgbi_sock_skb_entail(csk, skb); if (csk->state >= CTP_ESTABLISHED) push_tx_frames(csk, 1); } static void abort_arp_failure(void *handle, struct sk_buff *skb) { struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; struct cpl_abort_req *req; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx, tid %u, abort.\n", csk, csk->state, csk->flags, csk->tid); req = (struct cpl_abort_req *)skb->data; req->cmd = CPL_ABORT_NO_RST; cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); } static void send_abort_req(struct cxgbi_sock *csk) { struct cpl_abort_req *req; struct sk_buff *skb = csk->cpl_abort_req; if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) return; if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { send_tx_flowc_wr(csk); cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } cxgbi_sock_set_state(csk, CTP_ABORTING); cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); cxgbi_sock_purge_write_queue(csk); csk->cpl_abort_req = NULL; req = (struct cpl_abort_req *)skb->head; set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); req->cmd = CPL_ABORT_SEND_RST; t4_set_arp_err_handler(skb, csk, abort_arp_failure); INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); req->rsvd0 = htonl(csk->snd_nxt); req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, req->rsvd1); cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) { struct sk_buff *skb = csk->cpl_abort_rpl; struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, status %d.\n", csk, csk->state, csk->flags, csk->tid, rst_status); csk->cpl_abort_rpl = NULL; set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); INIT_TP_WR(rpl, csk->tid); OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); rpl->cmd = rst_status; cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); } /* * CPL connection rx data ack: host -> * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of * credits sent. 
*/ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) { struct sk_buff *skb; struct cpl_rx_data_ack *req; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx,%u, credit %u.\n", csk, csk->state, csk->flags, csk->tid, credits); skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); if (!skb) { pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); return 0; } req = (struct cpl_rx_data_ack *)skb->head; set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) | RX_FORCE_ACK_F); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); return credits; } /* * sgl_len - calculates the size of an SGL of the given capacity * @n: the number of SGL entries * Calculates the number of flits needed for a scatter/gather list that * can hold the given number of entries. */ static inline unsigned int sgl_len(unsigned int n) { n--; return (3 * n) / 2 + (n & 1) + 2; } /* * calc_tx_flits_ofld - calculate # of flits for an offload packet * @skb: the packet * * Returns the number of flits needed for the given offload packet. * These packets are already fully constructed and no additional headers * will be added. */ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) { unsigned int flits, cnt; if (is_ofld_imm(skb)) return DIV_ROUND_UP(skb->len, 8); flits = skb_transport_offset(skb) / 8; cnt = skb_shinfo(skb)->nr_frags; if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits + sgl_len(cnt); } #define FLOWC_WR_NPARAMS_MIN 9 static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp) { int nparams, flowclen16, flowclen; nparams = FLOWC_WR_NPARAMS_MIN; #ifdef CONFIG_CHELSIO_T4_DCB nparams++; #endif flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); flowclen16 = DIV_ROUND_UP(flowclen, 16); flowclen = flowclen16 * 16; /* * Return the number of 16-byte credits used by the FlowC request. * Pass back the nparams and actual FlowC length if requested. 
*/ if (nparamsp) *nparamsp = nparams; if (flowclenp) *flowclenp = flowclen; return flowclen16; } static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) { struct sk_buff *skb; struct fw_flowc_wr *flowc; int nparams, flowclen16, flowclen; #ifdef CONFIG_CHELSIO_T4_DCB u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; #endif flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); skb = alloc_wr(flowclen, 0, GFP_ATOMIC); flowc = (struct fw_flowc_wr *)skb->head; flowc->op_to_nparams = htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); flowc->flowid_len16 = htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; flowc->mnemval[0].val = htonl(csk->cdev->pfvf); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = htonl(csk->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; flowc->mnemval[2].val = htonl(csk->tx_chan); flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; flowc->mnemval[3].val = htonl(csk->rss_qid); flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; flowc->mnemval[4].val = htonl(csk->snd_nxt); flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; flowc->mnemval[5].val = htonl(csk->rcv_nxt); flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; flowc->mnemval[6].val = htonl(csk->snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = htonl(csk->advmss); flowc->mnemval[8].mnemonic = 0; flowc->mnemval[8].val = 0; flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; if (csk->cdev->skb_iso_txhdr) flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB); else flowc->mnemval[8].val = cpu_to_be32(16128); #ifdef CONFIG_CHELSIO_T4_DCB flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO; if (vlan == CPL_L2T_VLAN_NONE) { pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n", csk->tid); flowc->mnemval[9].val = cpu_to_be32(0); } else { flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT); } #endif set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, csk->snd_nxt, csk->rcv_nxt, csk->snd_win, csk->advmss); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); return flowclen16; } static void cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl) { struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head; u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE); u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE); u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE); u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 
0 : 1; u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3; cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) | CPL_TX_DATA_ISO_FIRST_V(fslice) | CPL_TX_DATA_ISO_LAST_V(lslice) | CPL_TX_DATA_ISO_CPLHDRLEN_V(0) | CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) | CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) | CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) | CPL_TX_DATA_ISO_SCSI_V(pdu_type)); cpl->ahs_len = info->ahs; cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4)); cpl->burst_size = cpu_to_be32(info->burst_size); cpl->len = cpu_to_be32(info->len); cpl->reserved2_seglen_offset = cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset)); cpl->datasn_offset = cpu_to_be32(info->datasn_offset); cpl->buffer_offset = cpu_to_be32(info->buffer_offset); cpl->reserved3 = cpu_to_be32(0); log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, " "burst_size %u, iso_len %u\n", info->flags, info->op, info->ahs, info->num_pdu, info->mpdu, info->burst_size << 2, info->len); } static void cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen, int len, u32 credits, int compl) { struct cxgbi_device *cdev = csk->cdev; struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct fw_ofld_tx_data_wr *req; struct cpl_tx_data_iso *cpl; u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3; u32 wr_ulp_mode = 0; u32 hdr_size = sizeof(*req); u32 opcode = FW_OFLD_TX_DATA_WR; u32 immlen = 0; u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) : T6_TX_FORCE_F; if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) { hdr_size += sizeof(struct cpl_tx_data_iso); opcode = FW_ISCSI_TX_DATA_WR; immlen += sizeof(struct cpl_tx_data_iso); submode |= 8; } if (is_ofld_imm(skb)) immlen += dlen; req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size); req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) | FW_WR_COMPL_V(compl) | FW_WR_IMMDLEN_V(immlen)); req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | FW_WR_LEN16_V(credits)); req->plen = cpu_to_be32(len); cpl = (struct cpl_tx_data_iso *)(req + 1); if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) cxgb4i_make_tx_iso_cpl(skb, cpl); if (submode) wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) | FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force | FW_OFLD_TX_DATA_WR_SHOVE_V(1U)); if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } static void arp_failure_skb_discard(void *handle, struct sk_buff *skb) { kfree_skb(skb); } static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) { int total_size = 0; struct sk_buff *skb; if (unlikely(csk->state < CTP_ESTABLISHED || csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK | 1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, in closing state.\n", csk, csk->state, csk->flags, csk->tid); return 0; } while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) { struct cxgbi_iso_info *iso_cpl; u32 dlen = skb->len; u32 len = skb->len; u32 iso_cpl_len = 0; u32 flowclen16 = 0; u32 credits_needed; u32 num_pdu = 1, hdr_len; if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) iso_cpl_len = sizeof(struct cpl_tx_data_iso); if (is_ofld_imm(skb)) credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16); else credits_needed = DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) + iso_cpl_len, 16); if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) credits_needed += DIV_ROUND_UP(sizeof(struct 
fw_ofld_tx_data_wr), 16); /* * Assumes the initial credits is large enough to support * fw_flowc_wr plus largest possible first payload */ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { flowclen16 = send_tx_flowc_wr(csk); csk->wr_cred -= flowclen16; csk->wr_una_cred += flowclen16; cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } if (csk->wr_cred < credits_needed) { log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p, skb %u/%u, wr %d < %u.\n", csk, skb->len, skb->data_len, credits_needed, csk->wr_cred); csk->no_tx_credits++; break; } csk->no_tx_credits = 0; __skb_unlink(skb, &csk->write_queue); set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); skb->csum = (__force __wsum)(credits_needed + flowclen16); csk->wr_cred -= credits_needed; csk->wr_una_cred += credits_needed; cxgbi_sock_enqueue_wr(csk, skb); log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", csk, skb->len, skb->data_len, credits_needed, csk->wr_cred, csk->wr_una_cred); if (!req_completion && ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || after(csk->write_seq, (csk->snd_una + csk->snd_win / 2)))) req_completion = 1; if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb); if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) { iso_cpl = (struct cxgbi_iso_info *)skb->head; num_pdu = iso_cpl->num_pdu; hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb); len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) + (hdr_len * (num_pdu - 1)); } else { len += cxgbi_ulp_extra_len(ulp_mode); } cxgb4i_make_tx_data_wr(csk, skb, dlen, len, credits_needed, req_completion); csk->snd_nxt += len; cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) && (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->data; req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F); } total_size += skb->truesize; t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n", csk, csk->state, csk->flags, csk->tid, skb, len); cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } return total_size; } static inline void free_atid(struct cxgbi_sock *csk) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { cxgb4_free_atid(lldi->tids, csk->atid); cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); cxgbi_sock_put(csk); } } static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; unsigned short tcp_opt = ntohs(req->tcp_opt); unsigned int tid = GET_TID(req); unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; u32 rcv_isn = be32_to_cpu(req->rcv_isn); csk = lookup_atid(t, atid); if (unlikely(!csk)) { pr_err("NO conn. 
for atid %u, cdev 0x%p.\n", atid, cdev); goto rel_skb; } if (csk->atid != atid) { pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n", atid, csk, csk->state, csk->flags, csk->tid, csk->atid); goto rel_skb; } pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", (&csk->saddr), (&csk->daddr), atid, tid, csk, csk->state, csk->flags, rcv_isn); module_put(cdev->owner); cxgbi_sock_get(csk); csk->tid = tid; cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family); cxgbi_sock_set_flag(csk, CTPF_HAS_TID); free_atid(csk); spin_lock_bh(&csk->lock); if (unlikely(csk->state != CTP_ACTIVE_OPEN)) pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", csk, csk->state, csk->flags, csk->tid); if (csk->retry_timer.function) { del_timer(&csk->retry_timer); csk->retry_timer.function = NULL; } csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; /* * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't * pass through opt0. */ if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; if (TCPOPT_TSTAMP_G(tcp_opt)) csk->advmss -= 12; if (csk->advmss < 128) csk->advmss = 128; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, mss_idx %u, advmss %u.\n", csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) send_abort_req(csk); else { if (skb_queue_len(&csk->write_queue)) push_tx_frames(csk, 0); cxgbi_conn_tx_open(csk); } spin_unlock_bh(&csk->lock); rel_skb: __kfree_skb(skb); } static int act_open_rpl_status_to_errno(int status) { switch (status) { case CPL_ERR_CONN_RESET: return -ECONNREFUSED; case CPL_ERR_ARP_MISS: return -EHOSTUNREACH; case CPL_ERR_CONN_TIMEDOUT: return -ETIMEDOUT; case CPL_ERR_TCAM_FULL: return -ENOMEM; case CPL_ERR_CONN_EXIST: return -EADDRINUSE; default: return -EIO; } } static void csk_act_open_retry_timer(struct timer_list *t) { struct sk_buff *skb = NULL; struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, struct l2t_entry *); int t4 = is_t4(lldi->adapter_type), size, size6; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (t4) { size = sizeof(struct cpl_act_open_req); size6 = sizeof(struct cpl_act_open_req6); } else { size = sizeof(struct cpl_t5_act_open_req); size6 = sizeof(struct cpl_t5_act_open_req6); } if (csk->csk_family == AF_INET) { send_act_open_func = send_act_open_req; skb = alloc_wr(size, 0, GFP_ATOMIC); #if IS_ENABLED(CONFIG_IPV6) } else { send_act_open_func = send_act_open_req6; skb = alloc_wr(size6, 0, GFP_ATOMIC); #endif } if (!skb) cxgbi_sock_fail_act_open(csk, -ENOMEM); else { skb->sk = (struct sock *)csk; t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); send_act_open_func(csk, skb, csk->l2t); } spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } static inline bool is_neg_adv(unsigned int status) { return status == CPL_ERR_RTX_NEG_ADVICE || status == CPL_ERR_KEEPALV_NEG_ADVICE || status == CPL_ERR_PERSIST_NEG_ADVICE; } static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; unsigned int tid = GET_TID(rpl); unsigned int atid = 
TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status))); unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status)); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_atid(t, atid); if (unlikely(!csk)) { pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid); goto rel_skb; } pr_info_ipaddr("tid %u/%u, status %u.\n" "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), atid, tid, status, csk, csk->state, csk->flags); if (is_neg_adv(status)) goto rel_skb; module_put(cdev->owner); if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS) cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl), csk->csk_family); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (status == CPL_ERR_CONN_EXIST && csk->retry_timer.function != csk_act_open_retry_timer) { csk->retry_timer.function = csk_act_open_retry_timer; mod_timer(&csk->retry_timer, jiffies + HZ / 2); } else cxgbi_sock_fail_act_open(csk, act_open_rpl_status_to_errno(status)); spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); rel_skb: __kfree_skb(skb); } static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data; unsigned int tid = GET_TID(req); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", (&csk->saddr), (&csk->daddr), csk, csk->state, csk->flags, csk->tid); cxgbi_sock_rcv_peer_close(csk); rel_skb: __kfree_skb(skb); } static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data; unsigned int tid = GET_TID(rpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", (&csk->saddr), (&csk->daddr), csk, csk->state, csk->flags, csk->tid); cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); rel_skb: __kfree_skb(skb); } static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, int *need_rst) { switch (abort_reason) { case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return csk->state > CTP_ESTABLISHED ? 
-EPIPE : -ECONNRESET; case CPL_ERR_XMIT_TIMEDOUT: case CPL_ERR_PERSIST_TIMEDOUT: case CPL_ERR_FINWAIT2_TIMEDOUT: case CPL_ERR_KEEPALIVE_TIMEDOUT: return -ETIMEDOUT; default: return -EIO; } } static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data; unsigned int tid = GET_TID(req); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; int rst_status = CPL_ABORT_NO_RST; csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", (&csk->saddr), (&csk->daddr), csk, csk->state, csk->flags, csk->tid, req->status); if (is_neg_adv(req->status)) goto rel_skb; cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { send_tx_flowc_wr(csk); cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); cxgbi_sock_set_state(csk, CTP_ABORTING); send_abort_rpl(csk, rst_status); if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { csk->err = abort_status_to_errno(csk, req->status, &rst_status); cxgbi_sock_closed(csk); } spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); rel_skb: __kfree_skb(skb); } static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data; unsigned int tid = GET_TID(rpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_tid(t, tid); if (!csk) goto rel_skb; pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", (&csk->saddr), (&csk->daddr), csk, csk->state, csk->flags, csk->tid, rpl->status); if (rpl->status == CPL_ERR_ABORT_FAILED) goto rel_skb; cxgbi_sock_rcv_abort_rpl(csk); rel_skb: __kfree_skb(skb); } static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data; unsigned int tid = GET_TID(cpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_tid(t, tid); if (!csk) { pr_err("can't find connection for tid %u.\n", tid); } else { /* not expecting this, reset the connection. */ pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); spin_lock_bh(&csk->lock); send_abort_req(csk); spin_unlock_bh(&csk->lock); } __kfree_skb(skb); } static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); unsigned int tid = GET_TID(cpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find conn. 
for tid %u.\n", tid); goto rel_skb; } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", csk, csk->state, csk->flags, csk->tid, skb, skb->len, pdu_len_ddp); spin_lock_bh(&csk->lock); if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); if (csk->state != CTP_ABORTING) goto abort_conn; else goto discard; } cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); cxgbi_skcb_flags(skb) = 0; skb_reset_transport_header(skb); __skb_pull(skb, sizeof(*cpl)); __pskb_trim(skb, ntohs(cpl->len)); if (!csk->skb_ulp_lhdr) { unsigned char *bhs; unsigned int hlen, dlen, plen; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", csk, csk->state, csk->flags, csk->tid, skb); csk->skb_ulp_lhdr = skb; cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) && (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) { pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", csk->tid, cxgbi_skcb_tcp_seq(skb), csk->rcv_nxt); goto abort_conn; } bhs = skb->data; hlen = ntohs(cpl->len); dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; plen = ISCSI_PDU_LEN_G(pdu_len_ddp); if (is_t4(lldi->adapter_type)) plen -= 40; if ((hlen + dlen) != plen) { pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " "mismatch %u != %u + %u, seq 0x%x.\n", csk->tid, plen, hlen, dlen, cxgbi_skcb_tcp_seq(skb)); goto abort_conn; } cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3); if (dlen) cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n", csk, skb, *bhs, hlen, dlen, ntohl(*((unsigned int *)(bhs + 16))), ntohl(*((unsigned int *)(bhs + 24)))); } else { struct sk_buff *lskb = csk->skb_ulp_lhdr; cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", csk, csk->state, csk->flags, skb, lskb); } __skb_queue_tail(&csk->receive_queue, skb); spin_unlock_bh(&csk->lock); return; abort_conn: send_abort_req(csk); discard: spin_unlock_bh(&csk->lock); rel_skb: __kfree_skb(skb); } static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; struct sk_buff *lskb; u32 tid = GET_TID(cpl); u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find conn. 
for tid %u.\n", tid); goto rel_skb; } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", csk, csk->state, csk->flags, csk->tid, skb, skb->len, pdu_len_ddp); spin_lock_bh(&csk->lock); if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); if (csk->state != CTP_ABORTING) goto abort_conn; else goto discard; } cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq); cxgbi_skcb_flags(skb) = 0; skb_reset_transport_header(skb); __skb_pull(skb, sizeof(*cpl)); __pskb_trim(skb, ntohs(cpl->len)); if (!csk->skb_ulp_lhdr) csk->skb_ulp_lhdr = skb; lskb = csk->skb_ulp_lhdr; cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", csk, csk->state, csk->flags, skb, lskb); __skb_queue_tail(&csk->receive_queue, skb); spin_unlock_bh(&csk->lock); return; abort_conn: send_abort_req(csk); discard: spin_unlock_bh(&csk->lock); rel_skb: __kfree_skb(skb); } static void cxgb4i_process_ddpvld(struct cxgbi_sock *csk, struct sk_buff *skb, u32 ddpvld) { if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", csk, skb, ddpvld, cxgbi_skcb_flags(skb)); cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); } if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", csk, skb, ddpvld, cxgbi_skcb_flags(skb)); cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); } if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", csk, skb, ddpvld); cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); } if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", csk, skb, ddpvld); cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); } } static void do_rx_data_ddp(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct sk_buff *lskb; struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data; unsigned int tid = GET_TID(rpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; u32 ddpvld = be32_to_cpu(rpl->ddpvld); csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr); spin_lock_bh(&csk->lock); if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); if (csk->state != CTP_ABORTING) goto abort_conn; else goto discard; } if (!csk->skb_ulp_lhdr) { pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); goto abort_conn; } lskb = csk->skb_ulp_lhdr; csk->skb_ulp_lhdr = NULL; cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); cxgb4i_process_ddpvld(csk, lskb, ddpvld); log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, lskb 0x%p, f 0x%lx.\n", csk, lskb, cxgbi_skcb_flags(lskb)); cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); cxgbi_conn_pdu_ready(csk); 
spin_unlock_bh(&csk->lock); goto rel_skb; abort_conn: send_abort_req(csk); discard: spin_unlock_bh(&csk->lock); rel_skb: __kfree_skb(skb); } static void do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data; struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; struct sk_buff *data_skb = NULL; u32 tid = GET_TID(rpl); u32 ddpvld = be32_to_cpu(rpl->ddpvld); u32 seq = be32_to_cpu(rpl->seq); u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp); csk = lookup_tid(t, tid); if (unlikely(!csk)) { pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, " "pdu_len_ddp %u, status %u.\n", csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr, ntohs(rpl->len), pdu_len_ddp, rpl->status); spin_lock_bh(&csk->lock); if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); if (csk->state != CTP_ABORTING) goto abort_conn; else goto discard; } cxgbi_skcb_tcp_seq(skb) = seq; cxgbi_skcb_flags(skb) = 0; cxgbi_skcb_rx_pdulen(skb) = 0; skb_reset_transport_header(skb); __skb_pull(skb, sizeof(*rpl)); __pskb_trim(skb, be16_to_cpu(rpl->len)); csk->rcv_nxt = seq + pdu_len_ddp; if (csk->skb_ulp_lhdr) { data_skb = skb_peek(&csk->receive_queue); if (!data_skb || !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) { pr_err("Error! freelist data not found 0x%p, tid %u\n", data_skb, tid); goto abort_conn; } __skb_unlink(data_skb, &csk->receive_queue); cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA); __skb_queue_tail(&csk->receive_queue, skb); __skb_queue_tail(&csk->receive_queue, data_skb); } else { __skb_queue_tail(&csk->receive_queue, skb); } csk->skb_ulp_lhdr = NULL; cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL); cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc); cxgb4i_process_ddpvld(csk, skb, ddpvld); log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n", csk, skb, cxgbi_skcb_flags(skb)); cxgbi_conn_pdu_ready(csk); spin_unlock_bh(&csk->lock); return; abort_conn: send_abort_req(csk); discard: spin_unlock_bh(&csk->lock); rel_skb: __kfree_skb(skb); } static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cxgbi_sock *csk; struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data; unsigned int tid = GET_TID(rpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; csk = lookup_tid(t, tid); if (unlikely(!csk)) pr_err("can't find connection for tid %u.\n", tid); else { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), rpl->seq_vld); } __kfree_skb(skb); } static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) { struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; unsigned int tid = GET_TID(rpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; struct cxgbi_sock *csk; csk = lookup_tid(t, tid); if (!csk) { pr_err("can't find conn. 
for tid %u.\n", tid); return; } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,%lx,%u, status 0x%x.\n", csk, csk->state, csk->flags, csk->tid, rpl->status); if (rpl->status != CPL_ERR_NONE) { pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", csk, tid, rpl->status); csk->err = -EINVAL; } complete(&csk->cmpl); __kfree_skb(skb); } static int alloc_cpls(struct cxgbi_sock *csk) { csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, GFP_KERNEL); if (!csk->cpl_close) return -ENOMEM; csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, GFP_KERNEL); if (!csk->cpl_abort_req) goto free_cpls; csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, GFP_KERNEL); if (!csk->cpl_abort_rpl) goto free_cpls; return 0; free_cpls: cxgbi_sock_free_cpl_skbs(csk); return -ENOMEM; } static inline void l2t_put(struct cxgbi_sock *csk) { if (csk->l2t) { cxgb4_l2t_release(csk->l2t); csk->l2t = NULL; cxgbi_sock_put(csk); } } static void release_offload_resources(struct cxgbi_sock *csk) { struct cxgb4_lld_info *lldi; #if IS_ENABLED(CONFIG_IPV6) struct net_device *ndev = csk->cdev->ports[csk->port_id]; #endif log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); cxgbi_sock_free_cpl_skbs(csk); cxgbi_sock_purge_write_queue(csk); if (csk->wr_cred != csk->wr_max_cred) { cxgbi_sock_purge_wr_queue(csk); cxgbi_sock_reset_wr_list(csk); } l2t_put(csk); #if IS_ENABLED(CONFIG_IPV6) if (csk->csk_family == AF_INET6) cxgb4_clip_release(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); #endif if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) free_atid(csk); else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { lldi = cxgbi_cdev_priv(csk->cdev); cxgb4_remove_tid(lldi->tids, 0, csk->tid, csk->csk_family); cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); cxgbi_sock_put(csk); } csk->dst = NULL; } #ifdef CONFIG_CHELSIO_T4_DCB static inline u8 get_iscsi_dcb_state(struct net_device *ndev) { return ndev->dcbnl_ops->getstate(ndev); } static int select_priority(int pri_mask) { if (!pri_mask) return 0; return (ffs(pri_mask) - 1); } static u8 get_iscsi_dcb_priority(struct net_device *ndev) { int rv; u8 caps; struct dcb_app iscsi_dcb_app = { .protocol = 3260 }; rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps); if (rv) return 0; if (caps & DCB_CAP_DCBX_VER_IEEE) { iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM; rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); if (!rv) { iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); } } else if (caps & DCB_CAP_DCBX_VER_CEE) { iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; rv = dcb_getapp(ndev, &iscsi_dcb_app); } log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority is set to %u\n", select_priority(rv)); return select_priority(rv); } #endif static int init_act_open(struct cxgbi_sock *csk) { struct cxgbi_device *cdev = csk->cdev; struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct net_device *ndev = cdev->ports[csk->port_id]; struct sk_buff *skb = NULL; struct neighbour *n = NULL; void *daddr; unsigned int step; unsigned int rxq_idx; unsigned int size, size6; unsigned int linkspeed; unsigned int rcv_winf, snd_winf; #ifdef CONFIG_CHELSIO_T4_DCB u8 priority = 0; #endif log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); if (csk->csk_family == AF_INET) daddr = &csk->daddr.sin_addr.s_addr; #if IS_ENABLED(CONFIG_IPV6) else if (csk->csk_family == AF_INET6) daddr = &csk->daddr6.sin6_addr; #endif else 
{ pr_err("address family 0x%x not supported\n", csk->csk_family); goto rel_resource; } n = dst_neigh_lookup(csk->dst, daddr); if (!n) { pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); goto rel_resource; } if (!(n->nud_state & NUD_VALID)) neigh_event_send(n, NULL); csk->atid = cxgb4_alloc_atid(lldi->tids, csk); if (csk->atid < 0) { pr_err("%s, NO atid available.\n", ndev->name); goto rel_resource_without_clip; } cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); #ifdef CONFIG_CHELSIO_T4_DCB if (get_iscsi_dcb_state(ndev)) priority = get_iscsi_dcb_priority(ndev); csk->dcb_priority = priority; csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority); #else csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); #endif if (!csk->l2t) { pr_err("%s, cannot alloc l2t.\n", ndev->name); goto rel_resource_without_clip; } cxgbi_sock_get(csk); #if IS_ENABLED(CONFIG_IPV6) if (csk->csk_family == AF_INET6) cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); #endif if (is_t4(lldi->adapter_type)) { size = sizeof(struct cpl_act_open_req); size6 = sizeof(struct cpl_act_open_req6); } else if (is_t5(lldi->adapter_type)) { size = sizeof(struct cpl_t5_act_open_req); size6 = sizeof(struct cpl_t5_act_open_req6); } else { size = sizeof(struct cpl_t6_act_open_req); size6 = sizeof(struct cpl_t6_act_open_req6); } if (csk->csk_family == AF_INET) skb = alloc_wr(size, 0, GFP_NOIO); #if IS_ENABLED(CONFIG_IPV6) else skb = alloc_wr(size6, 0, GFP_NOIO); #endif if (!skb) goto rel_resource; skb->sk = (struct sock *)csk; t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); if (!csk->mtu) csk->mtu = dst_mtu(csk->dst); cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); csk->tx_chan = cxgb4_port_chan(ndev); csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; step = lldi->ntxq / lldi->nchan; csk->txq_idx = cxgb4_port_idx(ndev) * step; step = lldi->nrxq / lldi->nchan; rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step); cdev->rxq_idx_cntr++; csk->rss_qid = lldi->rxq_ids[rxq_idx]; linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed; csk->snd_win = cxgb4i_snd_win; csk->rcv_win = cxgb4i_rcv_win; if (cxgb4i_rcv_win <= 0) { csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN; rcv_winf = linkspeed / SPEED_10000; if (rcv_winf) csk->rcv_win *= rcv_winf; } if (cxgb4i_snd_win <= 0) { csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN; snd_winf = linkspeed / SPEED_10000; if (snd_winf) csk->snd_win *= snd_winf; } csk->wr_cred = lldi->wr_cred - DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); csk->wr_max_cred = csk->wr_cred; csk->wr_una_cred = 0; cxgbi_sock_reset_wr_list(csk); csk->err = 0; pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n", (&csk->saddr), (&csk->daddr), csk, csk->state, csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, csk->smac_idx); /* must wait for either a act_open_rpl or act_open_establish */ if (!try_module_get(cdev->owner)) { pr_err("%s, try_module_get failed.\n", ndev->name); goto rel_resource; } cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); if (csk->csk_family == AF_INET) send_act_open_req(csk, skb, csk->l2t); #if IS_ENABLED(CONFIG_IPV6) else send_act_open_req6(csk, skb, csk->l2t); #endif neigh_release(n); return 0; rel_resource: #if IS_ENABLED(CONFIG_IPV6) if (csk->csk_family == AF_INET6) cxgb4_clip_release(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); #endif rel_resource_without_clip: if (n) neigh_release(n); if (skb) __kfree_skb(skb); return -EINVAL; } static cxgb4i_cplhandler_func 
cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = do_act_establish, [CPL_ACT_OPEN_RPL] = do_act_open_rpl, [CPL_PEER_CLOSE] = do_peer_close, [CPL_ABORT_REQ_RSS] = do_abort_req_rss, [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss, [CPL_CLOSE_CON_RPL] = do_close_con_rpl, [CPL_FW4_ACK] = do_fw4_ack, [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, [CPL_ISCSI_DATA] = do_rx_iscsi_data, [CPL_SET_TCB_RPL] = do_set_tcb_rpl, [CPL_RX_DATA_DDP] = do_rx_data_ddp, [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp, [CPL_RX_DATA] = do_rx_data, }; static int cxgb4i_ofld_init(struct cxgbi_device *cdev) { int rc; if (cxgb4i_max_connect > CXGB4I_MAX_CONN) cxgb4i_max_connect = CXGB4I_MAX_CONN; rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base, cxgb4i_max_connect); if (rc < 0) return rc; cdev->csk_release_offload_resources = release_offload_resources; cdev->csk_push_tx_frames = push_tx_frames; cdev->csk_send_abort_req = send_abort_req; cdev->csk_send_close_req = send_close_req; cdev->csk_send_rx_credits = send_rx_credits; cdev->csk_alloc_cpls = alloc_cpls; cdev->csk_init_act_open = init_act_open; pr_info("cdev 0x%p, offload up, added.\n", cdev); return 0; } static inline void ulp_mem_io_set_hdr(struct cxgbi_device *cdev, struct ulp_mem_io *req, unsigned int wr_len, unsigned int dlen, unsigned int pm_addr, int tid) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); INIT_ULPTX_WR(req, wr_len, 0, tid); req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_ATOMIC_V(0)); req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) | T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type))); req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); idata->len = htonl(dlen); } static struct sk_buff * ddp_ppod_init_idata(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, unsigned int idx, unsigned int npods, unsigned int tid) { unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; unsigned int dlen = npods << PPOD_SIZE_SHIFT; unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + dlen, 16); struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC); if (!skb) { pr_err("%s: %s idx %u, npods %u, OOM.\n", __func__, ppm->ndev->name, idx, npods); return NULL; } ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen, pm_addr, tid); return skb; } static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, struct cxgbi_task_tag_info *ttinfo, unsigned int idx, unsigned int npods, struct scatterlist **sg_pp, unsigned int *sg_off) { struct cxgbi_device *cdev = csk->cdev; struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods, csk->tid); struct ulp_mem_io *req; struct ulptx_idata *idata; struct cxgbi_pagepod *ppod; int i; if (!skb) return -ENOMEM; req = (struct ulp_mem_io *)skb->head; idata = (struct ulptx_idata *)(req + 1); ppod = (struct cxgbi_pagepod *)(idata + 1); for (i = 0; i < npods; i++, ppod++) cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE); cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL); set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); spin_lock_bh(&csk->lock); cxgbi_sock_skb_entail(csk, skb); spin_unlock_bh(&csk->lock); return 0; } static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, struct cxgbi_task_tag_info 
*ttinfo) { unsigned int pidx = ttinfo->idx; unsigned int npods = ttinfo->npods; unsigned int i, cnt; int err = 0; struct scatterlist *sg = ttinfo->sgl; unsigned int offset = 0; ttinfo->cid = csk->port_id; for (i = 0; i < npods; i += cnt, pidx += cnt) { cnt = npods - i; if (cnt > ULPMEM_IDATA_MAX_NPPODS) cnt = ULPMEM_IDATA_MAX_NPPODS; err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, &sg, &offset); if (err < 0) break; } return err; } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, int pg_idx) { struct sk_buff *skb; struct cpl_set_tcb_field *req; if (!pg_idx || pg_idx >= DDP_PGIDX_MAX) return 0; skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); if (!skb) return -ENOMEM; /* set up ulp page size */ req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 8); req->val = cpu_to_be64(pg_idx << 8); set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); wait_for_completion(&csk->cmpl); return csk->err; } static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, int hcrc, int dcrc) { struct sk_buff *skb; struct cpl_set_tcb_field *req; if (!hcrc && !dcrc) return 0; skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); if (!skb) return -ENOMEM; csk->hcrc_len = (hcrc ? 4 : 0); csk->dcrc_len = (dcrc ? 4 : 0); /* set up ulp submode */ req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 4); req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | (dcrc ? 
ULP_CRC_DATA : 0)) << 4); set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); wait_for_completion(&csk->cmpl); return csk->err; } static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) { return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *) (cxgbi_cdev_priv(cdev)))->iscsi_ppm); } static int cxgb4i_ddp_init(struct cxgbi_device *cdev) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct net_device *ndev = cdev->ports[0]; struct cxgbi_tag_format tformat; int i, err; if (!lldi->vr->iscsi.size) { pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); return -EACCES; } cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ; memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); for (i = 0; i < 4; i++) tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) & 0xF; cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x", lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size); err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, lldi->vr->iscsi.size, lldi->iscsi_llimit, lldi->vr->iscsi.start, 2, lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size); if (err < 0) return err; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; cdev->csk_ddp_set_map = ddp_set_map; cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); cdev->cdev2ppm = cdev2ppm; return 0; } static bool is_memfree(struct adapter *adap) { u32 io; io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); if (is_t5(adap->params.chip)) { if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F)) return false; } else if (io & EXT_MEM_ENABLE_F) { return false; } return true; } static void *t4_uld_add(const struct cxgb4_lld_info *lldi) { struct cxgbi_device *cdev; struct port_info *pi; struct net_device *ndev; struct adapter *adap; struct tid_info *t; u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH; u32 max_conn = CXGBI_MAX_CONN; int i, rc; cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports); if (!cdev) { pr_info("t4 device 0x%p, register failed.\n", lldi); return NULL; } pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n", cdev, lldi->adapter_type, lldi->nports, lldi->ports[0]->name, lldi->nchan, lldi->ntxq, lldi->nrxq, lldi->wr_cred); for (i = 0; i < lldi->nrxq; i++) log_debug(1 << CXGBI_DBG_DEV, "t4 0x%p, rxq id #%d: %u.\n", cdev, i, lldi->rxq_ids[i]); memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi)); cdev->flags = CXGBI_FLAG_DEV_T4; cdev->pdev = lldi->pdev; cdev->ports = lldi->ports; cdev->nports = lldi->nports; cdev->mtus = lldi->mtus; cdev->nmtus = NMTUS; cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) ? 
cxgb4i_rx_credit_thres : 0; cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); cdev->itp = &cxgb4i_iscsi_transport; cdev->owner = THIS_MODULE; cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf); pr_info("cdev 0x%p,%s, pfvf %u.\n", cdev, lldi->ports[0]->name, cdev->pfvf); rc = cxgb4i_ddp_init(cdev); if (rc) { pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc); goto err_out; } ndev = cdev->ports[0]; adap = netdev2adap(ndev); if (adap) { t = &adap->tids; if (t->ntids <= CXGBI_MAX_CONN) max_conn = t->ntids; if (is_memfree(adap)) { cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF; max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2; pr_info("%s: 0x%p, tid %u, SO adapter.\n", ndev->name, cdev, t->ntids); } } else { pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev); } /* ISO is enabled in T5/T6 firmware version >= 1.13.43.0 */ if (!is_t4(lldi->adapter_type) && (lldi->fw_vers >= 0x10d2b00) && !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF)) cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso); rc = cxgb4i_ofld_init(cdev); if (rc) { pr_info("t4 0x%p ofld init failed.\n", cdev); goto err_out; } cxgb4i_host_template.can_queue = max_cmds; rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn, &cxgb4i_host_template, cxgb4i_stt); if (rc) goto err_out; for (i = 0; i < cdev->nports; i++) { pi = netdev_priv(lldi->ports[i]); cdev->hbas[i]->port_id = pi->port_id; } return cdev; err_out: cxgbi_device_unregister(cdev); return ERR_PTR(-ENOMEM); } #define RX_PULL_LEN 128 static int t4_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *pgl) { const struct cpl_act_establish *rpl; struct sk_buff *skb; unsigned int opc; struct cxgbi_device *cdev = handle; if (pgl == NULL) { unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; skb = alloc_wr(len, 0, GFP_ATOMIC); if (!skb) goto nomem; skb_copy_to_linear_data(skb, &rsp[1], len); } else { if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) { pr_info("? 
FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid,
		  ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);
linux-master
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c